1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally ported from the -rt patch by:
9 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code in the latency_tracer, that is:
12 *
13 * Copyright (C) 2004-2006 Ingo Molnar
14 * Copyright (C) 2004 Nadia Yvette Chambers
15 */
16
17 #include <linux/stop_machine.h>
18 #include <linux/clocksource.h>
19 #include <linux/sched/task.h>
20 #include <linux/kallsyms.h>
21 #include <linux/security.h>
22 #include <linux/seq_file.h>
23 #include <linux/tracefs.h>
24 #include <linux/hardirq.h>
25 #include <linux/kthread.h>
26 #include <linux/uaccess.h>
27 #include <linux/bsearch.h>
28 #include <linux/module.h>
29 #include <linux/ftrace.h>
30 #include <linux/sysctl.h>
31 #include <linux/slab.h>
32 #include <linux/ctype.h>
33 #include <linux/sort.h>
34 #include <linux/list.h>
35 #include <linux/hash.h>
36 #include <linux/rcupdate.h>
37 #include <linux/kprobes.h>
38
39 #include <trace/events/sched.h>
40
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43
44 #include "ftrace_internal.h"
45 #include "trace_output.h"
46 #include "trace_stat.h"
47
48 /* Flags that do not get reset */
49 #define FTRACE_NOCLEAR_FLAGS (FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
50 FTRACE_FL_MODIFIED)
51
52 #define FTRACE_INVALID_FUNCTION "__ftrace_invalid_address__"
53
54 #define FTRACE_WARN_ON(cond) \
55 ({ \
56 int ___r = cond; \
57 if (WARN_ON(___r)) \
58 ftrace_kill(); \
59 ___r; \
60 })
61
62 #define FTRACE_WARN_ON_ONCE(cond) \
63 ({ \
64 int ___r = cond; \
65 if (WARN_ON_ONCE(___r)) \
66 ftrace_kill(); \
67 ___r; \
68 })
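/*
 * Editor's note (illustrative, not from the original source): both macros
 * above are GNU statement expressions, so they can be used inline in a
 * condition while still shutting ftrace down via ftrace_kill() on failure:
 *
 *	if (FTRACE_WARN_ON_ONCE(!rec))
 *		return -EINVAL;
 *
 * The value of the condition is preserved and handed back to the caller.
 */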
69
70 /* hash bits for specific function selection */
71 #define FTRACE_HASH_DEFAULT_BITS 10
72 #define FTRACE_HASH_MAX_BITS 12
73
74 #ifdef CONFIG_DYNAMIC_FTRACE
75 #define INIT_OPS_HASH(opsname) \
76 .func_hash = &opsname.local_hash, \
77 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), \
78 .subop_list = LIST_HEAD_INIT(opsname.subop_list),
79 #else
80 #define INIT_OPS_HASH(opsname)
81 #endif
82
83 enum {
84 FTRACE_MODIFY_ENABLE_FL = (1 << 0),
85 FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1),
86 };
87
88 struct ftrace_ops ftrace_list_end __read_mostly = {
89 .func = ftrace_stub,
90 .flags = FTRACE_OPS_FL_STUB,
91 INIT_OPS_HASH(ftrace_list_end)
92 };
93
94 /* ftrace_enabled is a method to turn ftrace on or off */
95 int ftrace_enabled __read_mostly;
96 static int __maybe_unused last_ftrace_enabled;
97
98 /* Current function tracing op */
99 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
100 /* What to set function_trace_op to */
101 static struct ftrace_ops *set_function_trace_op;
102
103 bool ftrace_pids_enabled(struct ftrace_ops *ops)
104 {
105 struct trace_array *tr;
106
107 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
108 return false;
109
110 tr = ops->private;
111
112 return tr->function_pids != NULL || tr->function_no_pids != NULL;
113 }
114
115 static void ftrace_update_trampoline(struct ftrace_ops *ops);
116
117 /*
118 * ftrace_disabled is set when an anomaly is discovered.
119 * ftrace_disabled is much stronger than ftrace_enabled.
120 */
121 static int ftrace_disabled __read_mostly;
122
123 DEFINE_MUTEX(ftrace_lock);
124
125 struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = (struct ftrace_ops __rcu *)&ftrace_list_end;
126 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
127 struct ftrace_ops global_ops;
128
129 /* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
130 void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
131 struct ftrace_ops *op, struct ftrace_regs *fregs);
132
133 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
134 /*
135 * Stub used to invoke the list ops without requiring a separate trampoline.
136 */
137 const struct ftrace_ops ftrace_list_ops = {
138 .func = ftrace_ops_list_func,
139 .flags = FTRACE_OPS_FL_STUB,
140 };
141
142 static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
143 struct ftrace_ops *op,
144 struct ftrace_regs *fregs)
145 {
146 /* do nothing */
147 }
148
149 /*
150 * Stub used when a call site is disabled. May be called transiently by threads
151 * which have made it into ftrace_caller but haven't yet recovered the ops at
152 * the point the call site is disabled.
153 */
154 const struct ftrace_ops ftrace_nop_ops = {
155 .func = ftrace_ops_nop_func,
156 .flags = FTRACE_OPS_FL_STUB,
157 };
158 #endif
159
160 static inline void ftrace_ops_init(struct ftrace_ops *ops)
161 {
162 #ifdef CONFIG_DYNAMIC_FTRACE
163 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
164 mutex_init(&ops->local_hash.regex_lock);
165 INIT_LIST_HEAD(&ops->subop_list);
166 ops->func_hash = &ops->local_hash;
167 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
168 }
169 #endif
170 }
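/*
 * Editor's note (illustrative): statically defined ops such as ftrace_list_end
 * are set up at build time via INIT_OPS_HASH(), while other ops are lazily
 * initialized here on first use. Either way, after this point ops->func_hash
 * points at the ops' own local_hash and its regex_lock and subop_list are
 * ready to use.
 */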
171
172 /* Call this function for when a callback filters on set_ftrace_pid */
173 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
174 struct ftrace_ops *op, struct ftrace_regs *fregs)
175 {
176 struct trace_array *tr = op->private;
177 int pid;
178
179 if (tr) {
180 pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
181 if (pid == FTRACE_PID_IGNORE)
182 return;
183 if (pid != FTRACE_PID_TRACE &&
184 pid != current->pid)
185 return;
186 }
187
188 op->saved_func(ip, parent_ip, op, fregs);
189 }
190
191 static void ftrace_sync_ipi(void *data)
192 {
193 /* Probably not needed, but do it anyway */
194 smp_rmb();
195 }
196
197 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
198 {
199 /*
200 * If this is a dynamic or RCU ops, or we force list func,
201 * then it needs to call the list anyway.
202 */
203 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
204 FTRACE_FORCE_LIST_FUNC)
205 return ftrace_ops_list_func;
206
207 return ftrace_ops_get_func(ops);
208 }
209
210 static void update_ftrace_function(void)
211 {
212 ftrace_func_t func;
213
214 /*
215 * Prepare the ftrace_ops that the arch callback will use.
216 * If there's only one ftrace_ops registered, the ftrace_ops_list
217 * will point to the ops we want.
218 */
219 set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
220 lockdep_is_held(&ftrace_lock));
221
222 /* If there's no ftrace_ops registered, just call the stub function */
223 if (set_function_trace_op == &ftrace_list_end) {
224 func = ftrace_stub;
225
226 /*
227 * If we are at the end of the list and this ops is
228 * recursion safe and not dynamic and the arch supports passing ops,
229 * then have the mcount trampoline call the function directly.
230 */
231 } else if (rcu_dereference_protected(ftrace_ops_list->next,
232 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
233 func = ftrace_ops_get_list_func(ftrace_ops_list);
234
235 } else {
236 /* Just use the default ftrace_ops */
237 set_function_trace_op = &ftrace_list_end;
238 func = ftrace_ops_list_func;
239 }
240
241 /* If there's no change, then do nothing more here */
242 if (ftrace_trace_function == func)
243 return;
244
245 /*
246 * If we are using the list function, it doesn't care
247 * about the function_trace_ops.
248 */
249 if (func == ftrace_ops_list_func) {
250 ftrace_trace_function = func;
251 /*
252 * Don't even bother setting function_trace_ops,
253 * it would be racy to do so anyway.
254 */
255 return;
256 }
257
258 #ifndef CONFIG_DYNAMIC_FTRACE
259 /*
260 * For static tracing, we need to be a bit more careful.
261 * The function change takes effect immediately. Thus,
262 * we need to coordinate the setting of the function_trace_ops
263 * with the setting of the ftrace_trace_function.
264 *
265 * Set the function to the list ops, which will call the
266 * function we want, albeit indirectly, but it handles the
267 * ftrace_ops and doesn't depend on function_trace_op.
268 */
269 ftrace_trace_function = ftrace_ops_list_func;
270 /*
271 * Make sure all CPUs see this. Yes this is slow, but static
272 * tracing is slow and nasty to have enabled.
273 */
274 synchronize_rcu_tasks_rude();
275 /* Now all cpus are using the list ops. */
276 function_trace_op = set_function_trace_op;
277 /* Make sure the function_trace_op is visible on all CPUs */
278 smp_wmb();
279 /* Nasty way to force a rmb on all cpus */
280 smp_call_function(ftrace_sync_ipi, NULL, 1);
281 /* OK, we are all set to update the ftrace_trace_function now! */
282 #endif /* !CONFIG_DYNAMIC_FTRACE */
283
284 ftrace_trace_function = func;
285 }
286
287 static void add_ftrace_ops(struct ftrace_ops __rcu **list,
288 struct ftrace_ops *ops)
289 {
290 rcu_assign_pointer(ops->next, *list);
291
292 /*
293 * We are entering ops into the list but another
294 * CPU might be walking that list. We need to make sure
295 * the ops->next pointer is valid before another CPU sees
296 * the ops pointer included into the list.
297 */
298 rcu_assign_pointer(*list, ops);
299 }
300
301 static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
302 struct ftrace_ops *ops)
303 {
304 struct ftrace_ops **p;
305
306 /*
307 * If we are removing the last function, then simply point
308 * to the ftrace_stub.
309 */
310 if (rcu_dereference_protected(*list,
311 lockdep_is_held(&ftrace_lock)) == ops &&
312 rcu_dereference_protected(ops->next,
313 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
314 rcu_assign_pointer(*list, &ftrace_list_end);
315 return 0;
316 }
317
318 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
319 if (*p == ops)
320 break;
321
322 if (*p != ops)
323 return -1;
324
325 *p = (*p)->next;
326 return 0;
327 }
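/*
 * Editor's note (illustrative): removal only unlinks the ops from the
 * RCU-protected list. Callers that free a dynamically allocated ops must
 * still wait for a grace period so that CPUs walking the list in
 * ftrace_ops_list_func() cannot touch freed memory; see the note in
 * ftrace_ops_trampoline() about ops being freed after a synchronize_rcu().
 */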
328
329 static void ftrace_update_trampoline(struct ftrace_ops *ops);
330
331 int __register_ftrace_function(struct ftrace_ops *ops)
332 {
333 if (ops->flags & FTRACE_OPS_FL_DELETED)
334 return -EINVAL;
335
336 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
337 return -EBUSY;
338
339 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
340 /*
341 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
342 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
343 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
344 */
345 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
346 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
347 return -EINVAL;
348
349 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
350 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
351 #endif
352 if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
353 return -EBUSY;
354
355 if (!is_kernel_core_data((unsigned long)ops))
356 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
357
358 add_ftrace_ops(&ftrace_ops_list, ops);
359
360 /* Always save the function, and reset at unregistering */
361 ops->saved_func = ops->func;
362
363 if (ftrace_pids_enabled(ops))
364 ops->func = ftrace_pid_func;
365
366 ftrace_update_trampoline(ops);
367
368 if (ftrace_enabled)
369 update_ftrace_function();
370
371 return 0;
372 }
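/*
 * Editor's sketch (illustrative, not part of the original file): a typical
 * user goes through the public register_ftrace_function() wrapper with an
 * ops whose callback matches ftrace_func_t, for example:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// inspect ip/parent_ip here
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * The names my_callback/my_ops are hypothetical; only the ftrace_ops layout
 * and the register/unregister calls are existing kernel API.
 */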
373
374 int __unregister_ftrace_function(struct ftrace_ops *ops)
375 {
376 int ret;
377
378 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
379 return -EBUSY;
380
381 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
382
383 if (ret < 0)
384 return ret;
385
386 if (ftrace_enabled)
387 update_ftrace_function();
388
389 ops->func = ops->saved_func;
390
391 return 0;
392 }
393
394 static void ftrace_update_pid_func(void)
395 {
396 struct ftrace_ops *op;
397
398 /* Only do something if we are tracing something */
399 if (ftrace_trace_function == ftrace_stub)
400 return;
401
402 do_for_each_ftrace_op(op, ftrace_ops_list) {
403 if (op->flags & FTRACE_OPS_FL_PID) {
404 op->func = ftrace_pids_enabled(op) ?
405 ftrace_pid_func : op->saved_func;
406 ftrace_update_trampoline(op);
407 }
408 } while_for_each_ftrace_op(op);
409
410 fgraph_update_pid_func();
411
412 update_ftrace_function();
413 }
414
415 #ifdef CONFIG_FUNCTION_PROFILER
416 struct ftrace_profile {
417 struct hlist_node node;
418 unsigned long ip;
419 unsigned long counter;
420 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
421 unsigned long long time;
422 unsigned long long time_squared;
423 #endif
424 };
425
426 struct ftrace_profile_page {
427 struct ftrace_profile_page *next;
428 unsigned long index;
429 struct ftrace_profile records[];
430 };
431
432 struct ftrace_profile_stat {
433 atomic_t disabled;
434 struct hlist_head *hash;
435 struct ftrace_profile_page *pages;
436 struct ftrace_profile_page *start;
437 struct tracer_stat stat;
438 };
439
440 #define PROFILE_RECORDS_SIZE \
441 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
442
443 #define PROFILES_PER_PAGE \
444 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
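/*
 * Editor's note (illustrative arithmetic): on a typical 64-bit build with
 * 4K pages, the page header (next + index) takes 16 bytes and a
 * struct ftrace_profile is 48 bytes with the function-graph fields
 * (32 bytes without), so PROFILES_PER_PAGE works out to roughly 85 records
 * per page (about 127 without the graph fields). The exact values depend
 * on PAGE_SIZE and the configuration.
 */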
445
446 static int ftrace_profile_enabled __read_mostly;
447
448 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
449 static DEFINE_MUTEX(ftrace_profile_lock);
450
451 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
452
453 #define FTRACE_PROFILE_HASH_BITS 10
454 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
455
456 static void *
457 function_stat_next(void *v, int idx)
458 {
459 struct ftrace_profile *rec = v;
460 struct ftrace_profile_page *pg;
461
462 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
463
464 again:
465 if (idx != 0)
466 rec++;
467
468 if ((void *)rec >= (void *)&pg->records[pg->index]) {
469 pg = pg->next;
470 if (!pg)
471 return NULL;
472 rec = &pg->records[0];
473 if (!rec->counter)
474 goto again;
475 }
476
477 return rec;
478 }
479
480 static void *function_stat_start(struct tracer_stat *trace)
481 {
482 struct ftrace_profile_stat *stat =
483 container_of(trace, struct ftrace_profile_stat, stat);
484
485 if (!stat || !stat->start)
486 return NULL;
487
488 return function_stat_next(&stat->start->records[0], 0);
489 }
490
491 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
492 /* function graph compares on total time */
493 static int function_stat_cmp(const void *p1, const void *p2)
494 {
495 const struct ftrace_profile *a = p1;
496 const struct ftrace_profile *b = p2;
497
498 if (a->time < b->time)
499 return -1;
500 if (a->time > b->time)
501 return 1;
502 else
503 return 0;
504 }
505 #else
506 /* not function graph compares against hits */
507 static int function_stat_cmp(const void *p1, const void *p2)
508 {
509 const struct ftrace_profile *a = p1;
510 const struct ftrace_profile *b = p2;
511
512 if (a->counter < b->counter)
513 return -1;
514 if (a->counter > b->counter)
515 return 1;
516 else
517 return 0;
518 }
519 #endif
520
521 static int function_stat_headers(struct seq_file *m)
522 {
523 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
524 seq_puts(m, " Function "
525 "Hit Time Avg s^2\n"
526 " -------- "
527 "--- ---- --- ---\n");
528 #else
529 seq_puts(m, " Function Hit\n"
530 " -------- ---\n");
531 #endif
532 return 0;
533 }
534
535 static int function_stat_show(struct seq_file *m, void *v)
536 {
537 struct ftrace_profile *rec = v;
538 char str[KSYM_SYMBOL_LEN];
539 int ret = 0;
540 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
541 static struct trace_seq s;
542 unsigned long long avg;
543 unsigned long long stddev;
544 #endif
545 mutex_lock(&ftrace_profile_lock);
546
547 /* we raced with function_profile_reset() */
548 if (unlikely(rec->counter == 0)) {
549 ret = -EBUSY;
550 goto out;
551 }
552
553 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
554 avg = div64_ul(rec->time, rec->counter);
555 if (tracing_thresh && (avg < tracing_thresh))
556 goto out;
557 #endif
558
559 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
560 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
561
562 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
563 seq_puts(m, " ");
564
565 /* Sample variance (s^2) */
566 if (rec->counter <= 1)
567 stddev = 0;
568 else {
569 /*
570 * Apply Welford's method:
571 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
572 */
573 stddev = rec->counter * rec->time_squared -
574 rec->time * rec->time;
575
576 /*
577 * Divide by only 1000 for the ns^2 -> us^2 conversion;
578 * trace_print_graph_duration() will divide by 1000 again.
579 */
580 stddev = div64_ul(stddev,
581 rec->counter * (rec->counter - 1) * 1000);
582 }
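	/*
	 * Editor's note (worked example, illustrative): for two samples of
	 * 100ns and 200ns, n = 2, \Sum x_i = 300 and \Sum (x_i)^2 = 50000,
	 * so n * \Sum (x_i)^2 - (\Sum x_i)^2 = 100000 - 90000 = 10000.
	 * Dividing by n * (n-1) = 2 gives 5000 ns^2, the same as summing
	 * (x_i - mean)^2 over both samples and dividing by (n-1). The extra
	 * factor of 1000 above only handles the unit conversion.
	 */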
583
584 trace_seq_init(&s);
585 trace_print_graph_duration(rec->time, &s);
586 trace_seq_puts(&s, " ");
587 trace_print_graph_duration(avg, &s);
588 trace_seq_puts(&s, " ");
589 trace_print_graph_duration(stddev, &s);
590 trace_print_seq(m, &s);
591 #endif
592 seq_putc(m, '\n');
593 out:
594 mutex_unlock(&ftrace_profile_lock);
595
596 return ret;
597 }
598
599 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
600 {
601 struct ftrace_profile_page *pg;
602
603 pg = stat->pages = stat->start;
604
605 while (pg) {
606 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
607 pg->index = 0;
608 pg = pg->next;
609 }
610
611 memset(stat->hash, 0,
612 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
613 }
614
615 static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
616 {
617 struct ftrace_profile_page *pg;
618 int functions;
619 int pages;
620 int i;
621
622 /* If we already allocated, do nothing */
623 if (stat->pages)
624 return 0;
625
626 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
627 if (!stat->pages)
628 return -ENOMEM;
629
630 #ifdef CONFIG_DYNAMIC_FTRACE
631 functions = ftrace_update_tot_cnt;
632 #else
633 /*
634 * We do not know the number of functions that exist because
635 * dynamic tracing is what counts them. From past experience,
636 * we have around 20K functions. That should be more than enough.
637 * It is highly unlikely we will execute every function in
638 * the kernel.
639 */
640 functions = 20000;
641 #endif
642
643 pg = stat->start = stat->pages;
644
645 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
646
647 for (i = 1; i < pages; i++) {
648 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
649 if (!pg->next)
650 goto out_free;
651 pg = pg->next;
652 }
653
654 return 0;
655
656 out_free:
657 pg = stat->start;
658 while (pg) {
659 unsigned long tmp = (unsigned long)pg;
660
661 pg = pg->next;
662 free_page(tmp);
663 }
664
665 stat->pages = NULL;
666 stat->start = NULL;
667
668 return -ENOMEM;
669 }
670
671 static int ftrace_profile_init_cpu(int cpu)
672 {
673 struct ftrace_profile_stat *stat;
674 int size;
675
676 stat = &per_cpu(ftrace_profile_stats, cpu);
677
678 if (stat->hash) {
679 /* If the profile is already created, simply reset it */
680 ftrace_profile_reset(stat);
681 return 0;
682 }
683
684 /*
685 * We are profiling all functions, but usually only a few thousand
686 * functions are hit. We'll make a hash of 1024 items.
687 */
688 size = FTRACE_PROFILE_HASH_SIZE;
689
690 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
691
692 if (!stat->hash)
693 return -ENOMEM;
694
695 /* Preallocate the function profiling pages */
696 if (ftrace_profile_pages_init(stat) < 0) {
697 kfree(stat->hash);
698 stat->hash = NULL;
699 return -ENOMEM;
700 }
701
702 return 0;
703 }
704
705 static int ftrace_profile_init(void)
706 {
707 int cpu;
708 int ret = 0;
709
710 for_each_possible_cpu(cpu) {
711 ret = ftrace_profile_init_cpu(cpu);
712 if (ret)
713 break;
714 }
715
716 return ret;
717 }
718
719 /* interrupts must be disabled */
720 static struct ftrace_profile *
721 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
722 {
723 struct ftrace_profile *rec;
724 struct hlist_head *hhd;
725 unsigned long key;
726
727 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
728 hhd = &stat->hash[key];
729
730 if (hlist_empty(hhd))
731 return NULL;
732
733 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
734 if (rec->ip == ip)
735 return rec;
736 }
737
738 return NULL;
739 }
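/*
 * Editor's note (illustrative): the lookup hashes the instruction pointer
 * down to FTRACE_PROFILE_HASH_BITS (10) bits, i.e. one of the 1024 buckets
 * allocated in ftrace_profile_init_cpu(), and then walks the collision
 * chain comparing rec->ip. ftrace_add_profile() below derives the same key,
 * so a record is always found in the bucket it was added to.
 */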
740
741 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
742 struct ftrace_profile *rec)
743 {
744 unsigned long key;
745
746 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
747 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
748 }
749
750 /*
751 * The memory is already allocated, this simply finds a new record to use.
752 */
753 static struct ftrace_profile *
754 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
755 {
756 struct ftrace_profile *rec = NULL;
757
758 /* prevent recursion (from NMIs) */
759 if (atomic_inc_return(&stat->disabled) != 1)
760 goto out;
761
762 /*
763 * Try to find the function again since an NMI
764 * could have added it
765 */
766 rec = ftrace_find_profiled_func(stat, ip);
767 if (rec)
768 goto out;
769
770 if (stat->pages->index == PROFILES_PER_PAGE) {
771 if (!stat->pages->next)
772 goto out;
773 stat->pages = stat->pages->next;
774 }
775
776 rec = &stat->pages->records[stat->pages->index++];
777 rec->ip = ip;
778 ftrace_add_profile(stat, rec);
779
780 out:
781 atomic_dec(&stat->disabled);
782
783 return rec;
784 }
785
786 static void
787 function_profile_call(unsigned long ip, unsigned long parent_ip,
788 struct ftrace_ops *ops, struct ftrace_regs *fregs)
789 {
790 struct ftrace_profile_stat *stat;
791 struct ftrace_profile *rec;
792 unsigned long flags;
793
794 if (!ftrace_profile_enabled)
795 return;
796
797 local_irq_save(flags);
798
799 stat = this_cpu_ptr(&ftrace_profile_stats);
800 if (!stat->hash || !ftrace_profile_enabled)
801 goto out;
802
803 rec = ftrace_find_profiled_func(stat, ip);
804 if (!rec) {
805 rec = ftrace_profile_alloc(stat, ip);
806 if (!rec)
807 goto out;
808 }
809
810 rec->counter++;
811 out:
812 local_irq_restore(flags);
813 }
814
815 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
816 static bool fgraph_graph_time = true;
817
818 void ftrace_graph_graph_time_control(bool enable)
819 {
820 fgraph_graph_time = enable;
821 }
822
823 static int profile_graph_entry(struct ftrace_graph_ent *trace,
824 struct fgraph_ops *gops)
825 {
826 struct ftrace_ret_stack *ret_stack;
827
828 function_profile_call(trace->func, 0, NULL, NULL);
829
830 /* If function graph is shutting down, ret_stack can be NULL */
831 if (!current->ret_stack)
832 return 0;
833
834 ret_stack = ftrace_graph_get_ret_stack(current, 0);
835 if (ret_stack)
836 ret_stack->subtime = 0;
837
838 return 1;
839 }
840
841 static void profile_graph_return(struct ftrace_graph_ret *trace,
842 struct fgraph_ops *gops)
843 {
844 struct ftrace_ret_stack *ret_stack;
845 struct ftrace_profile_stat *stat;
846 unsigned long long calltime;
847 struct ftrace_profile *rec;
848 unsigned long flags;
849
850 local_irq_save(flags);
851 stat = this_cpu_ptr(&ftrace_profile_stats);
852 if (!stat->hash || !ftrace_profile_enabled)
853 goto out;
854
855 /* If the calltime was zero'd ignore it */
856 if (!trace->calltime)
857 goto out;
858
859 calltime = trace->rettime - trace->calltime;
860
861 if (!fgraph_graph_time) {
862
863 /* Append this call time to the parent time to subtract */
864 ret_stack = ftrace_graph_get_ret_stack(current, 1);
865 if (ret_stack)
866 ret_stack->subtime += calltime;
867
868 ret_stack = ftrace_graph_get_ret_stack(current, 0);
869 if (ret_stack && ret_stack->subtime < calltime)
870 calltime -= ret_stack->subtime;
871 else
872 calltime = 0;
873 }
874
875 rec = ftrace_find_profiled_func(stat, trace->func);
876 if (rec) {
877 rec->time += calltime;
878 rec->time_squared += calltime * calltime;
879 }
880
881 out:
882 local_irq_restore(flags);
883 }
884
885 static struct fgraph_ops fprofiler_ops = {
886 .entryfunc = &profile_graph_entry,
887 .retfunc = &profile_graph_return,
888 };
889
890 static int register_ftrace_profiler(void)
891 {
892 return register_ftrace_graph(&fprofiler_ops);
893 }
894
895 static void unregister_ftrace_profiler(void)
896 {
897 unregister_ftrace_graph(&fprofiler_ops);
898 }
899 #else
900 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
901 .func = function_profile_call,
902 .flags = FTRACE_OPS_FL_INITIALIZED,
903 INIT_OPS_HASH(ftrace_profile_ops)
904 };
905
906 static int register_ftrace_profiler(void)
907 {
908 return register_ftrace_function(&ftrace_profile_ops);
909 }
910
911 static void unregister_ftrace_profiler(void)
912 {
913 unregister_ftrace_function(&ftrace_profile_ops);
914 }
915 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
916
917 static ssize_t
918 ftrace_profile_write(struct file *filp, const char __user *ubuf,
919 size_t cnt, loff_t *ppos)
920 {
921 unsigned long val;
922 int ret;
923
924 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
925 if (ret)
926 return ret;
927
928 val = !!val;
929
930 mutex_lock(&ftrace_profile_lock);
931 if (ftrace_profile_enabled ^ val) {
932 if (val) {
933 ret = ftrace_profile_init();
934 if (ret < 0) {
935 cnt = ret;
936 goto out;
937 }
938
939 ret = register_ftrace_profiler();
940 if (ret < 0) {
941 cnt = ret;
942 goto out;
943 }
944 ftrace_profile_enabled = 1;
945 } else {
946 ftrace_profile_enabled = 0;
947 /*
948 * unregister_ftrace_profiler calls stop_machine
949 * so this acts like a synchronize_rcu().
950 */
951 unregister_ftrace_profiler();
952 }
953 }
954 out:
955 mutex_unlock(&ftrace_profile_lock);
956
957 *ppos += cnt;
958
959 return cnt;
960 }
961
962 static ssize_t
963 ftrace_profile_read(struct file *filp, char __user *ubuf,
964 size_t cnt, loff_t *ppos)
965 {
966 char buf[64]; /* big enough to hold a number */
967 int r;
968
969 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
970 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
971 }
972
973 static const struct file_operations ftrace_profile_fops = {
974 .open = tracing_open_generic,
975 .read = ftrace_profile_read,
976 .write = ftrace_profile_write,
977 .llseek = default_llseek,
978 };
979
980 /* used to initialize the real stat files */
981 static struct tracer_stat function_stats __initdata = {
982 .name = "functions",
983 .stat_start = function_stat_start,
984 .stat_next = function_stat_next,
985 .stat_cmp = function_stat_cmp,
986 .stat_headers = function_stat_headers,
987 .stat_show = function_stat_show
988 };
989
990 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
991 {
992 struct ftrace_profile_stat *stat;
993 char *name;
994 int ret;
995 int cpu;
996
997 for_each_possible_cpu(cpu) {
998 stat = &per_cpu(ftrace_profile_stats, cpu);
999
1000 name = kasprintf(GFP_KERNEL, "function%d", cpu);
1001 if (!name) {
1002 /*
1003 * The files created are permanent; if something goes wrong,
1004 * we still do not free the memory.
1005 */
1006 WARN(1,
1007 "Could not allocate stat file for cpu %d\n",
1008 cpu);
1009 return;
1010 }
1011 stat->stat = function_stats;
1012 stat->stat.name = name;
1013 ret = register_stat_tracer(&stat->stat);
1014 if (ret) {
1015 WARN(1,
1016 "Could not register function stat for cpu %d\n",
1017 cpu);
1018 kfree(name);
1019 return;
1020 }
1021 }
1022
1023 trace_create_file("function_profile_enabled",
1024 TRACE_MODE_WRITE, d_tracer, NULL,
1025 &ftrace_profile_fops);
1026 }
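/*
 * Editor's note (illustrative usage): the file created above is what users
 * toggle from tracefs, typically by writing 1 to
 * /sys/kernel/tracing/function_profile_enabled, and the per-CPU results
 * registered with register_stat_tracer() show up under
 * trace_stat/function<cpu> (see Documentation/trace/ftrace.rst).
 */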
1027
1028 #else /* CONFIG_FUNCTION_PROFILER */
1029 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1030 {
1031 }
1032 #endif /* CONFIG_FUNCTION_PROFILER */
1033
1034 #ifdef CONFIG_DYNAMIC_FTRACE
1035
1036 static struct ftrace_ops *removed_ops;
1037
1038 /*
1039 * Set when doing a global update, like enabling all recs or disabling them.
1040 * It is not set when just updating a single ftrace_ops.
1041 */
1042 static bool update_all_ops;
1043
1044 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1045 # error Dynamic ftrace depends on MCOUNT_RECORD
1046 #endif
1047
1048 struct ftrace_func_probe {
1049 struct ftrace_probe_ops *probe_ops;
1050 struct ftrace_ops ops;
1051 struct trace_array *tr;
1052 struct list_head list;
1053 void *data;
1054 int ref;
1055 };
1056
1057 /*
1058 * We make these constant because no one should touch them,
1059 * but they are used as the default "empty hash", to avoid allocating
1060 * it all the time. These are in a read only section such that if
1061 * anyone does try to modify it, it will cause an exception.
1062 */
1063 static const struct hlist_head empty_buckets[1];
1064 static const struct ftrace_hash empty_hash = {
1065 .buckets = (struct hlist_head *)empty_buckets,
1066 };
1067 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1068
1069 struct ftrace_ops global_ops = {
1070 .func = ftrace_stub,
1071 .local_hash.notrace_hash = EMPTY_HASH,
1072 .local_hash.filter_hash = EMPTY_HASH,
1073 INIT_OPS_HASH(global_ops)
1074 .flags = FTRACE_OPS_FL_INITIALIZED |
1075 FTRACE_OPS_FL_PID,
1076 };
1077
1078 /*
1079 * Used by the stack unwinder to know about dynamic ftrace trampolines.
1080 */
1081 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1082 {
1083 struct ftrace_ops *op = NULL;
1084
1085 /*
1086 * Some of the ops may be dynamically allocated;
1087 * they are freed after a synchronize_rcu().
1088 */
1089 preempt_disable_notrace();
1090
1091 do_for_each_ftrace_op(op, ftrace_ops_list) {
1092 /*
1093 * This is to check for dynamically allocated trampolines.
1094 * Trampolines that are in kernel text will have
1095 * core_kernel_text() return true.
1096 */
1097 if (op->trampoline && op->trampoline_size)
1098 if (addr >= op->trampoline &&
1099 addr < op->trampoline + op->trampoline_size) {
1100 preempt_enable_notrace();
1101 return op;
1102 }
1103 } while_for_each_ftrace_op(op);
1104 preempt_enable_notrace();
1105
1106 return NULL;
1107 }
1108
1109 /*
1110 * This is used by __kernel_text_address() to return true if the
1111 * address is on a dynamically allocated trampoline that would
1112 * not return true for either core_kernel_text() or
1113 * is_module_text_address().
1114 */
1115 bool is_ftrace_trampoline(unsigned long addr)
1116 {
1117 return ftrace_ops_trampoline(addr) != NULL;
1118 }
1119
1120 struct ftrace_page {
1121 struct ftrace_page *next;
1122 struct dyn_ftrace *records;
1123 int index;
1124 int order;
1125 };
1126
1127 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1128 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1129
1130 static struct ftrace_page *ftrace_pages_start;
1131 static struct ftrace_page *ftrace_pages;
1132
1133 static __always_inline unsigned long
1134 ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1135 {
1136 if (hash->size_bits > 0)
1137 return hash_long(ip, hash->size_bits);
1138
1139 return 0;
1140 }
1141
1142 /* Only use this function if ftrace_hash_empty() has already been tested */
1143 static __always_inline struct ftrace_func_entry *
1144 __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1145 {
1146 unsigned long key;
1147 struct ftrace_func_entry *entry;
1148 struct hlist_head *hhd;
1149
1150 key = ftrace_hash_key(hash, ip);
1151 hhd = &hash->buckets[key];
1152
1153 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1154 if (entry->ip == ip)
1155 return entry;
1156 }
1157 return NULL;
1158 }
1159
1160 /**
1161 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1162 * @hash: The hash to look at
1163 * @ip: The instruction pointer to test
1164 *
1165 * Search a given @hash to see if a given instruction pointer (@ip)
1166 * exists in it.
1167 *
1168 * Returns: the entry that holds the @ip if found. NULL otherwise.
1169 */
1170 struct ftrace_func_entry *
1171 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1172 {
1173 if (ftrace_hash_empty(hash))
1174 return NULL;
1175
1176 return __ftrace_lookup_ip(hash, ip);
1177 }
1178
1179 static void __add_hash_entry(struct ftrace_hash *hash,
1180 struct ftrace_func_entry *entry)
1181 {
1182 struct hlist_head *hhd;
1183 unsigned long key;
1184
1185 key = ftrace_hash_key(hash, entry->ip);
1186 hhd = &hash->buckets[key];
1187 hlist_add_head(&entry->hlist, hhd);
1188 hash->count++;
1189 }
1190
1191 static struct ftrace_func_entry *
1192 add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1193 {
1194 struct ftrace_func_entry *entry;
1195
1196 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1197 if (!entry)
1198 return NULL;
1199
1200 entry->ip = ip;
1201 __add_hash_entry(hash, entry);
1202
1203 return entry;
1204 }
1205
1206 static void
1207 free_hash_entry(struct ftrace_hash *hash,
1208 struct ftrace_func_entry *entry)
1209 {
1210 hlist_del(&entry->hlist);
1211 kfree(entry);
1212 hash->count--;
1213 }
1214
1215 static void
1216 remove_hash_entry(struct ftrace_hash *hash,
1217 struct ftrace_func_entry *entry)
1218 {
1219 hlist_del_rcu(&entry->hlist);
1220 hash->count--;
1221 }
1222
1223 static void ftrace_hash_clear(struct ftrace_hash *hash)
1224 {
1225 struct hlist_head *hhd;
1226 struct hlist_node *tn;
1227 struct ftrace_func_entry *entry;
1228 int size = 1 << hash->size_bits;
1229 int i;
1230
1231 if (!hash->count)
1232 return;
1233
1234 for (i = 0; i < size; i++) {
1235 hhd = &hash->buckets[i];
1236 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1237 free_hash_entry(hash, entry);
1238 }
1239 FTRACE_WARN_ON(hash->count);
1240 }
1241
1242 static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1243 {
1244 list_del(&ftrace_mod->list);
1245 kfree(ftrace_mod->module);
1246 kfree(ftrace_mod->func);
1247 kfree(ftrace_mod);
1248 }
1249
1250 static void clear_ftrace_mod_list(struct list_head *head)
1251 {
1252 struct ftrace_mod_load *p, *n;
1253
1254 /* stack tracer isn't supported yet */
1255 if (!head)
1256 return;
1257
1258 mutex_lock(&ftrace_lock);
1259 list_for_each_entry_safe(p, n, head, list)
1260 free_ftrace_mod(p);
1261 mutex_unlock(&ftrace_lock);
1262 }
1263
1264 static void free_ftrace_hash(struct ftrace_hash *hash)
1265 {
1266 if (!hash || hash == EMPTY_HASH)
1267 return;
1268 ftrace_hash_clear(hash);
1269 kfree(hash->buckets);
1270 kfree(hash);
1271 }
1272
1273 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1274 {
1275 struct ftrace_hash *hash;
1276
1277 hash = container_of(rcu, struct ftrace_hash, rcu);
1278 free_ftrace_hash(hash);
1279 }
1280
1281 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1282 {
1283 if (!hash || hash == EMPTY_HASH)
1284 return;
1285 call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1286 }
1287
1288 /**
1289 * ftrace_free_filter - remove all filters for an ftrace_ops
1290 * @ops: the ops to remove the filters from
1291 */
1292 void ftrace_free_filter(struct ftrace_ops *ops)
1293 {
1294 ftrace_ops_init(ops);
1295 free_ftrace_hash(ops->func_hash->filter_hash);
1296 free_ftrace_hash(ops->func_hash->notrace_hash);
1297 }
1298 EXPORT_SYMBOL_GPL(ftrace_free_filter);
1299
1300 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1301 {
1302 struct ftrace_hash *hash;
1303 int size;
1304
1305 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1306 if (!hash)
1307 return NULL;
1308
1309 size = 1 << size_bits;
1310 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1311
1312 if (!hash->buckets) {
1313 kfree(hash);
1314 return NULL;
1315 }
1316
1317 hash->size_bits = size_bits;
1318
1319 return hash;
1320 }
1321
1322 /* Used to save filters on functions for modules not loaded yet */
1323 static int ftrace_add_mod(struct trace_array *tr,
1324 const char *func, const char *module,
1325 int enable)
1326 {
1327 struct ftrace_mod_load *ftrace_mod;
1328 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1329
1330 ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1331 if (!ftrace_mod)
1332 return -ENOMEM;
1333
1334 INIT_LIST_HEAD(&ftrace_mod->list);
1335 ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1336 ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1337 ftrace_mod->enable = enable;
1338
1339 if (!ftrace_mod->func || !ftrace_mod->module)
1340 goto out_free;
1341
1342 list_add(&ftrace_mod->list, mod_head);
1343
1344 return 0;
1345
1346 out_free:
1347 free_ftrace_mod(ftrace_mod);
1348
1349 return -ENOMEM;
1350 }
1351
1352 static struct ftrace_hash *
1353 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1354 {
1355 struct ftrace_func_entry *entry;
1356 struct ftrace_hash *new_hash;
1357 int size;
1358 int i;
1359
1360 new_hash = alloc_ftrace_hash(size_bits);
1361 if (!new_hash)
1362 return NULL;
1363
1364 if (hash)
1365 new_hash->flags = hash->flags;
1366
1367 /* Empty hash? */
1368 if (ftrace_hash_empty(hash))
1369 return new_hash;
1370
1371 size = 1 << hash->size_bits;
1372 for (i = 0; i < size; i++) {
1373 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1374 if (add_hash_entry(new_hash, entry->ip) == NULL)
1375 goto free_hash;
1376 }
1377 }
1378
1379 FTRACE_WARN_ON(new_hash->count != hash->count);
1380
1381 return new_hash;
1382
1383 free_hash:
1384 free_ftrace_hash(new_hash);
1385 return NULL;
1386 }
1387
1388 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops);
1389 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops);
1390
1391 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1392 struct ftrace_hash *new_hash);
1393
1394 /*
1395 * Allocate a new hash and remove entries from @src and move them to the new hash.
1396 * On success, the @src hash will be empty and should be freed.
1397 */
1398 static struct ftrace_hash *__move_hash(struct ftrace_hash *src, int size)
1399 {
1400 struct ftrace_func_entry *entry;
1401 struct ftrace_hash *new_hash;
1402 struct hlist_head *hhd;
1403 struct hlist_node *tn;
1404 int bits = 0;
1405 int i;
1406
1407 /*
1408 * Use around half the size (max bit of it), but
1409 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
1410 */
1411 bits = fls(size / 2);
1412
1413 /* Don't allocate too much */
1414 if (bits > FTRACE_HASH_MAX_BITS)
1415 bits = FTRACE_HASH_MAX_BITS;
1416
1417 new_hash = alloc_ftrace_hash(bits);
1418 if (!new_hash)
1419 return NULL;
1420
1421 new_hash->flags = src->flags;
1422
1423 size = 1 << src->size_bits;
1424 for (i = 0; i < size; i++) {
1425 hhd = &src->buckets[i];
1426 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1427 remove_hash_entry(src, entry);
1428 __add_hash_entry(new_hash, entry);
1429 }
1430 }
1431 return new_hash;
1432 }
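/*
 * Editor's note (illustrative sizing): for a source hash holding 100
 * entries, fls(100 / 2) = 6, so the new hash gets 2^6 = 64 buckets;
 * anything that would exceed FTRACE_HASH_MAX_BITS (12) is clamped to
 * 4096 buckets.
 */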
1433
1434 /* Move the @src entries to a newly allocated hash */
1435 static struct ftrace_hash *
1436 __ftrace_hash_move(struct ftrace_hash *src)
1437 {
1438 int size = src->count;
1439
1440 /*
1441 * If the new source is empty, just return the empty_hash.
1442 */
1443 if (ftrace_hash_empty(src))
1444 return EMPTY_HASH;
1445
1446 return __move_hash(src, size);
1447 }
1448
1449 /**
1450 * ftrace_hash_move - move a new hash to a filter and do updates
1451 * @ops: The ops with the hash that @dst points to
1452 * @enable: True if for the filter hash, false for the notrace hash
1453 * @dst: Points to the @ops hash that should be updated
1454 * @src: The hash to update @dst with
1455 *
1456 * This is called when an ftrace_ops hash is being updated and the
1457 * the kernel needs to reflect this. Note, this only updates the kernel
1458 * function callbacks if the @ops is enabled (not to be confused with
1459 * @enable above). If the @ops is enabled, its hash determines what
1460 * callbacks get called. This function gets called when the @ops hash
1461 * is updated and it requires new callbacks.
1462 *
1463 * On success the elements of @src are moved to @dst, @dst is updated
1464 * properly, and the functions determined by the @ops hashes
1465 * now call the @ops callback function.
1466 *
1467 * Regardless of the return value, @src should be freed with free_ftrace_hash().
1468 */
1469 static int
1470 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1471 struct ftrace_hash **dst, struct ftrace_hash *src)
1472 {
1473 struct ftrace_hash *new_hash;
1474 int ret;
1475
1476 /* Reject setting notrace hash on IPMODIFY ftrace_ops */
1477 if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1478 return -EINVAL;
1479
1480 new_hash = __ftrace_hash_move(src);
1481 if (!new_hash)
1482 return -ENOMEM;
1483
1484 /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1485 if (enable) {
1486 /* IPMODIFY should be updated only when filter_hash updating */
1487 ret = ftrace_hash_ipmodify_update(ops, new_hash);
1488 if (ret < 0) {
1489 free_ftrace_hash(new_hash);
1490 return ret;
1491 }
1492 }
1493
1494 /*
1495 * Remove the current set, update the hash and add
1496 * them back.
1497 */
1498 ftrace_hash_rec_disable_modify(ops);
1499
1500 rcu_assign_pointer(*dst, new_hash);
1501
1502 ftrace_hash_rec_enable_modify(ops);
1503
1504 return 0;
1505 }
1506
1507 static bool hash_contains_ip(unsigned long ip,
1508 struct ftrace_ops_hash *hash)
1509 {
1510 /*
1511 * The function record is a match if it exists in the filter
1512 * hash and not in the notrace hash. Note, an empty hash is
1513 * considered a match for the filter hash, but an empty
1514 * notrace hash is considered not in the notrace hash.
1515 */
1516 return (ftrace_hash_empty(hash->filter_hash) ||
1517 __ftrace_lookup_ip(hash->filter_hash, ip)) &&
1518 (ftrace_hash_empty(hash->notrace_hash) ||
1519 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1520 }
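/*
 * Editor's note (illustrative): the combinations above work out as:
 *
 *	filter_hash empty,  ip not in notrace_hash  -> match
 *	ip in filter_hash,  ip not in notrace_hash  -> match
 *	ip in notrace_hash                          -> no match
 *	filter_hash set but ip not in it            -> no match
 */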
1521
1522 /*
1523 * Test the hashes for this ops to see if we want to call
1524 * the ops->func or not.
1525 *
1526 * It's a match if the ip is in the ops->filter_hash or
1527 * the filter_hash does not exist or is empty,
1528 * AND
1529 * the ip is not in the ops->notrace_hash.
1530 *
1531 * This needs to be called with preemption disabled as
1532 * the hashes are freed with call_rcu().
1533 */
1534 int
1535 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1536 {
1537 struct ftrace_ops_hash hash;
1538 int ret;
1539
1540 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1541 /*
1542 * There's a small race when adding ops that the ftrace handler
1543 * that wants regs, may be called without them. We can not
1544 * allow that handler to be called if regs is NULL.
1545 */
1546 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1547 return 0;
1548 #endif
1549
1550 rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1551 rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1552
1553 if (hash_contains_ip(ip, &hash))
1554 ret = 1;
1555 else
1556 ret = 0;
1557
1558 return ret;
1559 }
1560
1561 /*
1562 * This is a double for loop. Do not use 'break' to break out of the outer loop;
1563 * you must use a goto.
1564 */
1565 #define do_for_each_ftrace_rec(pg, rec) \
1566 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1567 int _____i; \
1568 for (_____i = 0; _____i < pg->index; _____i++) { \
1569 rec = &pg->records[_____i];
1570
1571 #define while_for_each_ftrace_rec() \
1572 } \
1573 }
1574
1575
1576 static int ftrace_cmp_recs(const void *a, const void *b)
1577 {
1578 const struct dyn_ftrace *key = a;
1579 const struct dyn_ftrace *rec = b;
1580
1581 if (key->flags < rec->ip)
1582 return -1;
1583 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1584 return 1;
1585 return 0;
1586 }
1587
1588 static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
1589 {
1590 struct ftrace_page *pg;
1591 struct dyn_ftrace *rec = NULL;
1592 struct dyn_ftrace key;
1593
1594 key.ip = start;
1595 key.flags = end; /* overload flags, as it is unsigned long */
1596
1597 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1598 if (pg->index == 0 ||
1599 end < pg->records[0].ip ||
1600 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1601 continue;
1602 rec = bsearch(&key, pg->records, pg->index,
1603 sizeof(struct dyn_ftrace),
1604 ftrace_cmp_recs);
1605 if (rec)
1606 break;
1607 }
1608 return rec;
1609 }
1610
1611 /**
1612 * ftrace_location_range - return the first address of a traced location
1613 * if it touches the given ip range
1614 * @start: start of range to search.
1615 * @end: end of range to search (inclusive). @end points to the last byte
1616 * to check.
1617 *
1618 * Returns: rec->ip if the related ftrace location is at least partly within
1619 * the given address range. That is, the first address of the instruction
1620 * that is either a NOP or a call to the function tracer. It checks the ftrace
1621 * internal tables to determine if the address belongs or not.
1622 */
1623 unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1624 {
1625 struct dyn_ftrace *rec;
1626 unsigned long ip = 0;
1627
1628 rcu_read_lock();
1629 rec = lookup_rec(start, end);
1630 if (rec)
1631 ip = rec->ip;
1632 rcu_read_unlock();
1633
1634 return ip;
1635 }
1636
1637 /**
1638 * ftrace_location - return the ftrace location
1639 * @ip: the instruction pointer to check
1640 *
1641 * Returns:
1642 * * If @ip matches the ftrace location, return @ip.
1643 * * If @ip matches sym+0, return sym's ftrace location.
1644 * * Otherwise, return 0.
1645 */
1646 unsigned long ftrace_location(unsigned long ip)
1647 {
1648 unsigned long loc;
1649 unsigned long offset;
1650 unsigned long size;
1651
1652 loc = ftrace_location_range(ip, ip);
1653 if (!loc) {
1654 if (!kallsyms_lookup_size_offset(ip, &size, &offset))
1655 goto out;
1656
1657 /* map sym+0 to __fentry__ */
1658 if (!offset)
1659 loc = ftrace_location_range(ip, ip + size - 1);
1660 }
1661
1662 out:
1663 return loc;
1664 }
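/*
 * Editor's note (illustrative, hypothetical address): if "schedule" starts
 * at 0xffff000012340000 and its fentry/mcount call site sits a few bytes
 * into the function, ftrace_location(0xffff000012340000) (sym+0) still
 * returns the call site address by re-searching the whole [sym, sym+size)
 * range, while an ip in the middle of the function body that is not the
 * call site returns 0.
 */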
1665
1666 /**
1667 * ftrace_text_reserved - return true if range contains an ftrace location
1668 * @start: start of range to search
1669 * @end: end of range to search (inclusive). @end points to the last byte to check.
1670 *
1671 * Returns: 1 if the range from @start to @end contains an ftrace location.
1672 * That is, the instruction that is either a NOP or a call to
1673 * the function tracer. It checks the ftrace internal tables to
1674 * determine if the address belongs or not.
1675 */
1676 int ftrace_text_reserved(const void *start, const void *end)
1677 {
1678 unsigned long ret;
1679
1680 ret = ftrace_location_range((unsigned long)start,
1681 (unsigned long)end);
1682
1683 return (int)!!ret;
1684 }
1685
1686 /* Test if ops registered to this rec needs regs */
1687 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1688 {
1689 struct ftrace_ops *ops;
1690 bool keep_regs = false;
1691
1692 for (ops = ftrace_ops_list;
1693 ops != &ftrace_list_end; ops = ops->next) {
1694 /* pass rec in as regs to have non-NULL val */
1695 if (ftrace_ops_test(ops, rec->ip, rec)) {
1696 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1697 keep_regs = true;
1698 break;
1699 }
1700 }
1701 }
1702
1703 return keep_regs;
1704 }
1705
1706 static struct ftrace_ops *
1707 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1708 static struct ftrace_ops *
1709 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
1710 static struct ftrace_ops *
1711 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1712
1713 static bool skip_record(struct dyn_ftrace *rec)
1714 {
1715 /*
1716 * At boot up, weak functions are set to disable. Function tracing
1717 * can be enabled before they are, and they still need to be disabled now.
1718 * If the record is disabled, still continue if it is marked as already
1719 * enabled (this is needed to keep the accounting working).
1720 */
1721 return rec->flags & FTRACE_FL_DISABLED &&
1722 !(rec->flags & FTRACE_FL_ENABLED);
1723 }
1724
1725 /*
1726 * This is the main engine to the ftrace updates to the dyn_ftrace records.
1727 *
1728 * It will iterate through all the available ftrace functions
1729 * (the ones that ftrace can have callbacks to) and set the flags
1730 * in the associated dyn_ftrace records.
1731 *
1732 * @inc: If true, the functions associated to @ops are added to
1733 * the dyn_ftrace records, otherwise they are removed.
1734 */
1735 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1736 bool inc)
1737 {
1738 struct ftrace_hash *hash;
1739 struct ftrace_hash *notrace_hash;
1740 struct ftrace_page *pg;
1741 struct dyn_ftrace *rec;
1742 bool update = false;
1743 int count = 0;
1744 int all = false;
1745
1746 /* Only update if the ops has been registered */
1747 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1748 return false;
1749
1750 /*
1751 * If the count is zero, we update all records.
1752 * Otherwise we just update the items in the hash.
1753 */
1754 hash = ops->func_hash->filter_hash;
1755 notrace_hash = ops->func_hash->notrace_hash;
1756 if (ftrace_hash_empty(hash))
1757 all = true;
1758
1759 do_for_each_ftrace_rec(pg, rec) {
1760 int in_notrace_hash = 0;
1761 int in_hash = 0;
1762 int match = 0;
1763
1764 if (skip_record(rec))
1765 continue;
1766
1767 if (all) {
1768 /*
1769 * Only the filter_hash affects all records.
1770 * Update if the record is not in the notrace hash.
1771 */
1772 if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip))
1773 match = 1;
1774 } else {
1775 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1776 in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip);
1777
1778 /*
1779 * We want to match all functions that are in the hash but
1780 * not in the other hash.
1781 */
1782 if (in_hash && !in_notrace_hash)
1783 match = 1;
1784 }
1785 if (!match)
1786 continue;
1787
1788 if (inc) {
1789 rec->flags++;
1790 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1791 return false;
1792
1793 if (ops->flags & FTRACE_OPS_FL_DIRECT)
1794 rec->flags |= FTRACE_FL_DIRECT;
1795
1796 /*
1797 * If there's only a single callback registered to a
1798 * function, and the ops has a trampoline registered
1799 * for it, then we can call it directly.
1800 */
1801 if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1802 rec->flags |= FTRACE_FL_TRAMP;
1803 else
1804 /*
1805 * If we are adding another function callback
1806 * to this function, and the previous had a
1807 * custom trampoline in use, then we need to go
1808 * back to the default trampoline.
1809 */
1810 rec->flags &= ~FTRACE_FL_TRAMP;
1811
1812 /*
1813 * If any ops wants regs saved for this function
1814 * then all ops will get saved regs.
1815 */
1816 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1817 rec->flags |= FTRACE_FL_REGS;
1818 } else {
1819 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1820 return false;
1821 rec->flags--;
1822
1823 /*
1824 * Only the internal direct_ops should have the
1825 * DIRECT flag set. Thus, if it is removing a
1826 * function, then that function should no longer
1827 * be direct.
1828 */
1829 if (ops->flags & FTRACE_OPS_FL_DIRECT)
1830 rec->flags &= ~FTRACE_FL_DIRECT;
1831
1832 /*
1833 * If the rec had REGS enabled and the ops that is
1834 * being removed had REGS set, then see if there is
1835 * still any ops for this record that wants regs.
1836 * If not, we can stop recording them.
1837 */
1838 if (ftrace_rec_count(rec) > 0 &&
1839 rec->flags & FTRACE_FL_REGS &&
1840 ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1841 if (!test_rec_ops_needs_regs(rec))
1842 rec->flags &= ~FTRACE_FL_REGS;
1843 }
1844
1845 /*
1846 * The TRAMP needs to be set only if rec count
1847 * is decremented to one, and the ops that is
1848 * left has a trampoline. As TRAMP can only be
1849 * enabled if there is only a single ops attached
1850 * to it.
1851 */
1852 if (ftrace_rec_count(rec) == 1 &&
1853 ftrace_find_tramp_ops_any_other(rec, ops))
1854 rec->flags |= FTRACE_FL_TRAMP;
1855 else
1856 rec->flags &= ~FTRACE_FL_TRAMP;
1857
1858 /*
1859 * flags will be cleared in ftrace_check_record()
1860 * if rec count is zero.
1861 */
1862 }
1863
1864 /*
1865 * If the rec has a single associated ops, and ops->func can be
1866 * called directly, allow the call site to call via the ops.
1867 */
1868 if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
1869 ftrace_rec_count(rec) == 1 &&
1870 ftrace_ops_get_func(ops) == ops->func)
1871 rec->flags |= FTRACE_FL_CALL_OPS;
1872 else
1873 rec->flags &= ~FTRACE_FL_CALL_OPS;
1874
1875 count++;
1876
1877 /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1878 update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
1879
1880 /* Shortcut, if we handled all records, we are done. */
1881 if (!all && count == hash->count)
1882 return update;
1883 } while_for_each_ftrace_rec();
1884
1885 return update;
1886 }
1887
1888 /*
1889 * This is called when an ops is removed from tracing. It will decrement
1890 * the counters of the dyn_ftrace records for all the functions that
1891 * the @ops attached to.
1892 */
1893 static bool ftrace_hash_rec_disable(struct ftrace_ops *ops)
1894 {
1895 return __ftrace_hash_rec_update(ops, false);
1896 }
1897
1898 /*
1899 * This is called when an ops is added to tracing. It will increment
1900 * the counters of the dyn_ftrace records for all the functions that
1901 * the @ops attached to.
1902 */
1903 static bool ftrace_hash_rec_enable(struct ftrace_ops *ops)
1904 {
1905 return __ftrace_hash_rec_update(ops, true);
1906 }
1907
1908 /*
1909 * This function will update what functions @ops traces when its filter
1910 * changes.
1911 *
1912 * The @inc states if the @ops callbacks are going to be added or removed.
1913 * When one of the @ops hashes are updated to a "new_hash" the dyn_ftrace
1914 * records are updated via:
1915 *
1916 * ftrace_hash_rec_disable_modify(ops);
1917 * ops->hash = new_hash
1918 * ftrace_hash_rec_enable_modify(ops);
1919 *
1920 * Where the @ops is removed from all the records it is tracing using
1921 * its old hash. The @ops hash is updated to the new hash, and then
1922 * the @ops is added back to the records so that it is tracing all
1923 * the new functions.
1924 */
1925 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, bool inc)
1926 {
1927 struct ftrace_ops *op;
1928
1929 __ftrace_hash_rec_update(ops, inc);
1930
1931 if (ops->func_hash != &global_ops.local_hash)
1932 return;
1933
1934 /*
1935 * If the ops shares the global_ops hash, then we need to update
1936 * all ops that are enabled and use this hash.
1937 */
1938 do_for_each_ftrace_op(op, ftrace_ops_list) {
1939 /* Already done */
1940 if (op == ops)
1941 continue;
1942 if (op->func_hash == &global_ops.local_hash)
1943 __ftrace_hash_rec_update(op, inc);
1944 } while_for_each_ftrace_op(op);
1945 }
1946
1947 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops)
1948 {
1949 ftrace_hash_rec_update_modify(ops, false);
1950 }
1951
1952 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops)
1953 {
1954 ftrace_hash_rec_update_modify(ops, true);
1955 }
1956
1957 /*
1958 * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1959 * or no update is needed, -EBUSY if it detects a conflict of the flag
1960 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1961 * Note that old_hash and new_hash have the following meanings:
1962 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1963 * - If the hash is EMPTY_HASH, it hits nothing
1964 * - Anything else hits the recs which match the hash entries.
1965 *
1966 * DIRECT ops does not have IPMODIFY flag, but we still need to check it
1967 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
1968 * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
1969 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
1970 * the return value to the caller and eventually to the owner of the DIRECT
1971 * ops.
1972 */
1973 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1974 struct ftrace_hash *old_hash,
1975 struct ftrace_hash *new_hash)
1976 {
1977 struct ftrace_page *pg;
1978 struct dyn_ftrace *rec, *end = NULL;
1979 int in_old, in_new;
1980 bool is_ipmodify, is_direct;
1981
1982 /* Only update if the ops has been registered */
1983 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1984 return 0;
1985
1986 is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
1987 is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;
1988
1989 /* neither IPMODIFY nor DIRECT, skip */
1990 if (!is_ipmodify && !is_direct)
1991 return 0;
1992
1993 if (WARN_ON_ONCE(is_ipmodify && is_direct))
1994 return 0;
1995
1996 /*
1997 * Since the IPMODIFY and DIRECT are very address sensitive
1998 * actions, we do not allow ftrace_ops to set all functions to new
1999 * hash.
2000 */
2001 if (!new_hash || !old_hash)
2002 return -EINVAL;
2003
2004 /* Update rec->flags */
2005 do_for_each_ftrace_rec(pg, rec) {
2006
2007 if (rec->flags & FTRACE_FL_DISABLED)
2008 continue;
2009
2010 /* We need to update only differences of filter_hash */
2011 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
2012 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
2013 if (in_old == in_new)
2014 continue;
2015
2016 if (in_new) {
2017 if (rec->flags & FTRACE_FL_IPMODIFY) {
2018 int ret;
2019
2020 /* Cannot have two ipmodify on same rec */
2021 if (is_ipmodify)
2022 goto rollback;
2023
2024 FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
2025
2026 /*
2027 * Another ops with IPMODIFY is already
2028 * attached. We are now attaching a direct
2029 * ops. Run SHARE_IPMODIFY_SELF, to check
2030 * whether sharing is supported.
2031 */
2032 if (!ops->ops_func)
2033 return -EBUSY;
2034 ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
2035 if (ret)
2036 return ret;
2037 } else if (is_ipmodify) {
2038 rec->flags |= FTRACE_FL_IPMODIFY;
2039 }
2040 } else if (is_ipmodify) {
2041 rec->flags &= ~FTRACE_FL_IPMODIFY;
2042 }
2043 } while_for_each_ftrace_rec();
2044
2045 return 0;
2046
2047 rollback:
2048 end = rec;
2049
2050 /* Roll back what we did above */
2051 do_for_each_ftrace_rec(pg, rec) {
2052
2053 if (rec->flags & FTRACE_FL_DISABLED)
2054 continue;
2055
2056 if (rec == end)
2057 goto err_out;
2058
2059 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
2060 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
2061 if (in_old == in_new)
2062 continue;
2063
2064 if (in_new)
2065 rec->flags &= ~FTRACE_FL_IPMODIFY;
2066 else
2067 rec->flags |= FTRACE_FL_IPMODIFY;
2068 } while_for_each_ftrace_rec();
2069
2070 err_out:
2071 return -EBUSY;
2072 }
2073
2074 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
2075 {
2076 struct ftrace_hash *hash = ops->func_hash->filter_hash;
2077
2078 if (ftrace_hash_empty(hash))
2079 hash = NULL;
2080
2081 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
2082 }
2083
2084 /* Disabling always succeeds */
2085 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
2086 {
2087 struct ftrace_hash *hash = ops->func_hash->filter_hash;
2088
2089 if (ftrace_hash_empty(hash))
2090 hash = NULL;
2091
2092 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
2093 }
2094
2095 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
2096 struct ftrace_hash *new_hash)
2097 {
2098 struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
2099
2100 if (ftrace_hash_empty(old_hash))
2101 old_hash = NULL;
2102
2103 if (ftrace_hash_empty(new_hash))
2104 new_hash = NULL;
2105
2106 return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
2107 }
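/*
 * Illustrative summary of how the three wrappers above map onto the
 * NULL/EMPTY_HASH convention documented before __ftrace_hash_update_ipmodify():
 *
 *   enable:  old = EMPTY_HASH (hits nothing)
 *            new = filter_hash, or NULL when the filter is empty
 *                  (an empty filter means "trace everything")
 *   disable: old = filter_hash or NULL,    new = EMPTY_HASH
 *   update:  old = current filter or NULL, new = new filter or NULL
 *
 * For an enabled IPMODIFY or DIRECT ops, a NULL hash on either side is
 * rejected with -EINVAL by the check at the top of that function.
 */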
2108
2109 static void print_ip_ins(const char *fmt, const unsigned char *p)
2110 {
2111 char ins[MCOUNT_INSN_SIZE];
2112
2113 if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
2114 printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
2115 return;
2116 }
2117
2118 printk(KERN_CONT "%s", fmt);
2119 pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
2120 }
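/*
 * For illustration, the print_ip_ins() calls in ftrace_bug() below produce
 * output roughly like the following (address and byte values are
 * hypothetical, x86-64 style):
 *
 *   ftrace failed to modify [<ffffffff81123456>] foo_func+0x4/0x30
 *    actual:   0f:1f:44:00:00
 *    expected: e8:9b:c2:0f:00
 */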
2121
2122 enum ftrace_bug_type ftrace_bug_type;
2123 const void *ftrace_expected;
2124
2125 static void print_bug_type(void)
2126 {
2127 switch (ftrace_bug_type) {
2128 case FTRACE_BUG_UNKNOWN:
2129 break;
2130 case FTRACE_BUG_INIT:
2131 pr_info("Initializing ftrace call sites\n");
2132 break;
2133 case FTRACE_BUG_NOP:
2134 pr_info("Setting ftrace call site to NOP\n");
2135 break;
2136 case FTRACE_BUG_CALL:
2137 pr_info("Setting ftrace call site to call ftrace function\n");
2138 break;
2139 case FTRACE_BUG_UPDATE:
2140 pr_info("Updating ftrace call site to call a different ftrace function\n");
2141 break;
2142 }
2143 }
2144
2145 /**
2146 * ftrace_bug - report and shutdown function tracer
2147 * @failed: The failed type (EFAULT, EINVAL, EPERM)
2148 * @rec: The record that failed
2149 *
2150 * The arch code that enables or disables the function tracing
2151 * can call ftrace_bug() when it has detected a problem in
2152 * modifying the code. @failed should be one of either:
2153 * EFAULT - if the problem happens on reading the @ip address
2154 * EINVAL - if what is read at @ip is not what was expected
2155 * EPERM - if the problem happens on writing to the @ip address
2156 */
2157 void ftrace_bug(int failed, struct dyn_ftrace *rec)
2158 {
2159 unsigned long ip = rec ? rec->ip : 0;
2160
2161 pr_info("------------[ ftrace bug ]------------\n");
2162
2163 switch (failed) {
2164 case -EFAULT:
2165 pr_info("ftrace faulted on modifying ");
2166 print_ip_sym(KERN_INFO, ip);
2167 break;
2168 case -EINVAL:
2169 pr_info("ftrace failed to modify ");
2170 print_ip_sym(KERN_INFO, ip);
2171 print_ip_ins(" actual: ", (unsigned char *)ip);
2172 pr_cont("\n");
2173 if (ftrace_expected) {
2174 print_ip_ins(" expected: ", ftrace_expected);
2175 pr_cont("\n");
2176 }
2177 break;
2178 case -EPERM:
2179 pr_info("ftrace faulted on writing ");
2180 print_ip_sym(KERN_INFO, ip);
2181 break;
2182 default:
2183 pr_info("ftrace faulted on unknown error ");
2184 print_ip_sym(KERN_INFO, ip);
2185 }
2186 print_bug_type();
2187 if (rec) {
2188 struct ftrace_ops *ops = NULL;
2189
2190 pr_info("ftrace record flags: %lx\n", rec->flags);
2191 pr_cont(" (%ld)%s%s", ftrace_rec_count(rec),
2192 rec->flags & FTRACE_FL_REGS ? " R" : " ",
2193 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ");
2194 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2195 ops = ftrace_find_tramp_ops_any(rec);
2196 if (ops) {
2197 do {
2198 pr_cont("\ttramp: %pS (%pS)",
2199 (void *)ops->trampoline,
2200 (void *)ops->func);
2201 ops = ftrace_find_tramp_ops_next(rec, ops);
2202 } while (ops);
2203 } else
2204 pr_cont("\ttramp: ERROR!");
2205
2206 }
2207 ip = ftrace_get_addr_curr(rec);
2208 pr_cont("\n expected tramp: %lx\n", ip);
2209 }
2210
2211 FTRACE_WARN_ON_ONCE(1);
2212 }
2213
2214 static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2215 {
2216 unsigned long flag = 0UL;
2217
2218 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2219
2220 if (skip_record(rec))
2221 return FTRACE_UPDATE_IGNORE;
2222
2223 /*
2224 * If we are updating calls:
2225 *
2226 * If the record has a ref count, then we need to enable it
2227 * because someone is using it.
2228 *
2229 * Otherwise we make sure it's disabled.
2230 *
2231 * If we are disabling calls, then disable all records that
2232 * are enabled.
2233 */
2234 if (enable && ftrace_rec_count(rec))
2235 flag = FTRACE_FL_ENABLED;
2236
2237 /*
2238 * If enabling and the REGS flag does not match the REGS_EN, or
2239 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2240 * this record. Set flags to fail the compare against ENABLED.
2241 * Same for direct calls.
2242 */
2243 if (flag) {
2244 if (!(rec->flags & FTRACE_FL_REGS) !=
2245 !(rec->flags & FTRACE_FL_REGS_EN))
2246 flag |= FTRACE_FL_REGS;
2247
2248 if (!(rec->flags & FTRACE_FL_TRAMP) !=
2249 !(rec->flags & FTRACE_FL_TRAMP_EN))
2250 flag |= FTRACE_FL_TRAMP;
2251
2252 /*
2253 * Direct calls are special, as count matters.
2254 * We must test the record for direct, if the
2255 * DIRECT and DIRECT_EN do not match, but only
2256 * if the count is 1. That's because, if the
2257 * count is something other than one, we do not
2258 * want the direct enabled (it will be done via the
2259 * direct helper). But if DIRECT_EN is set, and
2260 * the count is not one, we need to clear it.
2261 *
2262 */
2263 if (ftrace_rec_count(rec) == 1) {
2264 if (!(rec->flags & FTRACE_FL_DIRECT) !=
2265 !(rec->flags & FTRACE_FL_DIRECT_EN))
2266 flag |= FTRACE_FL_DIRECT;
2267 } else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2268 flag |= FTRACE_FL_DIRECT;
2269 }
2270
2271 /*
2272 * Ops calls are special, as count matters.
2273 * As with direct calls, they must only be enabled when count
2274 * is one, otherwise they'll be handled via the list ops.
2275 */
2276 if (ftrace_rec_count(rec) == 1) {
2277 if (!(rec->flags & FTRACE_FL_CALL_OPS) !=
2278 !(rec->flags & FTRACE_FL_CALL_OPS_EN))
2279 flag |= FTRACE_FL_CALL_OPS;
2280 } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
2281 flag |= FTRACE_FL_CALL_OPS;
2282 }
2283 }
2284
2285 /* If the state of this record hasn't changed, then do nothing */
2286 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2287 return FTRACE_UPDATE_IGNORE;
2288
2289 if (flag) {
2290 /* Save off if rec is being enabled (for return value) */
2291 flag ^= rec->flags & FTRACE_FL_ENABLED;
2292
2293 if (update) {
2294 rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED;
2295 if (flag & FTRACE_FL_REGS) {
2296 if (rec->flags & FTRACE_FL_REGS)
2297 rec->flags |= FTRACE_FL_REGS_EN;
2298 else
2299 rec->flags &= ~FTRACE_FL_REGS_EN;
2300 }
2301 if (flag & FTRACE_FL_TRAMP) {
2302 if (rec->flags & FTRACE_FL_TRAMP)
2303 rec->flags |= FTRACE_FL_TRAMP_EN;
2304 else
2305 rec->flags &= ~FTRACE_FL_TRAMP_EN;
2306 }
2307
2308 /* Keep track of anything that modifies the function */
2309 if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY))
2310 rec->flags |= FTRACE_FL_MODIFIED;
2311
2312 if (flag & FTRACE_FL_DIRECT) {
2313 /*
2314 * If there's only one user (direct_ops helper)
2315 * then we can call the direct function
2316 * directly (no ftrace trampoline).
2317 */
2318 if (ftrace_rec_count(rec) == 1) {
2319 if (rec->flags & FTRACE_FL_DIRECT)
2320 rec->flags |= FTRACE_FL_DIRECT_EN;
2321 else
2322 rec->flags &= ~FTRACE_FL_DIRECT_EN;
2323 } else {
2324 /*
2325 * Can only call directly if there's
2326 * only one callback to the function.
2327 */
2328 rec->flags &= ~FTRACE_FL_DIRECT_EN;
2329 }
2330 }
2331
2332 if (flag & FTRACE_FL_CALL_OPS) {
2333 if (ftrace_rec_count(rec) == 1) {
2334 if (rec->flags & FTRACE_FL_CALL_OPS)
2335 rec->flags |= FTRACE_FL_CALL_OPS_EN;
2336 else
2337 rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
2338 } else {
2339 /*
2340 * Can only call directly if there's
2341 * only one set of associated ops.
2342 */
2343 rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
2344 }
2345 }
2346 }
2347
2348 /*
2349 * If this record is being updated from a nop, then
2350 * return UPDATE_MAKE_CALL.
2351 * Otherwise,
2352 * return UPDATE_MODIFY_CALL to tell the caller to convert
2353 * from the save regs, to a non-save regs function or
2354 * vice versa, or from a trampoline call.
2355 */
2356 if (flag & FTRACE_FL_ENABLED) {
2357 ftrace_bug_type = FTRACE_BUG_CALL;
2358 return FTRACE_UPDATE_MAKE_CALL;
2359 }
2360
2361 ftrace_bug_type = FTRACE_BUG_UPDATE;
2362 return FTRACE_UPDATE_MODIFY_CALL;
2363 }
2364
2365 if (update) {
2366 /* If there's no more users, clear all flags */
2367 if (!ftrace_rec_count(rec))
2368 rec->flags &= FTRACE_NOCLEAR_FLAGS;
2369 else
2370 /*
2371 * Just disable the record, but keep the ops TRAMP
2372 * and REGS states. The _EN flags must be disabled though.
2373 */
2374 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2375 FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN |
2376 FTRACE_FL_CALL_OPS_EN);
2377 }
2378
2379 ftrace_bug_type = FTRACE_BUG_NOP;
2380 return FTRACE_UPDATE_MAKE_NOP;
2381 }
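/*
 * Rough summary of the return values of ftrace_check_record() above
 * (illustrative; see the function for the exact conditions):
 *
 *   disabled -> enabled  : FTRACE_UPDATE_MAKE_CALL   (nop  -> call)
 *   enabled  -> disabled : FTRACE_UPDATE_MAKE_NOP    (call -> nop)
 *   stays enabled, but the REGS/TRAMP/DIRECT/CALL_OPS variant changes
 *                        : FTRACE_UPDATE_MODIFY_CALL (call -> call')
 *   no change            : FTRACE_UPDATE_IGNORE
 */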
2382
2383 /**
2384 * ftrace_update_record - set a record that now is tracing or not
2385 * @rec: the record to update
2386 * @enable: set to true if the record is tracing, false to force disable
2387 *
2388 * The records that represent all functions that can be traced need
2389 * to be updated when tracing has been enabled.
2390 */
2391 int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2392 {
2393 return ftrace_check_record(rec, enable, true);
2394 }
2395
2396 /**
2397 * ftrace_test_record - check if the record has been enabled or not
2398 * @rec: the record to test
2399 * @enable: set to true to check if enabled, false if it is disabled
2400 *
2401 * The arch code may need to test if a record is already set to
2402 * tracing to determine how to modify the function code that it
2403 * represents.
2404 */
2405 int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2406 {
2407 return ftrace_check_record(rec, enable, false);
2408 }
2409
2410 static struct ftrace_ops *
2411 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2412 {
2413 struct ftrace_ops *op;
2414 unsigned long ip = rec->ip;
2415
2416 do_for_each_ftrace_op(op, ftrace_ops_list) {
2417
2418 if (!op->trampoline)
2419 continue;
2420
2421 if (hash_contains_ip(ip, op->func_hash))
2422 return op;
2423 } while_for_each_ftrace_op(op);
2424
2425 return NULL;
2426 }
2427
2428 static struct ftrace_ops *
2429 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2430 {
2431 struct ftrace_ops *op;
2432 unsigned long ip = rec->ip;
2433
2434 do_for_each_ftrace_op(op, ftrace_ops_list) {
2435
2436 if (op == op_exclude || !op->trampoline)
2437 continue;
2438
2439 if (hash_contains_ip(ip, op->func_hash))
2440 return op;
2441 } while_for_each_ftrace_op(op);
2442
2443 return NULL;
2444 }
2445
2446 static struct ftrace_ops *
2447 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2448 struct ftrace_ops *op)
2449 {
2450 unsigned long ip = rec->ip;
2451
2452 while_for_each_ftrace_op(op) {
2453
2454 if (!op->trampoline)
2455 continue;
2456
2457 if (hash_contains_ip(ip, op->func_hash))
2458 return op;
2459 }
2460
2461 return NULL;
2462 }
2463
2464 static struct ftrace_ops *
2465 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2466 {
2467 struct ftrace_ops *op;
2468 unsigned long ip = rec->ip;
2469
2470 /*
2471 * Need to check removed ops first.
2472 * If they are being removed, and this rec has a tramp,
2473 * and this rec is in the ops list, then it would be the
2474 * one with the tramp.
2475 */
2476 if (removed_ops) {
2477 if (hash_contains_ip(ip, &removed_ops->old_hash))
2478 return removed_ops;
2479 }
2480
2481 /*
2482 * Need to find the current trampoline for a rec.
2483 * Now, a trampoline is only attached to a rec if there
2484 * was a single 'ops' attached to it. But this can be called
2485 * when we are adding another op to the rec or removing the
2486 * current one. Thus, if the op is being added, we can
2487 * ignore it because it hasn't attached itself to the rec
2488 * yet.
2489 *
2490 * If an ops is being modified (hooking to different functions)
2491 * then we don't care about the new functions that are being
2492 * added, just the old ones (that are probably being removed).
2493 *
2494 * If we are adding an ops to a function that already uses
2495 * a trampoline, the trampoline needs to be removed (trampolines
2496 * are only for a single attached ops), so an ops that is not being
2497 * modified also needs to be checked.
2498 */
2499 do_for_each_ftrace_op(op, ftrace_ops_list) {
2500
2501 if (!op->trampoline)
2502 continue;
2503
2504 /*
2505 * If the ops is being added, it hasn't gotten to
2506 * the point to be removed from this tree yet.
2507 */
2508 if (op->flags & FTRACE_OPS_FL_ADDING)
2509 continue;
2510
2511
2512 /*
2513 * If the ops is being modified and is in the old
2514 * hash, then it is probably being removed from this
2515 * function.
2516 */
2517 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2518 hash_contains_ip(ip, &op->old_hash))
2519 return op;
2520 /*
2521 * If the ops is not being added or modified, and it's
2522 * in its normal filter hash, then this must be the one
2523 * we want!
2524 */
2525 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2526 hash_contains_ip(ip, op->func_hash))
2527 return op;
2528
2529 } while_for_each_ftrace_op(op);
2530
2531 return NULL;
2532 }
2533
2534 static struct ftrace_ops *
2535 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2536 {
2537 struct ftrace_ops *op;
2538 unsigned long ip = rec->ip;
2539
2540 do_for_each_ftrace_op(op, ftrace_ops_list) {
2541 /* pass rec in as regs to have non-NULL val */
2542 if (hash_contains_ip(ip, op->func_hash))
2543 return op;
2544 } while_for_each_ftrace_op(op);
2545
2546 return NULL;
2547 }
2548
2549 struct ftrace_ops *
2550 ftrace_find_unique_ops(struct dyn_ftrace *rec)
2551 {
2552 struct ftrace_ops *op, *found = NULL;
2553 unsigned long ip = rec->ip;
2554
2555 do_for_each_ftrace_op(op, ftrace_ops_list) {
2556
2557 if (hash_contains_ip(ip, op->func_hash)) {
2558 if (found)
2559 return NULL;
2560 found = op;
2561 }
2562
2563 } while_for_each_ftrace_op(op);
2564
2565 return found;
2566 }
2567
2568 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2569 /* Protected by rcu_tasks for reading, and direct_mutex for writing */
2570 static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
2571 static DEFINE_MUTEX(direct_mutex);
2572
2573 /*
2574 * Search the direct_functions hash to see if the given instruction pointer
2575 * has a direct caller attached to it.
2576 */
2577 unsigned long ftrace_find_rec_direct(unsigned long ip)
2578 {
2579 struct ftrace_func_entry *entry;
2580
2581 entry = __ftrace_lookup_ip(direct_functions, ip);
2582 if (!entry)
2583 return 0;
2584
2585 return entry->direct;
2586 }
2587
2588 static void call_direct_funcs(unsigned long ip, unsigned long pip,
2589 struct ftrace_ops *ops, struct ftrace_regs *fregs)
2590 {
2591 unsigned long addr = READ_ONCE(ops->direct_call);
2592
2593 if (!addr)
2594 return;
2595
2596 arch_ftrace_set_direct_caller(fregs, addr);
2597 }
2598 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
2599
2600 /**
2601 * ftrace_get_addr_new - Get the call address to set to
2602 * @rec: The ftrace record descriptor
2603 *
2604 * If the record has the FTRACE_FL_REGS set, that means that it
2605 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2606 * is not set, then it wants to convert to the normal callback.
2607 *
2608 * Returns: the address of the trampoline to set to
2609 */
2610 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2611 {
2612 struct ftrace_ops *ops;
2613 unsigned long addr;
2614
2615 if ((rec->flags & FTRACE_FL_DIRECT) &&
2616 (ftrace_rec_count(rec) == 1)) {
2617 addr = ftrace_find_rec_direct(rec->ip);
2618 if (addr)
2619 return addr;
2620 WARN_ON_ONCE(1);
2621 }
2622
2623 /* Trampolines take precedence over regs */
2624 if (rec->flags & FTRACE_FL_TRAMP) {
2625 ops = ftrace_find_tramp_ops_new(rec);
2626 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2627 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2628 (void *)rec->ip, (void *)rec->ip, rec->flags);
2629 /* Ftrace is shutting down, return anything */
2630 return (unsigned long)FTRACE_ADDR;
2631 }
2632 return ops->trampoline;
2633 }
2634
2635 if (rec->flags & FTRACE_FL_REGS)
2636 return (unsigned long)FTRACE_REGS_ADDR;
2637 else
2638 return (unsigned long)FTRACE_ADDR;
2639 }
2640
2641 /**
2642 * ftrace_get_addr_curr - Get the call address that is already there
2643 * @rec: The ftrace record descriptor
2644 *
2645 * The FTRACE_FL_REGS_EN is set when the record already points to
2646 * a function that saves all the regs. Basically the '_EN' version
2647 * represents the current state of the function.
2648 *
2649 * Returns: the address of the trampoline that is currently being called
2650 */
2651 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2652 {
2653 struct ftrace_ops *ops;
2654 unsigned long addr;
2655
2656 /* Direct calls take precedence over trampolines */
2657 if (rec->flags & FTRACE_FL_DIRECT_EN) {
2658 addr = ftrace_find_rec_direct(rec->ip);
2659 if (addr)
2660 return addr;
2661 WARN_ON_ONCE(1);
2662 }
2663
2664 /* Trampolines take precedence over regs */
2665 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2666 ops = ftrace_find_tramp_ops_curr(rec);
2667 if (FTRACE_WARN_ON(!ops)) {
2668 pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2669 (void *)rec->ip, (void *)rec->ip);
2670 /* Ftrace is shutting down, return anything */
2671 return (unsigned long)FTRACE_ADDR;
2672 }
2673 return ops->trampoline;
2674 }
2675
2676 if (rec->flags & FTRACE_FL_REGS_EN)
2677 return (unsigned long)FTRACE_REGS_ADDR;
2678 else
2679 return (unsigned long)FTRACE_ADDR;
2680 }
2681
2682 static int
2683 __ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2684 {
2685 unsigned long ftrace_old_addr;
2686 unsigned long ftrace_addr;
2687 int ret;
2688
2689 ftrace_addr = ftrace_get_addr_new(rec);
2690
2691 /* This needs to be done before we call ftrace_update_record */
2692 ftrace_old_addr = ftrace_get_addr_curr(rec);
2693
2694 ret = ftrace_update_record(rec, enable);
2695
2696 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2697
2698 switch (ret) {
2699 case FTRACE_UPDATE_IGNORE:
2700 return 0;
2701
2702 case FTRACE_UPDATE_MAKE_CALL:
2703 ftrace_bug_type = FTRACE_BUG_CALL;
2704 return ftrace_make_call(rec, ftrace_addr);
2705
2706 case FTRACE_UPDATE_MAKE_NOP:
2707 ftrace_bug_type = FTRACE_BUG_NOP;
2708 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2709
2710 case FTRACE_UPDATE_MODIFY_CALL:
2711 ftrace_bug_type = FTRACE_BUG_UPDATE;
2712 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2713 }
2714
2715 return -1; /* unknown ftrace bug */
2716 }
2717
2718 void __weak ftrace_replace_code(int mod_flags)
2719 {
2720 struct dyn_ftrace *rec;
2721 struct ftrace_page *pg;
2722 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2723 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2724 int failed;
2725
2726 if (unlikely(ftrace_disabled))
2727 return;
2728
2729 do_for_each_ftrace_rec(pg, rec) {
2730
2731 if (skip_record(rec))
2732 continue;
2733
2734 failed = __ftrace_replace_code(rec, enable);
2735 if (failed) {
2736 ftrace_bug(failed, rec);
2737 /* Stop processing */
2738 return;
2739 }
2740 if (schedulable)
2741 cond_resched();
2742 } while_for_each_ftrace_rec();
2743 }
2744
2745 struct ftrace_rec_iter {
2746 struct ftrace_page *pg;
2747 int index;
2748 };
2749
2750 /**
2751 * ftrace_rec_iter_start - start up iterating over traced functions
2752 *
2753 * Returns: an iterator handle that is used to iterate over all
2754 * the records that represent address locations where functions
2755 * are traced.
2756 *
2757 * May return NULL if no records are available.
2758 */
2759 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2760 {
2761 /*
2762 * We only use a single iterator.
2763 * Protected by the ftrace_lock mutex.
2764 */
2765 static struct ftrace_rec_iter ftrace_rec_iter;
2766 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2767
2768 iter->pg = ftrace_pages_start;
2769 iter->index = 0;
2770
2771 /* Could have empty pages */
2772 while (iter->pg && !iter->pg->index)
2773 iter->pg = iter->pg->next;
2774
2775 if (!iter->pg)
2776 return NULL;
2777
2778 return iter;
2779 }
2780
2781 /**
2782 * ftrace_rec_iter_next - get the next record to process.
2783 * @iter: The handle to the iterator.
2784 *
2785 * Returns: the next iterator after the given iterator @iter.
2786 */
2787 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2788 {
2789 iter->index++;
2790
2791 if (iter->index >= iter->pg->index) {
2792 iter->pg = iter->pg->next;
2793 iter->index = 0;
2794
2795 /* Could have empty pages */
2796 while (iter->pg && !iter->pg->index)
2797 iter->pg = iter->pg->next;
2798 }
2799
2800 if (!iter->pg)
2801 return NULL;
2802
2803 return iter;
2804 }
2805
2806 /**
2807 * ftrace_rec_iter_record - get the record at the iterator location
2808 * @iter: The current iterator location
2809 *
2810 * Returns: the record that the current @iter is at.
2811 */
2812 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2813 {
2814 return &iter->pg->records[iter->index];
2815 }
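/*
 * A minimal sketch of how arch code typically walks these records (most
 * arches use the for_ftrace_rec_iter() helper from linux/ftrace.h, which
 * expands to roughly this loop):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch or inspect rec->ip ...
 *	}
 */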
2816
2817 static int
2818 ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2819 {
2820 int ret;
2821
2822 if (unlikely(ftrace_disabled))
2823 return 0;
2824
2825 ret = ftrace_init_nop(mod, rec);
2826 if (ret) {
2827 ftrace_bug_type = FTRACE_BUG_INIT;
2828 ftrace_bug(ret, rec);
2829 return 0;
2830 }
2831 return 1;
2832 }
2833
2834 /*
2835 * archs can override this function if they must do something
2836 * before the code modification is performed.
2837 */
2838 void __weak ftrace_arch_code_modify_prepare(void)
2839 {
2840 }
2841
2842 /*
2843 * archs can override this function if they must do something
2844 * after the code modification is performed.
2845 */
2846 void __weak ftrace_arch_code_modify_post_process(void)
2847 {
2848 }
2849
2850 static int update_ftrace_func(ftrace_func_t func)
2851 {
2852 static ftrace_func_t save_func;
2853
2854 /* Avoid updating if it hasn't changed */
2855 if (func == save_func)
2856 return 0;
2857
2858 save_func = func;
2859
2860 return ftrace_update_ftrace_func(func);
2861 }
2862
2863 void ftrace_modify_all_code(int command)
2864 {
2865 int update = command & FTRACE_UPDATE_TRACE_FUNC;
2866 int mod_flags = 0;
2867 int err = 0;
2868
2869 if (command & FTRACE_MAY_SLEEP)
2870 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2871
2872 /*
2873 * If the ftrace_caller calls a ftrace_ops func directly,
2874 * we need to make sure that it only traces functions it
2875 * expects to trace. When doing the switch of functions,
2876 * we need to update to the ftrace_ops_list_func first
2877 * before the transition between the old and new calls is set,
2878 * as the ftrace_ops_list_func will check the ops hashes
2879 * to make sure the ops have the right functions
2880 * traced.
2881 */
2882 if (update) {
2883 err = update_ftrace_func(ftrace_ops_list_func);
2884 if (FTRACE_WARN_ON(err))
2885 return;
2886 }
2887
2888 if (command & FTRACE_UPDATE_CALLS)
2889 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2890 else if (command & FTRACE_DISABLE_CALLS)
2891 ftrace_replace_code(mod_flags);
2892
2893 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2894 function_trace_op = set_function_trace_op;
2895 smp_wmb();
2896 /* If irqs are disabled, we are in stop machine */
2897 if (!irqs_disabled())
2898 smp_call_function(ftrace_sync_ipi, NULL, 1);
2899 err = update_ftrace_func(ftrace_trace_function);
2900 if (FTRACE_WARN_ON(err))
2901 return;
2902 }
2903
2904 if (command & FTRACE_START_FUNC_RET)
2905 err = ftrace_enable_ftrace_graph_caller();
2906 else if (command & FTRACE_STOP_FUNC_RET)
2907 err = ftrace_disable_ftrace_graph_caller();
2908 FTRACE_WARN_ON(err);
2909 }
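/*
 * For illustration, registering an ops whose callback differs from the
 * current one typically funnels down to something like:
 *
 *   ftrace_modify_all_code(FTRACE_UPDATE_CALLS | FTRACE_UPDATE_TRACE_FUNC);
 *
 * which first points the call sites at ftrace_ops_list_func, then patches
 * the mcount/fentry sites, and only then installs the final
 * ftrace_trace_function.
 */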
2910
2911 static int __ftrace_modify_code(void *data)
2912 {
2913 int *command = data;
2914
2915 ftrace_modify_all_code(*command);
2916
2917 return 0;
2918 }
2919
2920 /**
2921 * ftrace_run_stop_machine - go back to the stop machine method
2922 * @command: The command to tell ftrace what to do
2923 *
2924 * If an arch needs to fall back to the stop machine method, then
2925 * it can call this function.
2926 */
2927 void ftrace_run_stop_machine(int command)
2928 {
2929 stop_machine(__ftrace_modify_code, &command, NULL);
2930 }
2931
2932 /**
2933 * arch_ftrace_update_code - modify the code to trace or not trace
2934 * @command: The command that needs to be done
2935 *
2936 * Archs can override this function if they do not need to
2937 * run stop_machine() to modify code.
2938 */
2939 void __weak arch_ftrace_update_code(int command)
2940 {
2941 ftrace_run_stop_machine(command);
2942 }
2943
2944 static void ftrace_run_update_code(int command)
2945 {
2946 ftrace_arch_code_modify_prepare();
2947
2948 /*
2949 * By default we use stop_machine() to modify the code.
2950 * But archs can do whatever they want as long as it
2951 * is safe. The stop_machine() is the safest, but also
2952 * produces the most overhead.
2953 */
2954 arch_ftrace_update_code(command);
2955
2956 ftrace_arch_code_modify_post_process();
2957 }
2958
2959 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2960 struct ftrace_ops_hash *old_hash)
2961 {
2962 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2963 ops->old_hash.filter_hash = old_hash->filter_hash;
2964 ops->old_hash.notrace_hash = old_hash->notrace_hash;
2965 ftrace_run_update_code(command);
2966 ops->old_hash.filter_hash = NULL;
2967 ops->old_hash.notrace_hash = NULL;
2968 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2969 }
2970
2971 static ftrace_func_t saved_ftrace_func;
2972 static int ftrace_start_up;
2973
2974 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2975 {
2976 }
2977
2978 /* List of trace_ops that have allocated trampolines */
2979 static LIST_HEAD(ftrace_ops_trampoline_list);
2980
2981 static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
2982 {
2983 lockdep_assert_held(&ftrace_lock);
2984 list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
2985 }
2986
2987 static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
2988 {
2989 lockdep_assert_held(&ftrace_lock);
2990 list_del_rcu(&ops->list);
2991 synchronize_rcu();
2992 }
2993
2994 /*
2995 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
2996 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
2997 * not a module.
2998 */
2999 #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
3000 #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
3001
3002 static void ftrace_trampoline_free(struct ftrace_ops *ops)
3003 {
3004 if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
3005 ops->trampoline) {
3006 /*
3007 * Record the text poke event before the ksymbol unregister
3008 * event.
3009 */
3010 perf_event_text_poke((void *)ops->trampoline,
3011 (void *)ops->trampoline,
3012 ops->trampoline_size, NULL, 0);
3013 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
3014 ops->trampoline, ops->trampoline_size,
3015 true, FTRACE_TRAMPOLINE_SYM);
3016 /* Remove from kallsyms after the perf events */
3017 ftrace_remove_trampoline_from_kallsyms(ops);
3018 }
3019
3020 arch_ftrace_trampoline_free(ops);
3021 }
3022
3023 static void ftrace_startup_enable(int command)
3024 {
3025 if (saved_ftrace_func != ftrace_trace_function) {
3026 saved_ftrace_func = ftrace_trace_function;
3027 command |= FTRACE_UPDATE_TRACE_FUNC;
3028 }
3029
3030 if (!command || !ftrace_enabled)
3031 return;
3032
3033 ftrace_run_update_code(command);
3034 }
3035
3036 static void ftrace_startup_all(int command)
3037 {
3038 update_all_ops = true;
3039 ftrace_startup_enable(command);
3040 update_all_ops = false;
3041 }
3042
3043 int ftrace_startup(struct ftrace_ops *ops, int command)
3044 {
3045 int ret;
3046
3047 if (unlikely(ftrace_disabled))
3048 return -ENODEV;
3049
3050 ret = __register_ftrace_function(ops);
3051 if (ret)
3052 return ret;
3053
3054 ftrace_start_up++;
3055
3056 /*
3057 * Note that ftrace probes use this to start up
3058 * and modify the functions they will probe. But we still
3059 * set the ADDING flag for modification, as probes
3060 * do not have trampolines. If they add them in the
3061 * future, then the probes will need to distinguish
3062 * between adding and updating probes.
3063 */
3064 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
3065
3066 ret = ftrace_hash_ipmodify_enable(ops);
3067 if (ret < 0) {
3068 /* Rollback registration process */
3069 __unregister_ftrace_function(ops);
3070 ftrace_start_up--;
3071 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3072 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
3073 ftrace_trampoline_free(ops);
3074 return ret;
3075 }
3076
3077 if (ftrace_hash_rec_enable(ops))
3078 command |= FTRACE_UPDATE_CALLS;
3079
3080 ftrace_startup_enable(command);
3081
3082 /*
3083 * If ftrace is in an undefined state, we just remove the ops from the list
3084 * to prevent the NULL pointer, instead of totally rolling it back and
3085 * freeing the trampoline, because those actions could cause further damage.
3086 */
3087 if (unlikely(ftrace_disabled)) {
3088 __unregister_ftrace_function(ops);
3089 return -ENODEV;
3090 }
3091
3092 ops->flags &= ~FTRACE_OPS_FL_ADDING;
3093
3094 return 0;
3095 }
3096
3097 int ftrace_shutdown(struct ftrace_ops *ops, int command)
3098 {
3099 int ret;
3100
3101 if (unlikely(ftrace_disabled))
3102 return -ENODEV;
3103
3104 ret = __unregister_ftrace_function(ops);
3105 if (ret)
3106 return ret;
3107
3108 ftrace_start_up--;
3109 /*
3110 * Just warn in case of imbalance; no need to kill ftrace, it's not
3111 * critical, but the ftrace_call callers may never be nopped again after
3112 * further ftrace uses.
3113 */
3114 WARN_ON_ONCE(ftrace_start_up < 0);
3115
3116 /* Disabling ipmodify never fails */
3117 ftrace_hash_ipmodify_disable(ops);
3118
3119 if (ftrace_hash_rec_disable(ops))
3120 command |= FTRACE_UPDATE_CALLS;
3121
3122 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3123
3124 if (saved_ftrace_func != ftrace_trace_function) {
3125 saved_ftrace_func = ftrace_trace_function;
3126 command |= FTRACE_UPDATE_TRACE_FUNC;
3127 }
3128
3129 if (!command || !ftrace_enabled)
3130 goto out;
3131
3132 /*
3133 * If the ops uses a trampoline, then it needs to be
3134 * tested first on update.
3135 */
3136 ops->flags |= FTRACE_OPS_FL_REMOVING;
3137 removed_ops = ops;
3138
3139 /* The trampoline logic checks the old hashes */
3140 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
3141 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
3142
3143 ftrace_run_update_code(command);
3144
3145 /*
3146 * If there are no more ops registered with ftrace, run a
3147 * sanity check to make sure all rec flags are cleared.
3148 */
3149 if (rcu_dereference_protected(ftrace_ops_list,
3150 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
3151 struct ftrace_page *pg;
3152 struct dyn_ftrace *rec;
3153
3154 do_for_each_ftrace_rec(pg, rec) {
3155 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS))
3156 pr_warn(" %pS flags:%lx\n",
3157 (void *)rec->ip, rec->flags);
3158 } while_for_each_ftrace_rec();
3159 }
3160
3161 ops->old_hash.filter_hash = NULL;
3162 ops->old_hash.notrace_hash = NULL;
3163
3164 removed_ops = NULL;
3165 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
3166
3167 out:
3168 /*
3169 * Dynamic ops may be freed, we must make sure that all
3170 * callers are done before leaving this function.
3171 */
3172 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
3173 /*
3174 * We need to do a hard force of sched synchronization.
3175 * This is because we use preempt_disable() to do RCU, but
3176 * the function tracers can be called where RCU is not watching
3177 * (like before user_exit()). We can not rely on the RCU
3178 * infrastructure to do the synchronization, thus we must do it
3179 * ourselves.
3180 */
3181 synchronize_rcu_tasks_rude();
3182
3183 /*
3184 * When the kernel is preemptible, tasks can be preempted
3185 * while on a ftrace trampoline. Just scheduling a task on
3186 * a CPU is not good enough to flush them. Calling
3187 * synchronize_rcu_tasks() will wait for those tasks to
3188 * execute and either schedule voluntarily or enter user space.
3189 */
3190 synchronize_rcu_tasks();
3191
3192 ftrace_trampoline_free(ops);
3193 }
3194
3195 return 0;
3196 }
3197
3198 /* Simply make a copy of @src and return it */
3199 static struct ftrace_hash *copy_hash(struct ftrace_hash *src)
3200 {
3201 if (ftrace_hash_empty(src))
3202 return EMPTY_HASH;
3203
3204 return alloc_and_copy_ftrace_hash(src->size_bits, src);
3205 }
3206
3207 /*
3208 * Append @new_hash entries to @hash:
3209 *
3210 * If @hash is the EMPTY_HASH then it traces all functions and nothing
3211 * needs to be done.
3212 *
3213 * If @new_hash is the EMPTY_HASH, then make *hash the EMPTY_HASH so
3214 * that it traces everything.
3215 *
3216 * Otherwise, go through all of @new_hash and add anything that @hash
3217 * doesn't already have, to @hash.
3218 *
3219 * The filter_hash updates use just the append_hash() function;
3220 * the notrace_hash does not.
3221 */
3222 static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash)
3223 {
3224 struct ftrace_func_entry *entry;
3225 int size;
3226 int i;
3227
3228 /* An empty hash does everything */
3229 if (ftrace_hash_empty(*hash))
3230 return 0;
3231
3232 /* If new_hash has everything make hash have everything */
3233 if (ftrace_hash_empty(new_hash)) {
3234 free_ftrace_hash(*hash);
3235 *hash = EMPTY_HASH;
3236 return 0;
3237 }
3238
3239 size = 1 << new_hash->size_bits;
3240 for (i = 0; i < size; i++) {
3241 hlist_for_each_entry(entry, &new_hash->buckets[i], hlist) {
3242 /* Only add if not already in hash */
3243 if (!__ftrace_lookup_ip(*hash, entry->ip) &&
3244 add_hash_entry(*hash, entry->ip) == NULL)
3245 return -ENOMEM;
3246 }
3247 }
3248 return 0;
3249 }
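/*
 * Example of the append semantics above (function names are hypothetical):
 *
 *   *hash = { foo, bar }, new_hash = { bar, baz }  =>  *hash = { foo, bar, baz }
 *   *hash = EMPTY_HASH (traces everything)         =>  *hash stays EMPTY_HASH
 *   new_hash = EMPTY_HASH                          =>  *hash becomes EMPTY_HASH
 */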
3250
3251 /*
3252 * Add to @hash only those that are in both @new_hash1 and @new_hash2
3253 *
3254 * The notrace_hash updates use just the intersect_hash() function;
3255 * the filter_hash does not.
3256 */
3257 static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash1,
3258 struct ftrace_hash *new_hash2)
3259 {
3260 struct ftrace_func_entry *entry;
3261 int size;
3262 int i;
3263
3264 /*
3265 * If new_hash1 or new_hash2 is the EMPTY_HASH then make the hash
3266 * empty as well, as an empty notrace hash means nothing is notraced.
3267 */
3268 if (ftrace_hash_empty(new_hash1) || ftrace_hash_empty(new_hash2)) {
3269 free_ftrace_hash(*hash);
3270 *hash = EMPTY_HASH;
3271 return 0;
3272 }
3273
3274 size = 1 << new_hash1->size_bits;
3275 for (i = 0; i < size; i++) {
3276 hlist_for_each_entry(entry, &new_hash1->buckets[i], hlist) {
3277 /* Only add if in both @new_hash1 and @new_hash2 */
3278 if (__ftrace_lookup_ip(new_hash2, entry->ip) &&
3279 add_hash_entry(*hash, entry->ip) == NULL)
3280 return -ENOMEM;
3281 }
3282 }
3283 /* If nothing intersects, make it the empty set */
3284 if (ftrace_hash_empty(*hash)) {
3285 free_ftrace_hash(*hash);
3286 *hash = EMPTY_HASH;
3287 }
3288 return 0;
3289 }
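/*
 * Example of the intersect semantics above, as used for notrace hashes
 * (function names are hypothetical):
 *
 *   new_hash1 = { foo, bar }, new_hash2 = { bar, baz }  =>  *hash = { bar }
 *   either hash EMPTY_HASH (nothing notraced)           =>  *hash = EMPTY_HASH
 *   no common entries                                   =>  *hash = EMPTY_HASH
 */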
3290
3291 /* Return a new hash that has a union of all @ops->filter_hash entries */
3292 static struct ftrace_hash *append_hashes(struct ftrace_ops *ops)
3293 {
3294 struct ftrace_hash *new_hash;
3295 struct ftrace_ops *subops;
3296 int ret;
3297
3298 new_hash = alloc_ftrace_hash(ops->func_hash->filter_hash->size_bits);
3299 if (!new_hash)
3300 return NULL;
3301
3302 list_for_each_entry(subops, &ops->subop_list, list) {
3303 ret = append_hash(&new_hash, subops->func_hash->filter_hash);
3304 if (ret < 0) {
3305 free_ftrace_hash(new_hash);
3306 return NULL;
3307 }
3308 /* Nothing more to do if new_hash is empty */
3309 if (ftrace_hash_empty(new_hash))
3310 break;
3311 }
3312 return new_hash;
3313 }
3314
3315 /* Make @ops trace everything except what all its subops do not trace */
3316 static struct ftrace_hash *intersect_hashes(struct ftrace_ops *ops)
3317 {
3318 struct ftrace_hash *new_hash = NULL;
3319 struct ftrace_ops *subops;
3320 int size_bits;
3321 int ret;
3322
3323 list_for_each_entry(subops, &ops->subop_list, list) {
3324 struct ftrace_hash *next_hash;
3325
3326 if (!new_hash) {
3327 size_bits = subops->func_hash->notrace_hash->size_bits;
3328 new_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->notrace_hash);
3329 if (!new_hash)
3330 return NULL;
3331 continue;
3332 }
3333 size_bits = new_hash->size_bits;
3334 next_hash = new_hash;
3335 new_hash = alloc_ftrace_hash(size_bits);
3336 ret = intersect_hash(&new_hash, next_hash, subops->func_hash->notrace_hash);
3337 free_ftrace_hash(next_hash);
3338 if (ret < 0) {
3339 free_ftrace_hash(new_hash);
3340 return NULL;
3341 }
3342 /* Nothing more to do if new_hash is empty */
3343 if (ftrace_hash_empty(new_hash))
3344 break;
3345 }
3346 return new_hash;
3347 }
3348
3349 static bool ops_equal(struct ftrace_hash *A, struct ftrace_hash *B)
3350 {
3351 struct ftrace_func_entry *entry;
3352 int size;
3353 int i;
3354
3355 if (ftrace_hash_empty(A))
3356 return ftrace_hash_empty(B);
3357
3358 if (ftrace_hash_empty(B))
3359 return ftrace_hash_empty(A);
3360
3361 if (A->count != B->count)
3362 return false;
3363
3364 size = 1 << A->size_bits;
3365 for (i = 0; i < size; i++) {
3366 hlist_for_each_entry(entry, &A->buckets[i], hlist) {
3367 if (!__ftrace_lookup_ip(B, entry->ip))
3368 return false;
3369 }
3370 }
3371
3372 return true;
3373 }
3374
3375 static void ftrace_ops_update_code(struct ftrace_ops *ops,
3376 struct ftrace_ops_hash *old_hash);
3377
3378 static int __ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3379 struct ftrace_hash **orig_hash,
3380 struct ftrace_hash *hash,
3381 int enable)
3382 {
3383 struct ftrace_ops_hash old_hash_ops;
3384 struct ftrace_hash *old_hash;
3385 int ret;
3386
3387 old_hash = *orig_hash;
3388 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3389 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3390 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3391 if (!ret) {
3392 ftrace_ops_update_code(ops, &old_hash_ops);
3393 free_ftrace_hash_rcu(old_hash);
3394 }
3395 return ret;
3396 }
3397
3398 static int ftrace_update_ops(struct ftrace_ops *ops, struct ftrace_hash *filter_hash,
3399 struct ftrace_hash *notrace_hash)
3400 {
3401 int ret;
3402
3403 if (!ops_equal(filter_hash, ops->func_hash->filter_hash)) {
3404 ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->filter_hash,
3405 filter_hash, 1);
3406 if (ret < 0)
3407 return ret;
3408 }
3409
3410 if (!ops_equal(notrace_hash, ops->func_hash->notrace_hash)) {
3411 ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->notrace_hash,
3412 notrace_hash, 0);
3413 if (ret < 0)
3414 return ret;
3415 }
3416
3417 return 0;
3418 }
3419
3420 /**
3421 * ftrace_startup_subops - enable tracing for subops of an ops
3422 * @ops: Manager ops (used to pick all the functions of its subops)
3423 * @subops: A new ops to add to @ops
3424 * @command: Extra commands to use to enable tracing
3425 *
3426 * The @ops is a manager @ops that has the filter that includes all the functions
3427 * that its list of subops are tracing. Adding a new @subops will add the
3428 * functions of @subops to @ops.
3429 */
3430 int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
3431 {
3432 struct ftrace_hash *filter_hash;
3433 struct ftrace_hash *notrace_hash;
3434 struct ftrace_hash *save_filter_hash;
3435 struct ftrace_hash *save_notrace_hash;
3436 int size_bits;
3437 int ret;
3438
3439 if (unlikely(ftrace_disabled))
3440 return -ENODEV;
3441
3442 ftrace_ops_init(ops);
3443 ftrace_ops_init(subops);
3444
3445 if (WARN_ON_ONCE(subops->flags & FTRACE_OPS_FL_ENABLED))
3446 return -EBUSY;
3447
3448 /* Make everything canonical (Just in case!) */
3449 if (!ops->func_hash->filter_hash)
3450 ops->func_hash->filter_hash = EMPTY_HASH;
3451 if (!ops->func_hash->notrace_hash)
3452 ops->func_hash->notrace_hash = EMPTY_HASH;
3453 if (!subops->func_hash->filter_hash)
3454 subops->func_hash->filter_hash = EMPTY_HASH;
3455 if (!subops->func_hash->notrace_hash)
3456 subops->func_hash->notrace_hash = EMPTY_HASH;
3457
3458 /* For the first subops to ops just enable it normally */
3459 if (list_empty(&ops->subop_list)) {
3460 /* Just use the subops hashes */
3461 filter_hash = copy_hash(subops->func_hash->filter_hash);
3462 notrace_hash = copy_hash(subops->func_hash->notrace_hash);
3463 if (!filter_hash || !notrace_hash) {
3464 free_ftrace_hash(filter_hash);
3465 free_ftrace_hash(notrace_hash);
3466 return -ENOMEM;
3467 }
3468
3469 save_filter_hash = ops->func_hash->filter_hash;
3470 save_notrace_hash = ops->func_hash->notrace_hash;
3471
3472 ops->func_hash->filter_hash = filter_hash;
3473 ops->func_hash->notrace_hash = notrace_hash;
3474 list_add(&subops->list, &ops->subop_list);
3475 ret = ftrace_startup(ops, command);
3476 if (ret < 0) {
3477 list_del(&subops->list);
3478 ops->func_hash->filter_hash = save_filter_hash;
3479 ops->func_hash->notrace_hash = save_notrace_hash;
3480 free_ftrace_hash(filter_hash);
3481 free_ftrace_hash(notrace_hash);
3482 } else {
3483 free_ftrace_hash(save_filter_hash);
3484 free_ftrace_hash(save_notrace_hash);
3485 subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
3486 subops->managed = ops;
3487 }
3488 return ret;
3489 }
3490
3491 /*
3492 * Here there's already something attached. Here are the rules:
3493 * o If either filter_hash is empty then the final stays empty
3494 * o Otherwise, the final is a superset of both hashes
3495 * o If either notrace_hash is empty then the final stays empty
3496 * o Otherwise, the final is an intersection between the hashes
3497 */
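/*
 * A worked example of the rules above (hypothetical filters):
 *
 *   ops filter  = { foo, bar }, subops filter  = { baz }
 *		=> new filter  = { foo, bar, baz }
 *   ops notrace = { foo },      subops notrace = { foo, bar }
 *		=> new notrace = { foo }
 *   either filter empty (trace everything)  => new filter stays empty
 *   either notrace empty (notrace nothing)  => new notrace stays empty
 */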
3498 if (ftrace_hash_empty(ops->func_hash->filter_hash) ||
3499 ftrace_hash_empty(subops->func_hash->filter_hash)) {
3500 filter_hash = EMPTY_HASH;
3501 } else {
3502 size_bits = max(ops->func_hash->filter_hash->size_bits,
3503 subops->func_hash->filter_hash->size_bits);
3504 filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash);
3505 if (!filter_hash)
3506 return -ENOMEM;
3507 ret = append_hash(&filter_hash, subops->func_hash->filter_hash);
3508 if (ret < 0) {
3509 free_ftrace_hash(filter_hash);
3510 return ret;
3511 }
3512 }
3513
3514 if (ftrace_hash_empty(ops->func_hash->notrace_hash) ||
3515 ftrace_hash_empty(subops->func_hash->notrace_hash)) {
3516 notrace_hash = EMPTY_HASH;
3517 } else {
3518 size_bits = max(ops->func_hash->filter_hash->size_bits,
3519 subops->func_hash->filter_hash->size_bits);
3520 notrace_hash = alloc_ftrace_hash(size_bits);
3521 if (!notrace_hash) {
3522 free_ftrace_hash(filter_hash);
3523 return -ENOMEM;
3524 }
3525
3526 ret = intersect_hash(&notrace_hash, ops->func_hash->notrace_hash,
3527 subops->func_hash->notrace_hash);
3528 if (ret < 0) {
3529 free_ftrace_hash(filter_hash);
3530 free_ftrace_hash(notrace_hash);
3531 return ret;
3532 }
3533 }
3534
3535 list_add(&subops->list, &ops->subop_list);
3536
3537 ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
3538 free_ftrace_hash(filter_hash);
3539 free_ftrace_hash(notrace_hash);
3540 if (ret < 0) {
3541 list_del(&subops->list);
3542 } else {
3543 subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
3544 subops->managed = ops;
3545 }
3546 return ret;
3547 }
3548
3549 /**
3550 * ftrace_shutdown_subops - Remove a subops from a manager ops
3551 * @ops: A manager ops to remove @subops from
3552 * @subops: The subops to remove from @ops
3553 * @command: Any extra command flags to add to modifying the text
3554 *
3555 * Removes the functions being traced by the @subops from @ops. Note, it
3556 * will not affect functions that are being traced by other subops that
3557 * still exist in @ops.
3558 *
3559 * If the last subops is removed from @ops, then @ops is shutdown normally.
3560 */
3561 int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
3562 {
3563 struct ftrace_hash *filter_hash;
3564 struct ftrace_hash *notrace_hash;
3565 int ret;
3566
3567 if (unlikely(ftrace_disabled))
3568 return -ENODEV;
3569
3570 if (WARN_ON_ONCE(!(subops->flags & FTRACE_OPS_FL_ENABLED)))
3571 return -EINVAL;
3572
3573 list_del(&subops->list);
3574
3575 if (list_empty(&ops->subop_list)) {
3576 /* Last one, just disable the current ops */
3577
3578 ret = ftrace_shutdown(ops, command);
3579 if (ret < 0) {
3580 list_add(&subops->list, &ops->subop_list);
3581 return ret;
3582 }
3583
3584 subops->flags &= ~FTRACE_OPS_FL_ENABLED;
3585
3586 free_ftrace_hash(ops->func_hash->filter_hash);
3587 free_ftrace_hash(ops->func_hash->notrace_hash);
3588 ops->func_hash->filter_hash = EMPTY_HASH;
3589 ops->func_hash->notrace_hash = EMPTY_HASH;
3590 subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
3591 subops->managed = NULL;
3592
3593 return 0;
3594 }
3595
3596 /* Rebuild the hashes without subops */
3597 filter_hash = append_hashes(ops);
3598 notrace_hash = intersect_hashes(ops);
3599 if (!filter_hash || !notrace_hash) {
3600 free_ftrace_hash(filter_hash);
3601 free_ftrace_hash(notrace_hash);
3602 list_add(&subops->list, &ops->subop_list);
3603 return -ENOMEM;
3604 }
3605
3606 ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
3607 if (ret < 0) {
3608 list_add(&subops->list, &ops->subop_list);
3609 } else {
3610 subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
3611 subops->managed = NULL;
3612 }
3613 free_ftrace_hash(filter_hash);
3614 free_ftrace_hash(notrace_hash);
3615 return ret;
3616 }
3617
3618 static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
3619 struct ftrace_hash **orig_subhash,
3620 struct ftrace_hash *hash,
3621 int enable)
3622 {
3623 struct ftrace_ops *ops = subops->managed;
3624 struct ftrace_hash **orig_hash;
3625 struct ftrace_hash *save_hash;
3626 struct ftrace_hash *new_hash;
3627 int ret;
3628
3629 /* Manager ops can not be subops (yet) */
3630 if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP))
3631 return -EINVAL;
3632
3633 /* Move the new hash over to the subops hash */
3634 save_hash = *orig_subhash;
3635 *orig_subhash = __ftrace_hash_move(hash);
3636 if (!*orig_subhash) {
3637 *orig_subhash = save_hash;
3638 return -ENOMEM;
3639 }
3640
3641 /* Create a new_hash to hold the ops new functions */
3642 if (enable) {
3643 orig_hash = &ops->func_hash->filter_hash;
3644 new_hash = append_hashes(ops);
3645 } else {
3646 orig_hash = &ops->func_hash->notrace_hash;
3647 new_hash = intersect_hashes(ops);
3648 }
3649
3650 /* Move the hash over to the new hash */
3651 ret = __ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable);
3652
3653 free_ftrace_hash(new_hash);
3654
3655 if (ret) {
3656 /* Put back the original hash */
3657 free_ftrace_hash_rcu(*orig_subhash);
3658 *orig_subhash = save_hash;
3659 } else {
3660 free_ftrace_hash_rcu(save_hash);
3661 }
3662 return ret;
3663 }
3664
3665
3666 static u64 ftrace_update_time;
3667 unsigned long ftrace_update_tot_cnt;
3668 unsigned long ftrace_number_of_pages;
3669 unsigned long ftrace_number_of_groups;
3670
3671 static inline int ops_traces_mod(struct ftrace_ops *ops)
3672 {
3673 /*
3674 * An empty filter_hash defaults to tracing the module.
3675 * But the notrace hash requires a test of individual module functions.
3676 */
3677 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3678 ftrace_hash_empty(ops->func_hash->notrace_hash);
3679 }
3680
3681 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3682 {
3683 bool init_nop = ftrace_need_init_nop();
3684 struct ftrace_page *pg;
3685 struct dyn_ftrace *p;
3686 u64 start, stop;
3687 unsigned long update_cnt = 0;
3688 unsigned long rec_flags = 0;
3689 int i;
3690
3691 start = ftrace_now(raw_smp_processor_id());
3692
3693 /*
3694 * When a module is loaded, this function is called to convert
3695 * the calls to mcount in its text to nops, and also to create
3696 * an entry in the ftrace data. Now, if ftrace is activated
3697 * after this call, but before the module sets its text to
3698 * read-only, the modification of enabling ftrace can fail if
3699 * the read-only is done while ftrace is converting the calls.
3700 * To prevent this, the module's records are set as disabled
3701 * and will be enabled after the call to set the module's text
3702 * to read-only.
3703 */
3704 if (mod)
3705 rec_flags |= FTRACE_FL_DISABLED;
3706
3707 for (pg = new_pgs; pg; pg = pg->next) {
3708
3709 for (i = 0; i < pg->index; i++) {
3710
3711 /* If something went wrong, bail without enabling anything */
3712 if (unlikely(ftrace_disabled))
3713 return -1;
3714
3715 p = &pg->records[i];
3716 p->flags = rec_flags;
3717
3718 /*
3719 * Do the initial record conversion from mcount jump
3720 * to the NOP instructions.
3721 */
3722 if (init_nop && !ftrace_nop_initialize(mod, p))
3723 break;
3724
3725 update_cnt++;
3726 }
3727 }
3728
3729 stop = ftrace_now(raw_smp_processor_id());
3730 ftrace_update_time = stop - start;
3731 ftrace_update_tot_cnt += update_cnt;
3732
3733 return 0;
3734 }
3735
3736 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3737 {
3738 int order;
3739 int pages;
3740 int cnt;
3741
3742 if (WARN_ON(!count))
3743 return -EINVAL;
3744
3745 /* We want to fill as much as possible, with no empty pages */
3746 pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3747 order = fls(pages) - 1;
3748
3749 again:
3750 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3751
3752 if (!pg->records) {
3753 /* if we can't allocate this size, try something smaller */
3754 if (!order)
3755 return -ENOMEM;
3756 order--;
3757 goto again;
3758 }
3759
3760 ftrace_number_of_pages += 1 << order;
3761 ftrace_number_of_groups++;
3762
3763 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3764 pg->order = order;
3765
3766 if (cnt > count)
3767 cnt = count;
3768
3769 return cnt;
3770 }
3771
3772 static void ftrace_free_pages(struct ftrace_page *pages)
3773 {
3774 struct ftrace_page *pg = pages;
3775
3776 while (pg) {
3777 if (pg->records) {
3778 free_pages((unsigned long)pg->records, pg->order);
3779 ftrace_number_of_pages -= 1 << pg->order;
3780 }
3781 pages = pg->next;
3782 kfree(pg);
3783 pg = pages;
3784 ftrace_number_of_groups--;
3785 }
3786 }
3787
3788 static struct ftrace_page *
3789 ftrace_allocate_pages(unsigned long num_to_init)
3790 {
3791 struct ftrace_page *start_pg;
3792 struct ftrace_page *pg;
3793 int cnt;
3794
3795 if (!num_to_init)
3796 return NULL;
3797
3798 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3799 if (!pg)
3800 return NULL;
3801
3802 /*
3803 * Try to allocate as much as possible in one contiguous
3804 * location that fills in all of the space. We want to
3805 * waste as little space as possible.
3806 */
3807 for (;;) {
3808 cnt = ftrace_allocate_records(pg, num_to_init);
3809 if (cnt < 0)
3810 goto free_pages;
3811
3812 num_to_init -= cnt;
3813 if (!num_to_init)
3814 break;
3815
3816 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3817 if (!pg->next)
3818 goto free_pages;
3819
3820 pg = pg->next;
3821 }
3822
3823 return start_pg;
3824
3825 free_pages:
3826 ftrace_free_pages(start_pg);
3827 pr_info("ftrace: FAILED to allocate memory for functions\n");
3828 return NULL;
3829 }
3830
3831 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3832
3833 struct ftrace_iterator {
3834 loff_t pos;
3835 loff_t func_pos;
3836 loff_t mod_pos;
3837 struct ftrace_page *pg;
3838 struct dyn_ftrace *func;
3839 struct ftrace_func_probe *probe;
3840 struct ftrace_func_entry *probe_entry;
3841 struct trace_parser parser;
3842 struct ftrace_hash *hash;
3843 struct ftrace_ops *ops;
3844 struct trace_array *tr;
3845 struct list_head *mod_list;
3846 int pidx;
3847 int idx;
3848 unsigned flags;
3849 };
3850
3851 static void *
3852 t_probe_next(struct seq_file *m, loff_t *pos)
3853 {
3854 struct ftrace_iterator *iter = m->private;
3855 struct trace_array *tr = iter->ops->private;
3856 struct list_head *func_probes;
3857 struct ftrace_hash *hash;
3858 struct list_head *next;
3859 struct hlist_node *hnd = NULL;
3860 struct hlist_head *hhd;
3861 int size;
3862
3863 (*pos)++;
3864 iter->pos = *pos;
3865
3866 if (!tr)
3867 return NULL;
3868
3869 func_probes = &tr->func_probes;
3870 if (list_empty(func_probes))
3871 return NULL;
3872
3873 if (!iter->probe) {
3874 next = func_probes->next;
3875 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3876 }
3877
3878 if (iter->probe_entry)
3879 hnd = &iter->probe_entry->hlist;
3880
3881 hash = iter->probe->ops.func_hash->filter_hash;
3882
3883 /*
3884 * A probe being registered may temporarily have an empty hash
3885 * and it's at the end of the func_probes list.
3886 */
3887 if (!hash || hash == EMPTY_HASH)
3888 return NULL;
3889
3890 size = 1 << hash->size_bits;
3891
3892 retry:
3893 if (iter->pidx >= size) {
3894 if (iter->probe->list.next == func_probes)
3895 return NULL;
3896 next = iter->probe->list.next;
3897 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3898 hash = iter->probe->ops.func_hash->filter_hash;
3899 size = 1 << hash->size_bits;
3900 iter->pidx = 0;
3901 }
3902
3903 hhd = &hash->buckets[iter->pidx];
3904
3905 if (hlist_empty(hhd)) {
3906 iter->pidx++;
3907 hnd = NULL;
3908 goto retry;
3909 }
3910
3911 if (!hnd)
3912 hnd = hhd->first;
3913 else {
3914 hnd = hnd->next;
3915 if (!hnd) {
3916 iter->pidx++;
3917 goto retry;
3918 }
3919 }
3920
3921 if (WARN_ON_ONCE(!hnd))
3922 return NULL;
3923
3924 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3925
3926 return iter;
3927 }
3928
3929 static void *t_probe_start(struct seq_file *m, loff_t *pos)
3930 {
3931 struct ftrace_iterator *iter = m->private;
3932 void *p = NULL;
3933 loff_t l;
3934
3935 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3936 return NULL;
3937
3938 if (iter->mod_pos > *pos)
3939 return NULL;
3940
3941 iter->probe = NULL;
3942 iter->probe_entry = NULL;
3943 iter->pidx = 0;
3944 for (l = 0; l <= (*pos - iter->mod_pos); ) {
3945 p = t_probe_next(m, &l);
3946 if (!p)
3947 break;
3948 }
3949 if (!p)
3950 return NULL;
3951
3952 /* Only set this if we have an item */
3953 iter->flags |= FTRACE_ITER_PROBE;
3954
3955 return iter;
3956 }
3957
3958 static int
3959 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3960 {
3961 struct ftrace_func_entry *probe_entry;
3962 struct ftrace_probe_ops *probe_ops;
3963 struct ftrace_func_probe *probe;
3964
3965 probe = iter->probe;
3966 probe_entry = iter->probe_entry;
3967
3968 if (WARN_ON_ONCE(!probe || !probe_entry))
3969 return -EIO;
3970
3971 probe_ops = probe->probe_ops;
3972
3973 if (probe_ops->print)
3974 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3975
3976 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3977 (void *)probe_ops->func);
3978
3979 return 0;
3980 }
3981
3982 static void *
3983 t_mod_next(struct seq_file *m, loff_t *pos)
3984 {
3985 struct ftrace_iterator *iter = m->private;
3986 struct trace_array *tr = iter->tr;
3987
3988 (*pos)++;
3989 iter->pos = *pos;
3990
3991 iter->mod_list = iter->mod_list->next;
3992
3993 if (iter->mod_list == &tr->mod_trace ||
3994 iter->mod_list == &tr->mod_notrace) {
3995 iter->flags &= ~FTRACE_ITER_MOD;
3996 return NULL;
3997 }
3998
3999 iter->mod_pos = *pos;
4000
4001 return iter;
4002 }
4003
4004 static void *t_mod_start(struct seq_file *m, loff_t *pos)
4005 {
4006 struct ftrace_iterator *iter = m->private;
4007 void *p = NULL;
4008 loff_t l;
4009
4010 if (iter->func_pos > *pos)
4011 return NULL;
4012
4013 iter->mod_pos = iter->func_pos;
4014
4015 /* probes are only available if tr is set */
4016 if (!iter->tr)
4017 return NULL;
4018
4019 for (l = 0; l <= (*pos - iter->func_pos); ) {
4020 p = t_mod_next(m, &l);
4021 if (!p)
4022 break;
4023 }
4024 if (!p) {
4025 iter->flags &= ~FTRACE_ITER_MOD;
4026 return t_probe_start(m, pos);
4027 }
4028
4029 /* Only set this if we have an item */
4030 iter->flags |= FTRACE_ITER_MOD;
4031
4032 return iter;
4033 }
4034
4035 static int
4036 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
4037 {
4038 struct ftrace_mod_load *ftrace_mod;
4039 struct trace_array *tr = iter->tr;
4040
4041 if (WARN_ON_ONCE(!iter->mod_list) ||
4042 iter->mod_list == &tr->mod_trace ||
4043 iter->mod_list == &tr->mod_notrace)
4044 return -EIO;
4045
4046 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
4047
4048 if (ftrace_mod->func)
4049 seq_printf(m, "%s", ftrace_mod->func);
4050 else
4051 seq_putc(m, '*');
4052
4053 seq_printf(m, ":mod:%s\n", ftrace_mod->module);
4054
4055 return 0;
4056 }
4057
4058 static void *
4059 t_func_next(struct seq_file *m, loff_t *pos)
4060 {
4061 struct ftrace_iterator *iter = m->private;
4062 struct dyn_ftrace *rec = NULL;
4063
4064 (*pos)++;
4065
4066 retry:
4067 if (iter->idx >= iter->pg->index) {
4068 if (iter->pg->next) {
4069 iter->pg = iter->pg->next;
4070 iter->idx = 0;
4071 goto retry;
4072 }
4073 } else {
4074 rec = &iter->pg->records[iter->idx++];
4075 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
4076 !ftrace_lookup_ip(iter->hash, rec->ip)) ||
4077
4078 ((iter->flags & FTRACE_ITER_ENABLED) &&
4079 !(rec->flags & FTRACE_FL_ENABLED)) ||
4080
4081 ((iter->flags & FTRACE_ITER_TOUCHED) &&
4082 !(rec->flags & FTRACE_FL_TOUCHED))) {
4083
4084 rec = NULL;
4085 goto retry;
4086 }
4087 }
4088
4089 if (!rec)
4090 return NULL;
4091
4092 iter->pos = iter->func_pos = *pos;
4093 iter->func = rec;
4094
4095 return iter;
4096 }
4097
4098 static void *
4099 t_next(struct seq_file *m, void *v, loff_t *pos)
4100 {
4101 struct ftrace_iterator *iter = m->private;
4102 loff_t l = *pos; /* t_probe_start() must use original pos */
4103 void *ret;
4104
4105 if (unlikely(ftrace_disabled))
4106 return NULL;
4107
4108 if (iter->flags & FTRACE_ITER_PROBE)
4109 return t_probe_next(m, pos);
4110
4111 if (iter->flags & FTRACE_ITER_MOD)
4112 return t_mod_next(m, pos);
4113
4114 if (iter->flags & FTRACE_ITER_PRINTALL) {
4115 /* next must increment pos, and t_probe_start does not */
4116 (*pos)++;
4117 return t_mod_start(m, &l);
4118 }
4119
4120 ret = t_func_next(m, pos);
4121
4122 if (!ret)
4123 return t_mod_start(m, &l);
4124
4125 return ret;
4126 }
4127
4128 static void reset_iter_read(struct ftrace_iterator *iter)
4129 {
4130 iter->pos = 0;
4131 iter->func_pos = 0;
4132 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
4133 }
4134
4135 static void *t_start(struct seq_file *m, loff_t *pos)
4136 {
4137 struct ftrace_iterator *iter = m->private;
4138 void *p = NULL;
4139 loff_t l;
4140
4141 mutex_lock(&ftrace_lock);
4142
4143 if (unlikely(ftrace_disabled))
4144 return NULL;
4145
4146 /*
4147 * If an lseek was done, then reset and start from beginning.
4148 */
4149 if (*pos < iter->pos)
4150 reset_iter_read(iter);
4151
4152 /*
4153 * For set_ftrace_filter reading, if we have the filter
4154 * off, we can short cut and just print out that all
4155 * functions are enabled.
4156 */
4157 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
4158 ftrace_hash_empty(iter->hash)) {
4159 iter->func_pos = 1; /* Account for the message */
4160 if (*pos > 0)
4161 return t_mod_start(m, pos);
4162 iter->flags |= FTRACE_ITER_PRINTALL;
4163 /* reset in case of seek/pread */
4164 iter->flags &= ~FTRACE_ITER_PROBE;
4165 return iter;
4166 }
4167
4168 if (iter->flags & FTRACE_ITER_MOD)
4169 return t_mod_start(m, pos);
4170
4171 /*
4172 * Unfortunately, we need to restart at ftrace_pages_start
4173 * every time we let go of the ftrace_lock mutex. This is because
4174 * those pointers can change without the lock.
4175 */
4176 iter->pg = ftrace_pages_start;
4177 iter->idx = 0;
4178 for (l = 0; l <= *pos; ) {
4179 p = t_func_next(m, &l);
4180 if (!p)
4181 break;
4182 }
4183
4184 if (!p)
4185 return t_mod_start(m, pos);
4186
4187 return iter;
4188 }
4189
4190 static void t_stop(struct seq_file *m, void *p)
4191 {
4192 mutex_unlock(&ftrace_lock);
4193 }
4194
4195 void * __weak
4196 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
4197 {
4198 return NULL;
4199 }
4200
4201 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
4202 struct dyn_ftrace *rec)
4203 {
4204 void *ptr;
4205
4206 ptr = arch_ftrace_trampoline_func(ops, rec);
4207 if (ptr)
4208 seq_printf(m, " ->%pS", ptr);
4209 }
4210
4211 #ifdef FTRACE_MCOUNT_MAX_OFFSET
4212 /*
4213 * Weak functions can still have an mcount/fentry that is saved in
4214 * the __mcount_loc section. These can be detected by having a
4215 * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
4216 * symbol found by kallsyms is not the function that the mcount/fentry
4217 * is part of. The offset is much greater in these cases.
4218 *
4219 * Test the record to make sure that the ip points to a valid kallsyms
4220 * and if not, mark it disabled.
4221 */
4222 static int test_for_valid_rec(struct dyn_ftrace *rec)
4223 {
4224 char str[KSYM_SYMBOL_LEN];
4225 unsigned long offset;
4226 const char *ret;
4227
4228 ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
4229
4230 /* Weak functions can cause invalid addresses */
4231 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
4232 rec->flags |= FTRACE_FL_DISABLED;
4233 return 0;
4234 }
4235 return 1;
4236 }
4237
4238 static struct workqueue_struct *ftrace_check_wq __initdata;
4239 static struct work_struct ftrace_check_work __initdata;
4240
4241 /*
4242 * Scan all the mcount/fentry entries to make sure they are valid.
4243 */
4244 static __init void ftrace_check_work_func(struct work_struct *work)
4245 {
4246 struct ftrace_page *pg;
4247 struct dyn_ftrace *rec;
4248
4249 mutex_lock(&ftrace_lock);
4250 do_for_each_ftrace_rec(pg, rec) {
4251 test_for_valid_rec(rec);
4252 } while_for_each_ftrace_rec();
4253 mutex_unlock(&ftrace_lock);
4254 }
4255
4256 static int __init ftrace_check_for_weak_functions(void)
4257 {
4258 INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
4259
4260 ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
4261
4262 queue_work(ftrace_check_wq, &ftrace_check_work);
4263 return 0;
4264 }
4265
4266 static int __init ftrace_check_sync(void)
4267 {
4268 /* Make sure the ftrace_check updates are finished */
4269 if (ftrace_check_wq)
4270 destroy_workqueue(ftrace_check_wq);
4271 return 0;
4272 }
4273
4274 late_initcall_sync(ftrace_check_sync);
4275 subsys_initcall(ftrace_check_for_weak_functions);
4276
4277 static int print_rec(struct seq_file *m, unsigned long ip)
4278 {
4279 unsigned long offset;
4280 char str[KSYM_SYMBOL_LEN];
4281 char *modname;
4282 const char *ret;
4283
4284 ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
4285 /* Weak functions can cause invalid addresses */
4286 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
4287 snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
4288 FTRACE_INVALID_FUNCTION, offset);
4289 ret = NULL;
4290 }
4291
4292 seq_puts(m, str);
4293 if (modname)
4294 seq_printf(m, " [%s]", modname);
4295 return ret == NULL ? -1 : 0;
4296 }
4297 #else
4298 static inline int test_for_valid_rec(struct dyn_ftrace *rec)
4299 {
4300 return 1;
4301 }
4302
4303 static inline int print_rec(struct seq_file *m, unsigned long ip)
4304 {
4305 seq_printf(m, "%ps", (void *)ip);
4306 return 0;
4307 }
4308 #endif
4309
4310 static int t_show(struct seq_file *m, void *v)
4311 {
4312 struct ftrace_iterator *iter = m->private;
4313 struct dyn_ftrace *rec;
4314
4315 if (iter->flags & FTRACE_ITER_PROBE)
4316 return t_probe_show(m, iter);
4317
4318 if (iter->flags & FTRACE_ITER_MOD)
4319 return t_mod_show(m, iter);
4320
4321 if (iter->flags & FTRACE_ITER_PRINTALL) {
4322 if (iter->flags & FTRACE_ITER_NOTRACE)
4323 seq_puts(m, "#### no functions disabled ####\n");
4324 else
4325 seq_puts(m, "#### all functions enabled ####\n");
4326 return 0;
4327 }
4328
4329 rec = iter->func;
4330
4331 if (!rec)
4332 return 0;
4333
4334 if (iter->flags & FTRACE_ITER_ADDRS)
4335 seq_printf(m, "%lx ", rec->ip);
4336
4337 if (print_rec(m, rec->ip)) {
4338 /* This should only happen when a rec is disabled */
4339 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
4340 seq_putc(m, '\n');
4341 return 0;
4342 }
4343
4344 if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) {
4345 struct ftrace_ops *ops;
4346
4347 seq_printf(m, " (%ld)%s%s%s%s%s",
4348 ftrace_rec_count(rec),
4349 rec->flags & FTRACE_FL_REGS ? " R" : " ",
4350 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ",
4351 rec->flags & FTRACE_FL_DIRECT ? " D" : " ",
4352 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ",
4353 rec->flags & FTRACE_FL_MODIFIED ? " M " : " ");
4354 if (rec->flags & FTRACE_FL_TRAMP_EN) {
4355 ops = ftrace_find_tramp_ops_any(rec);
4356 if (ops) {
4357 do {
4358 seq_printf(m, "\ttramp: %pS (%pS)",
4359 (void *)ops->trampoline,
4360 (void *)ops->func);
4361 add_trampoline_func(m, ops, rec);
4362 ops = ftrace_find_tramp_ops_next(rec, ops);
4363 } while (ops);
4364 } else
4365 seq_puts(m, "\ttramp: ERROR!");
4366 } else {
4367 add_trampoline_func(m, NULL, rec);
4368 }
4369 if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
4370 ops = ftrace_find_unique_ops(rec);
4371 if (ops) {
4372 seq_printf(m, "\tops: %pS (%pS)",
4373 ops, ops->func);
4374 } else {
4375 seq_puts(m, "\tops: ERROR!");
4376 }
4377 }
4378 if (rec->flags & FTRACE_FL_DIRECT) {
4379 unsigned long direct;
4380
4381 direct = ftrace_find_rec_direct(rec->ip);
4382 if (direct)
4383 seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
4384 }
4385 }
4386
4387 seq_putc(m, '\n');
4388
4389 return 0;
4390 }
4391
4392 static const struct seq_operations show_ftrace_seq_ops = {
4393 .start = t_start,
4394 .next = t_next,
4395 .stop = t_stop,
4396 .show = t_show,
4397 };
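/*
 * Reader's note on iteration order (derived from t_start()/t_next() above):
 * the seq_file walk emits plain function records first (t_func_next()),
 * then any cached ":mod:" entries (t_mod_start()/t_mod_next()), and
 * finally the registered probe entries (t_probe_start()/t_probe_next()).
 */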
4398
4399 static int
4400 ftrace_avail_open(struct inode *inode, struct file *file)
4401 {
4402 struct ftrace_iterator *iter;
4403 int ret;
4404
4405 ret = security_locked_down(LOCKDOWN_TRACEFS);
4406 if (ret)
4407 return ret;
4408
4409 if (unlikely(ftrace_disabled))
4410 return -ENODEV;
4411
4412 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4413 if (!iter)
4414 return -ENOMEM;
4415
4416 iter->pg = ftrace_pages_start;
4417 iter->ops = &global_ops;
4418
4419 return 0;
4420 }
4421
4422 static int
4423 ftrace_enabled_open(struct inode *inode, struct file *file)
4424 {
4425 struct ftrace_iterator *iter;
4426
4427 /*
4428 * This shows us what functions are currently being
4429 * traced and by what. Not sure if we want lockdown
4430 * to hide such critical information from an admin.
4431 * Although, perhaps it can show information we don't
4432 * want people to see, but if something is tracing
4433 * something, we probably want to know about it.
4434 */
4435
4436 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4437 if (!iter)
4438 return -ENOMEM;
4439
4440 iter->pg = ftrace_pages_start;
4441 iter->flags = FTRACE_ITER_ENABLED;
4442 iter->ops = &global_ops;
4443
4444 return 0;
4445 }
4446
4447 static int
4448 ftrace_touched_open(struct inode *inode, struct file *file)
4449 {
4450 struct ftrace_iterator *iter;
4451
4452 /*
4453 * This shows us what functions have ever been enabled
4454 * (traced, direct, patched, etc). Not sure if we want lockdown
4455 * to hide such critical information from an admin.
4456 * Although, perhaps it can show information we don't
4457 * want people to see, but if something had traced
4458 * something, we probably want to know about it.
4459 */
4460
4461 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4462 if (!iter)
4463 return -ENOMEM;
4464
4465 iter->pg = ftrace_pages_start;
4466 iter->flags = FTRACE_ITER_TOUCHED;
4467 iter->ops = &global_ops;
4468
4469 return 0;
4470 }
4471
4472 static int
4473 ftrace_avail_addrs_open(struct inode *inode, struct file *file)
4474 {
4475 struct ftrace_iterator *iter;
4476 int ret;
4477
4478 ret = security_locked_down(LOCKDOWN_TRACEFS);
4479 if (ret)
4480 return ret;
4481
4482 if (unlikely(ftrace_disabled))
4483 return -ENODEV;
4484
4485 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4486 if (!iter)
4487 return -ENOMEM;
4488
4489 iter->pg = ftrace_pages_start;
4490 iter->flags = FTRACE_ITER_ADDRS;
4491 iter->ops = &global_ops;
4492
4493 return 0;
4494 }
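/*
 * Reader's note: the four open routines above back the read-only tracefs
 * listing files (in most kernels: available_filter_functions,
 * enabled_functions, touched_functions and
 * available_filter_functions_addrs). The file names themselves are wired
 * up elsewhere, so treat this mapping as an assumption, not a definition.
 */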
4495
4496 /**
4497 * ftrace_regex_open - initialize function tracer filter files
4498 * @ops: The ftrace_ops that hold the hash filters
4499 * @flag: The type of filter to process
4500 * @inode: The inode, usually passed in to your open routine
4501 * @file: The file, usually passed in to your open routine
4502 *
4503 * ftrace_regex_open() initializes the filter files for the
4504 * @ops. Depending on @flag it may process the filter hash or
4505 * the notrace hash of @ops. With this called from the open
4506 * routine, you can use ftrace_filter_write() for the write
4507 * routine if @flag has FTRACE_ITER_FILTER set, or
4508 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
4509 * tracing_lseek() should be used as the lseek routine, and
4510 * release must call ftrace_regex_release().
4511 *
4512 * Returns: 0 on success or a negative errno value on failure
4513 */
4514 int
4515 ftrace_regex_open(struct ftrace_ops *ops, int flag,
4516 struct inode *inode, struct file *file)
4517 {
4518 struct ftrace_iterator *iter;
4519 struct ftrace_hash *hash;
4520 struct list_head *mod_head;
4521 struct trace_array *tr = ops->private;
4522 int ret = -ENOMEM;
4523
4524 ftrace_ops_init(ops);
4525
4526 if (unlikely(ftrace_disabled))
4527 return -ENODEV;
4528
4529 if (tracing_check_open_get_tr(tr))
4530 return -ENODEV;
4531
4532 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4533 if (!iter)
4534 goto out;
4535
4536 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
4537 goto out;
4538
4539 iter->ops = ops;
4540 iter->flags = flag;
4541 iter->tr = tr;
4542
4543 mutex_lock(&ops->func_hash->regex_lock);
4544
4545 if (flag & FTRACE_ITER_NOTRACE) {
4546 hash = ops->func_hash->notrace_hash;
4547 mod_head = tr ? &tr->mod_notrace : NULL;
4548 } else {
4549 hash = ops->func_hash->filter_hash;
4550 mod_head = tr ? &tr->mod_trace : NULL;
4551 }
4552
4553 iter->mod_list = mod_head;
4554
4555 if (file->f_mode & FMODE_WRITE) {
4556 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
4557
4558 if (file->f_flags & O_TRUNC) {
4559 iter->hash = alloc_ftrace_hash(size_bits);
4560 clear_ftrace_mod_list(mod_head);
4561 } else {
4562 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
4563 }
4564
4565 if (!iter->hash) {
4566 trace_parser_put(&iter->parser);
4567 goto out_unlock;
4568 }
4569 } else
4570 iter->hash = hash;
4571
4572 ret = 0;
4573
4574 if (file->f_mode & FMODE_READ) {
4575 iter->pg = ftrace_pages_start;
4576
4577 ret = seq_open(file, &show_ftrace_seq_ops);
4578 if (!ret) {
4579 struct seq_file *m = file->private_data;
4580 m->private = iter;
4581 } else {
4582 /* Failed */
4583 free_ftrace_hash(iter->hash);
4584 trace_parser_put(&iter->parser);
4585 }
4586 } else
4587 file->private_data = iter;
4588
4589 out_unlock:
4590 mutex_unlock(&ops->func_hash->regex_lock);
4591
4592 out:
4593 if (ret) {
4594 kfree(iter);
4595 if (tr)
4596 trace_array_put(tr);
4597 }
4598
4599 return ret;
4600 }
4601
4602 static int
4603 ftrace_filter_open(struct inode *inode, struct file *file)
4604 {
4605 struct ftrace_ops *ops = inode->i_private;
4606
4607 /* Checks for tracefs lockdown */
4608 return ftrace_regex_open(ops,
4609 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
4610 inode, file);
4611 }
4612
4613 static int
4614 ftrace_notrace_open(struct inode *inode, struct file *file)
4615 {
4616 struct ftrace_ops *ops = inode->i_private;
4617
4618 /* Checks for tracefs lockdown */
4619 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
4620 inode, file);
4621 }
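/*
 * A minimal sketch (illustrative only, not the definition used by this
 * file) of how the hooks described in the ftrace_regex_open() kernel-doc
 * are typically wired together for a "filter" style tracefs file:
 *
 *	static const struct file_operations example_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 *
 * A notrace file would use ftrace_notrace_open() and
 * ftrace_notrace_write() instead.
 */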
4622
4623 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
4624 struct ftrace_glob {
4625 char *search;
4626 unsigned len;
4627 int type;
4628 };
4629
4630 /*
4631 * If symbols in an architecture don't correspond exactly to the user-visible
4632 * name of what they represent, it is possible to define this function to
4633 * perform the necessary adjustments.
4634 */
4635 char * __weak arch_ftrace_match_adjust(char *str, const char *search)
4636 {
4637 return str;
4638 }
4639
4640 static int ftrace_match(char *str, struct ftrace_glob *g)
4641 {
4642 int matched = 0;
4643 int slen;
4644
4645 str = arch_ftrace_match_adjust(str, g->search);
4646
4647 switch (g->type) {
4648 case MATCH_FULL:
4649 if (strcmp(str, g->search) == 0)
4650 matched = 1;
4651 break;
4652 case MATCH_FRONT_ONLY:
4653 if (strncmp(str, g->search, g->len) == 0)
4654 matched = 1;
4655 break;
4656 case MATCH_MIDDLE_ONLY:
4657 if (strstr(str, g->search))
4658 matched = 1;
4659 break;
4660 case MATCH_END_ONLY:
4661 slen = strlen(str);
4662 if (slen >= g->len &&
4663 memcmp(str + slen - g->len, g->search, g->len) == 0)
4664 matched = 1;
4665 break;
4666 case MATCH_GLOB:
4667 if (glob_match(g->search, str))
4668 matched = 1;
4669 break;
4670 }
4671
4672 return matched;
4673 }
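/*
 * Reader's note (rough mapping, not an exhaustive specification):
 * filter_parse_regex() classifies user patterns along these lines:
 *
 *	"func"     -> MATCH_FULL        exact string compare
 *	"func*"    -> MATCH_FRONT_ONLY  prefix compare
 *	"*func"    -> MATCH_END_ONLY    suffix compare
 *	"*func*"   -> MATCH_MIDDLE_ONLY substring search
 *	"f?nc[12]" -> MATCH_GLOB        full glob_match()
 *	"42"       -> MATCH_INDEX       numeric record index
 *	                                (see add_rec_by_index() below)
 */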
4674
4675 static int
4676 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
4677 {
4678 struct ftrace_func_entry *entry;
4679 int ret = 0;
4680
4681 entry = ftrace_lookup_ip(hash, rec->ip);
4682 if (clear_filter) {
4683 /* Do nothing if it doesn't exist */
4684 if (!entry)
4685 return 0;
4686
4687 free_hash_entry(hash, entry);
4688 } else {
4689 /* Do nothing if it exists */
4690 if (entry)
4691 return 0;
4692 if (add_hash_entry(hash, rec->ip) == NULL)
4693 ret = -ENOMEM;
4694 }
4695 return ret;
4696 }
4697
4698 static int
4699 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4700 int clear_filter)
4701 {
4702 long index;
4703 struct ftrace_page *pg;
4704 struct dyn_ftrace *rec;
4705
4706 /* The index starts at 1 */
4707 if (kstrtoul(func_g->search, 0, &index) || --index < 0)
4708 return 0;
4709
4710 do_for_each_ftrace_rec(pg, rec) {
4711 if (pg->index <= index) {
4712 index -= pg->index;
4713 /* this is a double loop, break goes to the next page */
4714 break;
4715 }
4716 rec = &pg->records[index];
4717 enter_record(hash, rec, clear_filter);
4718 return 1;
4719 } while_for_each_ftrace_rec();
4720 return 0;
4721 }
4722
4723 #ifdef FTRACE_MCOUNT_MAX_OFFSET
4724 static int lookup_ip(unsigned long ip, char **modname, char *str)
4725 {
4726 unsigned long offset;
4727
4728 kallsyms_lookup(ip, NULL, &offset, modname, str);
4729 if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4730 return -1;
4731 return 0;
4732 }
4733 #else
4734 static int lookup_ip(unsigned long ip, char **modname, char *str)
4735 {
4736 kallsyms_lookup(ip, NULL, NULL, modname, str);
4737 return 0;
4738 }
4739 #endif
4740
4741 static int
4742 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
4743 struct ftrace_glob *mod_g, int exclude_mod)
4744 {
4745 char str[KSYM_SYMBOL_LEN];
4746 char *modname;
4747
4748 if (lookup_ip(rec->ip, &modname, str)) {
4749 /* This should only happen when a rec is disabled */
4750 WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4751 !(rec->flags & FTRACE_FL_DISABLED));
4752 return 0;
4753 }
4754
4755 if (mod_g) {
4756 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4757
4758 /* blank module name to match all modules */
4759 if (!mod_g->len) {
4760 /* blank module globbing: modname xor exclude_mod */
4761 if (!exclude_mod != !modname)
4762 goto func_match;
4763 return 0;
4764 }
4765
4766 /*
4767 * exclude_mod is set to trace everything but the given
4768 * module. If it is set and the module matches, then
4769 * return 0. If it is not set, and the module doesn't match
4770 * also return 0. Otherwise, check the function to see if
4771 * that matches.
4772 */
4773 if (!mod_matches == !exclude_mod)
4774 return 0;
4775 func_match:
4776 /* blank search means to match all funcs in the mod */
4777 if (!func_g->len)
4778 return 1;
4779 }
4780
4781 return ftrace_match(str, func_g);
4782 }
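/*
 * Illustrative examples of the module matching above (filter strings as
 * written to the filter files; treat them as a sketch of the semantics):
 *
 *	"write*:mod:ext4"   - match write* functions in the ext4 module
 *	"*:mod:ext4"        - match every function in the ext4 module
 *	"write*:mod:!ext4"  - exclude_mod: match write* everywhere but ext4
 *	"*:mod:"            - blank module glob: match functions in any module
 */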
4783
4784 static int
4785 match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4786 {
4787 struct ftrace_page *pg;
4788 struct dyn_ftrace *rec;
4789 struct ftrace_glob func_g = { .type = MATCH_FULL };
4790 struct ftrace_glob mod_g = { .type = MATCH_FULL };
4791 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4792 int exclude_mod = 0;
4793 int found = 0;
4794 int ret;
4795 int clear_filter = 0;
4796
4797 if (func) {
4798 func_g.type = filter_parse_regex(func, len, &func_g.search,
4799 &clear_filter);
4800 func_g.len = strlen(func_g.search);
4801 }
4802
4803 if (mod) {
4804 mod_g.type = filter_parse_regex(mod, strlen(mod),
4805 &mod_g.search, &exclude_mod);
4806 mod_g.len = strlen(mod_g.search);
4807 }
4808
4809 mutex_lock(&ftrace_lock);
4810
4811 if (unlikely(ftrace_disabled))
4812 goto out_unlock;
4813
4814 if (func_g.type == MATCH_INDEX) {
4815 found = add_rec_by_index(hash, &func_g, clear_filter);
4816 goto out_unlock;
4817 }
4818
4819 do_for_each_ftrace_rec(pg, rec) {
4820
4821 if (rec->flags & FTRACE_FL_DISABLED)
4822 continue;
4823
4824 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4825 ret = enter_record(hash, rec, clear_filter);
4826 if (ret < 0) {
4827 found = ret;
4828 goto out_unlock;
4829 }
4830 found = 1;
4831 }
4832 cond_resched();
4833 } while_for_each_ftrace_rec();
4834 out_unlock:
4835 mutex_unlock(&ftrace_lock);
4836
4837 return found;
4838 }
4839
4840 static int
4841 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4842 {
4843 return match_records(hash, buff, len, NULL);
4844 }
4845
4846 static void ftrace_ops_update_code(struct ftrace_ops *ops,
4847 struct ftrace_ops_hash *old_hash)
4848 {
4849 struct ftrace_ops *op;
4850
4851 if (!ftrace_enabled)
4852 return;
4853
4854 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4855 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4856 return;
4857 }
4858
4859 /*
4860 * If this is the shared global_ops filter, then we need to
4861 * check if another enabled ops also shares it. If so, we still
4862 * need to run the modify code.
4863 */
4864 if (ops->func_hash != &global_ops.local_hash)
4865 return;
4866
4867 do_for_each_ftrace_op(op, ftrace_ops_list) {
4868 if (op->func_hash == &global_ops.local_hash &&
4869 op->flags & FTRACE_OPS_FL_ENABLED) {
4870 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4871 /* Only need to do this once */
4872 return;
4873 }
4874 } while_for_each_ftrace_op(op);
4875 }
4876
4877 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4878 struct ftrace_hash **orig_hash,
4879 struct ftrace_hash *hash,
4880 int enable)
4881 {
4882 if (ops->flags & FTRACE_OPS_FL_SUBOP)
4883 return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);
4884
4885 /*
4886 * If this ops is not enabled, it could be sharing its filters
4887 * with a subop. If that's the case, update the subop instead of
4888 * this ops. Shared filters are only allowed to have one ops set
4889 * at a time, and if we update the ops that is not enabled,
4890 * it will not affect subops that share it.
4891 */
4892 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) {
4893 struct ftrace_ops *op;
4894
4895 /* Check if any other manager subops maps to this hash */
4896 do_for_each_ftrace_op(op, ftrace_ops_list) {
4897 struct ftrace_ops *subops;
4898
4899 list_for_each_entry(subops, &op->subop_list, list) {
4900 if ((subops->flags & FTRACE_OPS_FL_ENABLED) &&
4901 subops->func_hash == ops->func_hash) {
4902 return ftrace_hash_move_and_update_subops(subops, orig_hash, hash, enable);
4903 }
4904 }
4905 } while_for_each_ftrace_op(op);
4906 }
4907
4908 return __ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4909 }
4910
4911 static bool module_exists(const char *module)
4912 {
4913 /* All modules have the symbol __this_module */
4914 static const char this_mod[] = "__this_module";
4915 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4916 unsigned long val;
4917 int n;
4918
4919 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4920
4921 if (n > sizeof(modname) - 1)
4922 return false;
4923
4924 val = module_kallsyms_lookup_name(modname);
4925 return val != 0;
4926 }
4927
4928 static int cache_mod(struct trace_array *tr,
4929 const char *func, char *module, int enable)
4930 {
4931 struct ftrace_mod_load *ftrace_mod, *n;
4932 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4933 int ret;
4934
4935 mutex_lock(&ftrace_lock);
4936
4937 /* We do not cache inverse filters */
4938 if (func[0] == '!') {
4939 func++;
4940 ret = -EINVAL;
4941
4942 /* Look to remove this hash */
4943 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4944 if (strcmp(ftrace_mod->module, module) != 0)
4945 continue;
4946
4947 /* no func matches all */
4948 if (strcmp(func, "*") == 0 ||
4949 (ftrace_mod->func &&
4950 strcmp(ftrace_mod->func, func) == 0)) {
4951 ret = 0;
4952 free_ftrace_mod(ftrace_mod);
4953 continue;
4954 }
4955 }
4956 goto out;
4957 }
4958
4959 ret = -EINVAL;
4960 /* We only care about modules that have not been loaded yet */
4961 if (module_exists(module))
4962 goto out;
4963
4964 /* Save this string off, and execute it when the module is loaded */
4965 ret = ftrace_add_mod(tr, func, module, enable);
4966 out:
4967 mutex_unlock(&ftrace_lock);
4968
4969 return ret;
4970 }
4971
4972 static int
4973 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4974 int reset, int enable);
4975
4976 #ifdef CONFIG_MODULES
4977 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4978 char *mod, bool enable)
4979 {
4980 struct ftrace_mod_load *ftrace_mod, *n;
4981 struct ftrace_hash **orig_hash, *new_hash;
4982 LIST_HEAD(process_mods);
4983 char *func;
4984
4985 mutex_lock(&ops->func_hash->regex_lock);
4986
4987 if (enable)
4988 orig_hash = &ops->func_hash->filter_hash;
4989 else
4990 orig_hash = &ops->func_hash->notrace_hash;
4991
4992 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4993 *orig_hash);
4994 if (!new_hash)
4995 goto out; /* warn? */
4996
4997 mutex_lock(&ftrace_lock);
4998
4999 list_for_each_entry_safe(ftrace_mod, n, head, list) {
5000
5001 if (strcmp(ftrace_mod->module, mod) != 0)
5002 continue;
5003
5004 if (ftrace_mod->func)
5005 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
5006 else
5007 func = kstrdup("*", GFP_KERNEL);
5008
5009 if (!func) /* warn? */
5010 continue;
5011
5012 list_move(&ftrace_mod->list, &process_mods);
5013
5014 /* Use the newly allocated func, as it may be "*" */
5015 kfree(ftrace_mod->func);
5016 ftrace_mod->func = func;
5017 }
5018
5019 mutex_unlock(&ftrace_lock);
5020
5021 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
5022
5023 func = ftrace_mod->func;
5024
5025 /* Grabs ftrace_lock, which is why we have this extra step */
5026 match_records(new_hash, func, strlen(func), mod);
5027 free_ftrace_mod(ftrace_mod);
5028 }
5029
5030 if (enable && list_empty(head))
5031 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
5032
5033 mutex_lock(&ftrace_lock);
5034
5035 ftrace_hash_move_and_update_ops(ops, orig_hash,
5036 new_hash, enable);
5037 mutex_unlock(&ftrace_lock);
5038
5039 out:
5040 mutex_unlock(&ops->func_hash->regex_lock);
5041
5042 free_ftrace_hash(new_hash);
5043 }
5044
5045 static void process_cached_mods(const char *mod_name)
5046 {
5047 struct trace_array *tr;
5048 char *mod;
5049
5050 mod = kstrdup(mod_name, GFP_KERNEL);
5051 if (!mod)
5052 return;
5053
5054 mutex_lock(&trace_types_lock);
5055 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5056 if (!list_empty(&tr->mod_trace))
5057 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
5058 if (!list_empty(&tr->mod_notrace))
5059 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
5060 }
5061 mutex_unlock(&trace_types_lock);
5062
5063 kfree(mod);
5064 }
5065 #endif
5066
5067 /*
5068 * We register the module command as a template to show others how
5069 * to register a command as well.
5070 */
5071
5072 static int
5073 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
5074 char *func_orig, char *cmd, char *module, int enable)
5075 {
5076 char *func;
5077 int ret;
5078
5079 /* match_records() modifies func, and we need the original */
5080 func = kstrdup(func_orig, GFP_KERNEL);
5081 if (!func)
5082 return -ENOMEM;
5083
5084 /*
5085 * cmd == 'mod' because we only registered this func
5086 * for the 'mod' ftrace_func_command.
5087 * But if you register one func with multiple commands,
5088 * you can tell which command was used by the cmd
5089 * parameter.
5090 */
5091 ret = match_records(hash, func, strlen(func), module);
5092 kfree(func);
5093
5094 if (!ret)
5095 return cache_mod(tr, func_orig, module, enable);
5096 if (ret < 0)
5097 return ret;
5098 return 0;
5099 }
5100
5101 static struct ftrace_func_command ftrace_mod_cmd = {
5102 .name = "mod",
5103 .func = ftrace_mod_callback,
5104 };
5105
5106 static int __init ftrace_mod_cmd_init(void)
5107 {
5108 return register_ftrace_command(&ftrace_mod_cmd);
5109 }
5110 core_initcall(ftrace_mod_cmd_init);
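/*
 * From user space the command registered above is used through the filter
 * files, e.g. (illustrative):
 *
 *	echo 'btrfs_*:mod:btrfs' > set_ftrace_filter
 *
 * If the module is not loaded yet, cache_mod() stores the string and
 * process_cached_mods() replays it when the module is later loaded.
 */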
5111
5112 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
5113 struct ftrace_ops *op, struct ftrace_regs *fregs)
5114 {
5115 struct ftrace_probe_ops *probe_ops;
5116 struct ftrace_func_probe *probe;
5117
5118 probe = container_of(op, struct ftrace_func_probe, ops);
5119 probe_ops = probe->probe_ops;
5120
5121 /*
5122 * Disable preemption for these calls to prevent an RCU grace
5123 * period. This syncs the hash iteration and freeing of items
5124 * on the hash. rcu_read_lock is too dangerous here.
5125 */
5126 preempt_disable_notrace();
5127 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
5128 preempt_enable_notrace();
5129 }
5130
5131 struct ftrace_func_map {
5132 struct ftrace_func_entry entry;
5133 void *data;
5134 };
5135
5136 struct ftrace_func_mapper {
5137 struct ftrace_hash hash;
5138 };
5139
5140 /**
5141 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
5142 *
5143 * Returns: a ftrace_func_mapper descriptor that can be used to map ips to data.
5144 */
5145 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
5146 {
5147 struct ftrace_hash *hash;
5148
5149 /*
5150 * The mapper is simply a ftrace_hash, but since the entries
5151 * in the hash are not ftrace_func_entry type, we define it
5152 * as a separate structure.
5153 */
5154 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5155 return (struct ftrace_func_mapper *)hash;
5156 }
5157
5158 /**
5159 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
5160 * @mapper: The mapper that has the ip maps
5161 * @ip: the instruction pointer to find the data for
5162 *
5163 * Returns: the data mapped to @ip if found otherwise NULL. The return
5164 * is actually the address of the mapper data pointer. The address is
5165 * returned for use cases where the data is no bigger than a long, and
5166 * the user can use the data pointer as its data instead of having to
5167 * allocate more memory for the reference.
5168 */
5169 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
5170 unsigned long ip)
5171 {
5172 struct ftrace_func_entry *entry;
5173 struct ftrace_func_map *map;
5174
5175 entry = ftrace_lookup_ip(&mapper->hash, ip);
5176 if (!entry)
5177 return NULL;
5178
5179 map = (struct ftrace_func_map *)entry;
5180 return &map->data;
5181 }
5182
5183 /**
5184 * ftrace_func_mapper_add_ip - Map some data to an ip
5185 * @mapper: The mapper that has the ip maps
5186 * @ip: The instruction pointer address to map @data to
5187 * @data: The data to map to @ip
5188 *
5189 * Returns: 0 on success otherwise an error.
5190 */
5191 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
5192 unsigned long ip, void *data)
5193 {
5194 struct ftrace_func_entry *entry;
5195 struct ftrace_func_map *map;
5196
5197 entry = ftrace_lookup_ip(&mapper->hash, ip);
5198 if (entry)
5199 return -EBUSY;
5200
5201 map = kmalloc(sizeof(*map), GFP_KERNEL);
5202 if (!map)
5203 return -ENOMEM;
5204
5205 map->entry.ip = ip;
5206 map->data = data;
5207
5208 __add_hash_entry(&mapper->hash, &map->entry);
5209
5210 return 0;
5211 }
5212
5213 /**
5214 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
5215 * @mapper: The mapper that has the ip maps
5216 * @ip: The instruction pointer address to remove the data from
5217 *
5218 * Returns: the data if it is found, otherwise NULL.
5219 * Note, if the data pointer is used as the data itself (see
5220 * ftrace_func_mapper_find_ip()), then the return value may be
5221 * meaningless if the data pointer was set to zero.
5222 */
5223 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
5224 unsigned long ip)
5225 {
5226 struct ftrace_func_entry *entry;
5227 struct ftrace_func_map *map;
5228 void *data;
5229
5230 entry = ftrace_lookup_ip(&mapper->hash, ip);
5231 if (!entry)
5232 return NULL;
5233
5234 map = (struct ftrace_func_map *)entry;
5235 data = map->data;
5236
5237 remove_hash_entry(&mapper->hash, entry);
5238 kfree(entry);
5239
5240 return data;
5241 }
5242
5243 /**
5244 * free_ftrace_func_mapper - free a mapping of ips and data
5245 * @mapper: The mapper that has the ip maps
5246 * @free_func: A function to be called on each data item.
5247 *
5248 * This is used to free the function mapper. The @free_func is optional
5249 * and can be used if the data needs to be freed as well.
5250 */
5251 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
5252 ftrace_mapper_func free_func)
5253 {
5254 struct ftrace_func_entry *entry;
5255 struct ftrace_func_map *map;
5256 struct hlist_head *hhd;
5257 int size, i;
5258
5259 if (!mapper)
5260 return;
5261
5262 if (free_func && mapper->hash.count) {
5263 size = 1 << mapper->hash.size_bits;
5264 for (i = 0; i < size; i++) {
5265 hhd = &mapper->hash.buckets[i];
5266 hlist_for_each_entry(entry, hhd, hlist) {
5267 map = (struct ftrace_func_map *)entry;
5268 free_func(map);
5269 }
5270 }
5271 }
5272 free_ftrace_hash(&mapper->hash);
5273 }
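/*
 * A minimal usage sketch of the mapper API above (my_data, do_something
 * and my_free_func are hypothetical; error handling trimmed):
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **pdata;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	ftrace_func_mapper_add_ip(mapper, ip, my_data);
 *	pdata = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (pdata)
 *		do_something(*pdata);
 *	free_ftrace_func_mapper(mapper, my_free_func);
 */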
5274
5275 static void release_probe(struct ftrace_func_probe *probe)
5276 {
5277 struct ftrace_probe_ops *probe_ops;
5278
5279 mutex_lock(&ftrace_lock);
5280
5281 WARN_ON(probe->ref <= 0);
5282
5283 /* Subtract the ref that was used to protect this instance */
5284 probe->ref--;
5285
5286 if (!probe->ref) {
5287 probe_ops = probe->probe_ops;
5288 /*
5289 * Sending zero as ip tells probe_ops to free
5290 * the probe->data itself
5291 */
5292 if (probe_ops->free)
5293 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
5294 list_del(&probe->list);
5295 kfree(probe);
5296 }
5297 mutex_unlock(&ftrace_lock);
5298 }
5299
5300 static void acquire_probe_locked(struct ftrace_func_probe *probe)
5301 {
5302 /*
5303 * Add one ref to keep it from being freed when releasing the
5304 * ftrace_lock mutex.
5305 */
5306 probe->ref++;
5307 }
5308
5309 int
5310 register_ftrace_function_probe(char *glob, struct trace_array *tr,
5311 struct ftrace_probe_ops *probe_ops,
5312 void *data)
5313 {
5314 struct ftrace_func_probe *probe = NULL, *iter;
5315 struct ftrace_func_entry *entry;
5316 struct ftrace_hash **orig_hash;
5317 struct ftrace_hash *old_hash;
5318 struct ftrace_hash *hash;
5319 int count = 0;
5320 int size;
5321 int ret;
5322 int i;
5323
5324 if (WARN_ON(!tr))
5325 return -EINVAL;
5326
5327 /* We do not support '!' for function probes */
5328 if (WARN_ON(glob[0] == '!'))
5329 return -EINVAL;
5330
5331
5332 mutex_lock(&ftrace_lock);
5333 /* Check if the probe_ops is already registered */
5334 list_for_each_entry(iter, &tr->func_probes, list) {
5335 if (iter->probe_ops == probe_ops) {
5336 probe = iter;
5337 break;
5338 }
5339 }
5340 if (!probe) {
5341 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
5342 if (!probe) {
5343 mutex_unlock(&ftrace_lock);
5344 return -ENOMEM;
5345 }
5346 probe->probe_ops = probe_ops;
5347 probe->ops.func = function_trace_probe_call;
5348 probe->tr = tr;
5349 ftrace_ops_init(&probe->ops);
5350 list_add(&probe->list, &tr->func_probes);
5351 }
5352
5353 acquire_probe_locked(probe);
5354
5355 mutex_unlock(&ftrace_lock);
5356
5357 /*
5358 * Note, there's a small window here where the func_hash->filter_hash
5359 * may be NULL or empty. Need to be careful when reading the loop.
5360 */
5361 mutex_lock(&probe->ops.func_hash->regex_lock);
5362
5363 orig_hash = &probe->ops.func_hash->filter_hash;
5364 old_hash = *orig_hash;
5365 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
5366
5367 if (!hash) {
5368 ret = -ENOMEM;
5369 goto out;
5370 }
5371
5372 ret = ftrace_match_records(hash, glob, strlen(glob));
5373
5374 /* Nothing found? */
5375 if (!ret)
5376 ret = -EINVAL;
5377
5378 if (ret < 0)
5379 goto out;
5380
5381 size = 1 << hash->size_bits;
5382 for (i = 0; i < size; i++) {
5383 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5384 if (ftrace_lookup_ip(old_hash, entry->ip))
5385 continue;
5386 /*
5387 * The caller might want to do something special
5388 * for each function we find. We call the callback
5389 * to give the caller an opportunity to do so.
5390 */
5391 if (probe_ops->init) {
5392 ret = probe_ops->init(probe_ops, tr,
5393 entry->ip, data,
5394 &probe->data);
5395 if (ret < 0) {
5396 if (probe_ops->free && count)
5397 probe_ops->free(probe_ops, tr,
5398 0, probe->data);
5399 probe->data = NULL;
5400 goto out;
5401 }
5402 }
5403 count++;
5404 }
5405 }
5406
5407 mutex_lock(&ftrace_lock);
5408
5409 if (!count) {
5410 /* Nothing was added? */
5411 ret = -EINVAL;
5412 goto out_unlock;
5413 }
5414
5415 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
5416 hash, 1);
5417 if (ret < 0)
5418 goto err_unlock;
5419
5420 /* One ref for each new function traced */
5421 probe->ref += count;
5422
5423 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
5424 ret = ftrace_startup(&probe->ops, 0);
5425
5426 out_unlock:
5427 mutex_unlock(&ftrace_lock);
5428
5429 if (!ret)
5430 ret = count;
5431 out:
5432 mutex_unlock(&probe->ops.func_hash->regex_lock);
5433 free_ftrace_hash(hash);
5434
5435 release_probe(probe);
5436
5437 return ret;
5438
5439 err_unlock:
5440 if (!probe_ops->free || !count)
5441 goto out_unlock;
5442
5443 /* Failed to do the move, need to call the free functions */
5444 for (i = 0; i < size; i++) {
5445 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5446 if (ftrace_lookup_ip(old_hash, entry->ip))
5447 continue;
5448 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5449 }
5450 }
5451 goto out_unlock;
5452 }
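/*
 * A minimal caller sketch (hypothetical names, consistent with how the
 * callbacks are invoked in this file; not a real probe in the tree):
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  struct trace_array *tr,
 *				  struct ftrace_probe_ops *ops, void *data)
 *	{
 *		// called for every hit on a matched function
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	// attach to all "sched_*" functions of a trace instance:
 *	register_ftrace_function_probe("sched_*", tr, &my_probe_ops, NULL);
 *	// and later:
 *	unregister_ftrace_function_probe_func("sched_*", tr, &my_probe_ops);
 */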
5453
5454 int
5455 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
5456 struct ftrace_probe_ops *probe_ops)
5457 {
5458 struct ftrace_func_probe *probe = NULL, *iter;
5459 struct ftrace_ops_hash old_hash_ops;
5460 struct ftrace_func_entry *entry;
5461 struct ftrace_glob func_g;
5462 struct ftrace_hash **orig_hash;
5463 struct ftrace_hash *old_hash;
5464 struct ftrace_hash *hash = NULL;
5465 struct hlist_node *tmp;
5466 struct hlist_head hhd;
5467 char str[KSYM_SYMBOL_LEN];
5468 int count = 0;
5469 int i, ret = -ENODEV;
5470 int size;
5471
5472 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
5473 func_g.search = NULL;
5474 else {
5475 int not;
5476
5477 func_g.type = filter_parse_regex(glob, strlen(glob),
5478 &func_g.search, &not);
5479 func_g.len = strlen(func_g.search);
5480
5481 /* we do not support '!' for function probes */
5482 if (WARN_ON(not))
5483 return -EINVAL;
5484 }
5485
5486 mutex_lock(&ftrace_lock);
5487 /* Check if the probe_ops is already registered */
5488 list_for_each_entry(iter, &tr->func_probes, list) {
5489 if (iter->probe_ops == probe_ops) {
5490 probe = iter;
5491 break;
5492 }
5493 }
5494 if (!probe)
5495 goto err_unlock_ftrace;
5496
5497 ret = -EINVAL;
5498 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
5499 goto err_unlock_ftrace;
5500
5501 acquire_probe_locked(probe);
5502
5503 mutex_unlock(&ftrace_lock);
5504
5505 mutex_lock(&probe->ops.func_hash->regex_lock);
5506
5507 orig_hash = &probe->ops.func_hash->filter_hash;
5508 old_hash = *orig_hash;
5509
5510 if (ftrace_hash_empty(old_hash))
5511 goto out_unlock;
5512
5513 old_hash_ops.filter_hash = old_hash;
5514 /* Probes only have filters */
5515 old_hash_ops.notrace_hash = NULL;
5516
5517 ret = -ENOMEM;
5518 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
5519 if (!hash)
5520 goto out_unlock;
5521
5522 INIT_HLIST_HEAD(&hhd);
5523
5524 size = 1 << hash->size_bits;
5525 for (i = 0; i < size; i++) {
5526 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
5527
5528 if (func_g.search) {
5529 kallsyms_lookup(entry->ip, NULL, NULL,
5530 NULL, str);
5531 if (!ftrace_match(str, &func_g))
5532 continue;
5533 }
5534 count++;
5535 remove_hash_entry(hash, entry);
5536 hlist_add_head(&entry->hlist, &hhd);
5537 }
5538 }
5539
5540 /* Nothing found? */
5541 if (!count) {
5542 ret = -EINVAL;
5543 goto out_unlock;
5544 }
5545
5546 mutex_lock(&ftrace_lock);
5547
5548 WARN_ON(probe->ref < count);
5549
5550 probe->ref -= count;
5551
5552 if (ftrace_hash_empty(hash))
5553 ftrace_shutdown(&probe->ops, 0);
5554
5555 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
5556 hash, 1);
5557
5558 /* still need to update the function call sites */
5559 if (ftrace_enabled && !ftrace_hash_empty(hash))
5560 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
5561 &old_hash_ops);
5562 synchronize_rcu();
5563
5564 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
5565 hlist_del(&entry->hlist);
5566 if (probe_ops->free)
5567 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5568 kfree(entry);
5569 }
5570 mutex_unlock(&ftrace_lock);
5571
5572 out_unlock:
5573 mutex_unlock(&probe->ops.func_hash->regex_lock);
5574 free_ftrace_hash(hash);
5575
5576 release_probe(probe);
5577
5578 return ret;
5579
5580 err_unlock_ftrace:
5581 mutex_unlock(&ftrace_lock);
5582 return ret;
5583 }
5584
5585 void clear_ftrace_function_probes(struct trace_array *tr)
5586 {
5587 struct ftrace_func_probe *probe, *n;
5588
5589 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
5590 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
5591 }
5592
5593 static LIST_HEAD(ftrace_commands);
5594 static DEFINE_MUTEX(ftrace_cmd_mutex);
5595
5596 /*
5597 * Currently we only register ftrace commands from __init, so mark this
5598 * __init too.
5599 */
5600 __init int register_ftrace_command(struct ftrace_func_command *cmd)
5601 {
5602 struct ftrace_func_command *p;
5603 int ret = 0;
5604
5605 mutex_lock(&ftrace_cmd_mutex);
5606 list_for_each_entry(p, &ftrace_commands, list) {
5607 if (strcmp(cmd->name, p->name) == 0) {
5608 ret = -EBUSY;
5609 goto out_unlock;
5610 }
5611 }
5612 list_add(&cmd->list, &ftrace_commands);
5613 out_unlock:
5614 mutex_unlock(&ftrace_cmd_mutex);
5615
5616 return ret;
5617 }
5618
5619 /*
5620 * Currently we only unregister ftrace commands from __init, so mark
5621 * this __init too.
5622 */
5623 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
5624 {
5625 struct ftrace_func_command *p, *n;
5626 int ret = -ENODEV;
5627
5628 mutex_lock(&ftrace_cmd_mutex);
5629 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
5630 if (strcmp(cmd->name, p->name) == 0) {
5631 ret = 0;
5632 list_del_init(&p->list);
5633 goto out_unlock;
5634 }
5635 }
5636 out_unlock:
5637 mutex_unlock(&ftrace_cmd_mutex);
5638
5639 return ret;
5640 }
5641
5642 static int ftrace_process_regex(struct ftrace_iterator *iter,
5643 char *buff, int len, int enable)
5644 {
5645 struct ftrace_hash *hash = iter->hash;
5646 struct trace_array *tr = iter->ops->private;
5647 char *func, *command, *next = buff;
5648 struct ftrace_func_command *p;
5649 int ret = -EINVAL;
5650
5651 func = strsep(&next, ":");
5652
5653 if (!next) {
5654 ret = ftrace_match_records(hash, func, len);
5655 if (!ret)
5656 ret = -EINVAL;
5657 if (ret < 0)
5658 return ret;
5659 return 0;
5660 }
5661
5662 /* command found */
5663
5664 command = strsep(&next, ":");
5665
5666 mutex_lock(&ftrace_cmd_mutex);
5667 list_for_each_entry(p, &ftrace_commands, list) {
5668 if (strcmp(p->name, command) == 0) {
5669 ret = p->func(tr, hash, func, command, next, enable);
5670 goto out_unlock;
5671 }
5672 }
5673 out_unlock:
5674 mutex_unlock(&ftrace_cmd_mutex);
5675
5676 return ret;
5677 }
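/*
 * Summary of the input forms accepted above (illustrative):
 *
 *	"func_glob"                    - plain filter, goes to match_records()
 *	"func_glob:command"            - dispatched to a registered command
 *	"func_glob:command:parameter"  - the remainder is passed as the
 *	                                 command's parameter
 *
 * e.g. "schedule:traceoff:5" runs the "traceoff" command with "5" as its
 * parameter, assuming that command has been registered.
 */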
5678
5679 static ssize_t
5680 ftrace_regex_write(struct file *file, const char __user *ubuf,
5681 size_t cnt, loff_t *ppos, int enable)
5682 {
5683 struct ftrace_iterator *iter;
5684 struct trace_parser *parser;
5685 ssize_t ret, read;
5686
5687 if (!cnt)
5688 return 0;
5689
5690 if (file->f_mode & FMODE_READ) {
5691 struct seq_file *m = file->private_data;
5692 iter = m->private;
5693 } else
5694 iter = file->private_data;
5695
5696 if (unlikely(ftrace_disabled))
5697 return -ENODEV;
5698
5699 /* iter->hash is a local copy, so we don't need regex_lock */
5700
5701 parser = &iter->parser;
5702 read = trace_get_user(parser, ubuf, cnt, ppos);
5703
5704 if (read >= 0 && trace_parser_loaded(parser) &&
5705 !trace_parser_cont(parser)) {
5706 ret = ftrace_process_regex(iter, parser->buffer,
5707 parser->idx, enable);
5708 trace_parser_clear(parser);
5709 if (ret < 0)
5710 goto out;
5711 }
5712
5713 ret = read;
5714 out:
5715 return ret;
5716 }
5717
5718 ssize_t
5719 ftrace_filter_write(struct file *file, const char __user *ubuf,
5720 size_t cnt, loff_t *ppos)
5721 {
5722 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5723 }
5724
5725 ssize_t
5726 ftrace_notrace_write(struct file *file, const char __user *ubuf,
5727 size_t cnt, loff_t *ppos)
5728 {
5729 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5730 }
5731
5732 static int
5733 __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
5734 {
5735 struct ftrace_func_entry *entry;
5736
5737 ip = ftrace_location(ip);
5738 if (!ip)
5739 return -EINVAL;
5740
5741 if (remove) {
5742 entry = ftrace_lookup_ip(hash, ip);
5743 if (!entry)
5744 return -ENOENT;
5745 free_hash_entry(hash, entry);
5746 return 0;
5747 }
5748
5749 entry = add_hash_entry(hash, ip);
5750 return entry ? 0 : -ENOMEM;
5751 }
5752
5753 static int
5754 ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
5755 unsigned int cnt, int remove)
5756 {
5757 unsigned int i;
5758 int err;
5759
5760 for (i = 0; i < cnt; i++) {
5761 err = __ftrace_match_addr(hash, ips[i], remove);
5762 if (err) {
5763 /*
5764 * This expects @hash to be a temporary hash; if this
5765 * fails, the caller must free @hash.
5766 */
5767 return err;
5768 }
5769 }
5770 return 0;
5771 }
5772
5773 static int
5774 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
5775 unsigned long *ips, unsigned int cnt,
5776 int remove, int reset, int enable)
5777 {
5778 struct ftrace_hash **orig_hash;
5779 struct ftrace_hash *hash;
5780 int ret;
5781
5782 if (unlikely(ftrace_disabled))
5783 return -ENODEV;
5784
5785 mutex_lock(&ops->func_hash->regex_lock);
5786
5787 if (enable)
5788 orig_hash = &ops->func_hash->filter_hash;
5789 else
5790 orig_hash = &ops->func_hash->notrace_hash;
5791
5792 if (reset)
5793 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5794 else
5795 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5796
5797 if (!hash) {
5798 ret = -ENOMEM;
5799 goto out_regex_unlock;
5800 }
5801
5802 if (buf && !ftrace_match_records(hash, buf, len)) {
5803 ret = -EINVAL;
5804 goto out_regex_unlock;
5805 }
5806 if (ips) {
5807 ret = ftrace_match_addr(hash, ips, cnt, remove);
5808 if (ret < 0)
5809 goto out_regex_unlock;
5810 }
5811
5812 mutex_lock(&ftrace_lock);
5813 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5814 mutex_unlock(&ftrace_lock);
5815
5816 out_regex_unlock:
5817 mutex_unlock(&ops->func_hash->regex_lock);
5818
5819 free_ftrace_hash(hash);
5820 return ret;
5821 }
5822
5823 static int
5824 ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
5825 int remove, int reset, int enable)
5826 {
5827 return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
5828 }
5829
5830 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
5831
5832 static int register_ftrace_function_nolock(struct ftrace_ops *ops);
5833
5834 /*
5835 * If there are multiple ftrace_ops, use SAVE_REGS by default, so that the
5836 * direct call will be jumped to from ftrace_regs_caller. Only if the
5837 * architecture does not support ftrace_regs_caller but does support
5838 * direct_call, use SAVE_ARGS so that multiple ftrace_ops jump from ftrace_caller.
5839 */
5840 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
5841 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
5842 #else
5843 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
5844 #endif
5845
5846 static int check_direct_multi(struct ftrace_ops *ops)
5847 {
5848 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5849 return -EINVAL;
5850 if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5851 return -EINVAL;
5852 return 0;
5853 }
5854
5855 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5856 {
5857 struct ftrace_func_entry *entry, *del;
5858 int size, i;
5859
5860 size = 1 << hash->size_bits;
5861 for (i = 0; i < size; i++) {
5862 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5863 del = __ftrace_lookup_ip(direct_functions, entry->ip);
5864 if (del && del->direct == addr) {
5865 remove_hash_entry(direct_functions, del);
5866 kfree(del);
5867 }
5868 }
5869 }
5870 }
5871
5872 static void register_ftrace_direct_cb(struct rcu_head *rhp)
5873 {
5874 struct ftrace_hash *fhp = container_of(rhp, struct ftrace_hash, rcu);
5875
5876 free_ftrace_hash(fhp);
5877 }
5878
5879 /**
5880 * register_ftrace_direct - Call a custom trampoline directly
5881 * for multiple functions registered in @ops
5882 * @ops: The address of the struct ftrace_ops object
5883 * @addr: The address of the trampoline to call at @ops functions
5884 *
5885 * This is used to connect direct calls to @addr from the nop locations
5886 * of the functions registered in @ops (set via the ftrace_set_filter_ip
5887 * function).
5888 *
5889 * The location that it calls (@addr) must be able to handle a direct call,
5890 * and save the parameters of the function being traced, and restore them
5891 * (or inject new ones if needed), before returning.
5892 *
5893 * Returns:
5894 * 0 on success
5895 * -EINVAL - The @ops object was already registered with this call, or
5896 * there are no functions in the @ops object.
5897 * -EBUSY - Another direct function is already attached (there can be only one)
5898 * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5899 * -ENOMEM - There was an allocation failure.
5900 */
5901 int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
5902 {
5903 struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL;
5904 struct ftrace_func_entry *entry, *new;
5905 int err = -EBUSY, size, i;
5906
5907 if (ops->func || ops->trampoline)
5908 return -EINVAL;
5909 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5910 return -EINVAL;
5911 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5912 return -EINVAL;
5913
5914 hash = ops->func_hash->filter_hash;
5915 if (ftrace_hash_empty(hash))
5916 return -EINVAL;
5917
5918 mutex_lock(&direct_mutex);
5919
5920 /* Make sure requested entries are not already registered. */
5921 size = 1 << hash->size_bits;
5922 for (i = 0; i < size; i++) {
5923 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5924 if (ftrace_find_rec_direct(entry->ip))
5925 goto out_unlock;
5926 }
5927 }
5928
5929 err = -ENOMEM;
5930
5931 /* Make a copy of the hash to place the new and the old entries in */
5932 size = hash->count + direct_functions->count;
5933 if (size > 32)
5934 size = 32;
5935 new_hash = alloc_ftrace_hash(fls(size));
5936 if (!new_hash)
5937 goto out_unlock;
5938
5939 /* Now copy over the existing direct entries */
5940 size = 1 << direct_functions->size_bits;
5941 for (i = 0; i < size; i++) {
5942 hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) {
5943 new = add_hash_entry(new_hash, entry->ip);
5944 if (!new)
5945 goto out_unlock;
5946 new->direct = entry->direct;
5947 }
5948 }
5949
5950 /* ... and add the new entries */
5951 size = 1 << hash->size_bits;
5952 for (i = 0; i < size; i++) {
5953 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5954 new = add_hash_entry(new_hash, entry->ip);
5955 if (!new)
5956 goto out_unlock;
5957 /* Update both the copy and the hash entry */
5958 new->direct = addr;
5959 entry->direct = addr;
5960 }
5961 }
5962
5963 free_hash = direct_functions;
5964 rcu_assign_pointer(direct_functions, new_hash);
5965 new_hash = NULL;
5966
5967 ops->func = call_direct_funcs;
5968 ops->flags = MULTI_FLAGS;
5969 ops->trampoline = FTRACE_REGS_ADDR;
5970 ops->direct_call = addr;
5971
5972 err = register_ftrace_function_nolock(ops);
5973
5974 out_unlock:
5975 mutex_unlock(&direct_mutex);
5976
5977 if (free_hash && free_hash != EMPTY_HASH)
5978 call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb);
5979
5980 if (new_hash)
5981 free_ftrace_hash(new_hash);
5982
5983 return err;
5984 }
5985 EXPORT_SYMBOL_GPL(register_ftrace_direct);
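
/*
 * Example (illustrative sketch, not part of this file): a minimal setup
 * that attaches a direct trampoline to one function, in the spirit of the
 * samples under samples/ftrace/. The names my_tramp (an arch-specific
 * assembly trampoline that saves and restores the traced function's
 * arguments) and my_func_ip (the resolved address of a traceable function)
 * are hypothetical and assumed to be provided elsewhere:
 *
 *	static struct ftrace_ops direct_ops;
 *
 *	static int attach_direct(void)
 *	{
 *		int ret;
 *
 *		// Select which function(s) the direct call attaches to.
 *		ret = ftrace_set_filter_ip(&direct_ops, my_func_ip, 0, 0);
 *		if (ret)
 *			return ret;
 *
 *		// Patch the nop site(s) to jump to my_tramp.
 *		return register_ftrace_direct(&direct_ops,
 *					      (unsigned long)my_tramp);
 *	}
 *
 *	static void detach_direct(void)
 *	{
 *		// Tear down the direct call and drop the filters.
 *		unregister_ftrace_direct(&direct_ops,
 *					 (unsigned long)my_tramp, true);
 *	}
 */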
5986
5987 /**
5988 * unregister_ftrace_direct - Remove calls to custom trampoline
5989 * previously registered by register_ftrace_direct for @ops object.
5990 * @ops: The address of the struct ftrace_ops object
5991 * @addr: The address of the direct function that is called by the @ops functions
5992 * @free_filters: Set to true to remove all filters for the ftrace_ops, false otherwise
5993 *
5994 * This is used to remove direct calls to @addr from the nop locations
5995 * of the functions registered in @ops (set via the ftrace_set_filter_ip
5996 * function).
5997 *
5998 * Returns:
5999 * 0 on success
6000 * -EINVAL - The @ops object was not properly registered.
6001 */
6002 int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
6003 bool free_filters)
6004 {
6005 struct ftrace_hash *hash = ops->func_hash->filter_hash;
6006 int err;
6007
6008 if (check_direct_multi(ops))
6009 return -EINVAL;
6010 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6011 return -EINVAL;
6012
6013 mutex_lock(&direct_mutex);
6014 err = unregister_ftrace_function(ops);
6015 remove_direct_functions_hash(hash, addr);
6016 mutex_unlock(&direct_mutex);
6017
6018 /* clean up for a possible later register call */
6019 ops->func = NULL;
6020 ops->trampoline = 0;
6021
6022 if (free_filters)
6023 ftrace_free_filter(ops);
6024 return err;
6025 }
6026 EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
6027
6028 static int
6029 __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
6030 {
6031 struct ftrace_hash *hash;
6032 struct ftrace_func_entry *entry, *iter;
6033 static struct ftrace_ops tmp_ops = {
6034 .func = ftrace_stub,
6035 .flags = FTRACE_OPS_FL_STUB,
6036 };
6037 int i, size;
6038 int err;
6039
6040 lockdep_assert_held_once(&direct_mutex);
6041
6042 /* Enable the tmp_ops to have the same functions as the direct ops */
6043 ftrace_ops_init(&tmp_ops);
6044 tmp_ops.func_hash = ops->func_hash;
6045 tmp_ops.direct_call = addr;
6046
6047 err = register_ftrace_function_nolock(&tmp_ops);
6048 if (err)
6049 return err;
6050
6051 /*
6052 * Now ftrace_ops_list_func() is called to handle the direct callers.
6053 * We can safely change the direct functions attached to each entry.
6054 */
6055 mutex_lock(&ftrace_lock);
6056
6057 hash = ops->func_hash->filter_hash;
6058 size = 1 << hash->size_bits;
6059 for (i = 0; i < size; i++) {
6060 hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
6061 entry = __ftrace_lookup_ip(direct_functions, iter->ip);
6062 if (!entry)
6063 continue;
6064 entry->direct = addr;
6065 }
6066 }
6067 /* Prevent store tearing if a trampoline concurrently accesses the value */
6068 WRITE_ONCE(ops->direct_call, addr);
6069
6070 mutex_unlock(&ftrace_lock);
6071
6072 /* Removing the tmp_ops will add the updated direct callers to the functions */
6073 unregister_ftrace_function(&tmp_ops);
6074
6075 return err;
6076 }
6077
6078 /**
6079 * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
6080 * to call something else
6081 * @ops: The address of the struct ftrace_ops object
6082 * @addr: The address of the new trampoline to call at @ops functions
6083 *
6084 * This is used to unregister the currently registered direct caller and
6085 * register a new one (@addr) on the functions registered in @ops.
6086 *
6087 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
6088 * where there will be no callbacks called.
6089 *
6090 * Caller should already have direct_mutex locked, so we don't lock
6091 * direct_mutex here.
6092 *
6093 * Returns: zero on success. Non zero on error, which includes:
6094 * -EINVAL - The @ops object was not properly registered.
6095 */
6096 int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
6097 {
6098 if (check_direct_multi(ops))
6099 return -EINVAL;
6100 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6101 return -EINVAL;
6102
6103 return __modify_ftrace_direct(ops, addr);
6104 }
6105 EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock);
6106
6107 /**
6108 * modify_ftrace_direct - Modify an existing direct 'multi' call
6109 * to call something else
6110 * @ops: The address of the struct ftrace_ops object
6111 * @addr: The address of the new trampoline to call at @ops functions
6112 *
6113 * This is used to unregister the currently registered direct caller and
6114 * register a new one (@addr) on the functions registered in @ops.
6115 *
6116 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
6117 * where there will be no callbacks called.
6118 *
6119 * Returns: zero on success. Non zero on error, which includes:
6120 * -EINVAL - The @ops object was not properly registered.
6121 */
6122 int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
6123 {
6124 int err;
6125
6126 if (check_direct_multi(ops))
6127 return -EINVAL;
6128 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6129 return -EINVAL;
6130
6131 mutex_lock(&direct_mutex);
6132 err = __modify_ftrace_direct(ops, addr);
6133 mutex_unlock(&direct_mutex);
6134 return err;
6135 }
6136 EXPORT_SYMBOL_GPL(modify_ftrace_direct);
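
/*
 * Example (illustrative, hypothetical symbols): once the direct_ops from
 * the register_ftrace_direct() sketch above is live, every function it
 * covers can be switched to a second trampoline in one call:
 *
 *	err = modify_ftrace_direct(&direct_ops, (unsigned long)my_other_tramp);
 */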
6137 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
6138
6139 /**
6140 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
6141 * @ops: the ops to set the filter with
6142 * @ip: the address to add to or remove from the filter.
6143 * @remove: non zero to remove the ip from the filter
6144 * @reset: non zero to reset all filters before applying this filter.
6145 *
6146 * Filters denote which functions should be enabled when tracing is enabled.
6147 * If @ip is NULL, it fails to update the filter.
6148 *
6149 * This can allocate memory which must be freed before @ops can be freed,
6150 * either by removing each filtered addr or by using
6151 * ftrace_free_filter(@ops).
6152 */
6153 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
6154 int remove, int reset)
6155 {
6156 ftrace_ops_init(ops);
6157 return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
6158 }
6159 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
6160
6161 /**
6162 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
6163 * @ops: the ops to set the filter with
6164 * @ips: the array of addresses to add to or remove from the filter.
6165 * @cnt: the number of addresses in @ips
6166 * @remove: non zero to remove ips from the filter
6167 * @reset: non zero to reset all filters before applying this filter.
6168 *
6169 * Filters denote which functions should be enabled when tracing is enabled.
6170 * If the @ips array or any ip specified within is NULL, it fails to update the filter.
6171 *
6172 * This can allocate memory which must be freed before @ops can be freed,
6173 * either by removing each filtered addr or by using
6174 * ftrace_free_filter(@ops).
6175 */
6176 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
6177 unsigned int cnt, int remove, int reset)
6178 {
6179 ftrace_ops_init(ops);
6180 return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
6181 }
6182 EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
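
/*
 * Example (illustrative): restrict an ftrace_ops to a small set of already
 * resolved addresses before registering it. The names my_ops, ip_of_func_a
 * and ip_of_func_b are hypothetical; the addresses are assumed to have
 * been looked up elsewhere:
 *
 *	unsigned long ips[] = { ip_of_func_a, ip_of_func_b };
 *
 *	// reset=1 drops any previous filter, remove=0 adds the entries
 *	ret = ftrace_set_filter_ips(&my_ops, ips, ARRAY_SIZE(ips), 0, 1);
 */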
6183
6184 /**
6185 * ftrace_ops_set_global_filter - setup ops to use global filters
6186 * @ops: the ops which will use the global filters
6187 *
6188 * ftrace users who need global function trace filtering should call this.
6189 * It can set the global filter only if ops were not initialized before.
6190 */
6191 void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
6192 {
6193 if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
6194 return;
6195
6196 ftrace_ops_init(ops);
6197 ops->func_hash = &global_ops.local_hash;
6198 }
6199 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
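
/*
 * Example (illustrative, my_ops is a hypothetical name): an ops that should
 * honor whatever is written to the global set_ftrace_filter and
 * set_ftrace_notrace files only needs to share the global hashes before
 * it is registered:
 *
 *	ftrace_ops_set_global_filter(&my_ops);
 *	register_ftrace_function(&my_ops);
 */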
6200
6201 static int
6202 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
6203 int reset, int enable)
6204 {
6205 return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
6206 }
6207
6208 /**
6209 * ftrace_set_filter - set a function to filter on in ftrace
6210 * @ops: the ops to set the filter with
6211 * @buf: the string that holds the function filter text.
6212 * @len: the length of the string.
6213 * @reset: non-zero to reset all filters before applying this filter.
6214 *
6215 * Filters denote which functions should be enabled when tracing is enabled.
6216 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
6217 *
6218 * This can allocate memory which must be freed before @ops can be freed,
6219 * either by removing each filtered addr or by using
6220 * ftrace_free_filter(@ops).
6221 */
6222 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
6223 int len, int reset)
6224 {
6225 ftrace_ops_init(ops);
6226 return ftrace_set_regex(ops, buf, len, reset, 1);
6227 }
6228 EXPORT_SYMBOL_GPL(ftrace_set_filter);
6229
6230 /**
6231 * ftrace_set_notrace - set a function to not trace in ftrace
6232 * @ops: the ops to set the notrace filter with
6233 * @buf: the string that holds the function notrace text.
6234 * @len: the length of the string.
6235 * @reset: non-zero to reset all filters before applying this filter.
6236 *
6237 * Notrace Filters denote which functions should not be enabled when tracing
6238 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
6239 * for tracing.
6240 *
6241 * This can allocate memory which must be freed before @ops can be freed,
6242 * either by removing each filtered addr or by using
6243 * ftrace_free_filter(@ops).
6244 */
6245 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
6246 int len, int reset)
6247 {
6248 ftrace_ops_init(ops);
6249 return ftrace_set_regex(ops, buf, len, reset, 0);
6250 }
6251 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
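
/*
 * Example (illustrative sketch): a callback ops that traces every "vfs_*"
 * function except vfs_read. The callback follows the ftrace_func_t
 * signature; my_callback and my_ops are hypothetical names:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// runs on every hit of a filtered function
 *	}
 *
 *	static struct ftrace_ops my_ops = { .func = my_callback };
 *
 *	ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
 *	ftrace_set_notrace(&my_ops, "vfs_read", strlen("vfs_read"), 1);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_free_filter(&my_ops);
 */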
6252 /**
6253 * ftrace_set_global_filter - set a function to filter on with global tracers
6254 * @buf: the string that holds the function filter text.
6255 * @len: the length of the string.
6256 * @reset: non-zero to reset all filters before applying this filter.
6257 *
6258 * Filters denote which functions should be enabled when tracing is enabled.
6259 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
6260 */
6261 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
6262 {
6263 ftrace_set_regex(&global_ops, buf, len, reset, 1);
6264 }
6265 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
6266
6267 /**
6268 * ftrace_set_global_notrace - set a function to not trace with global tracers
6269 * @buf: the string that holds the function notrace text.
6270 * @len: the length of the string.
6271 * @reset: non-zero to reset all filters before applying this filter.
6272 *
6273 * Notrace Filters denote which functions should not be enabled when tracing
6274 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
6275 * for tracing.
6276 */
6277 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
6278 {
6279 ftrace_set_regex(&global_ops, buf, len, reset, 0);
6280 }
6281 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
6282
6283 /*
6284 * command line interface to allow users to set filters on boot up.
6285 */
6286 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
6287 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
6288 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
6289
6290 /* Used by the function selftest to skip the test if a filter is set */
6291 bool ftrace_filter_param __initdata;
6292
6293 static int __init set_ftrace_notrace(char *str)
6294 {
6295 ftrace_filter_param = true;
6296 strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
6297 return 1;
6298 }
6299 __setup("ftrace_notrace=", set_ftrace_notrace);
6300
6301 static int __init set_ftrace_filter(char *str)
6302 {
6303 ftrace_filter_param = true;
6304 strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
6305 return 1;
6306 }
6307 __setup("ftrace_filter=", set_ftrace_filter);
6308
6309 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6310 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
6311 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
6312 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
6313
6314 static int __init set_graph_function(char *str)
6315 {
6316 strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
6317 return 1;
6318 }
6319 __setup("ftrace_graph_filter=", set_graph_function);
6320
6321 static int __init set_graph_notrace_function(char *str)
6322 {
6323 strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
6324 return 1;
6325 }
6326 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
6327
6328 static int __init set_graph_max_depth_function(char *str)
6329 {
6330 if (!str || kstrtouint(str, 0, &fgraph_max_depth))
6331 return 0;
6332 return 1;
6333 }
6334 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
6335
6336 static void __init set_ftrace_early_graph(char *buf, int enable)
6337 {
6338 int ret;
6339 char *func;
6340 struct ftrace_hash *hash;
6341
6342 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
6343 if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
6344 return;
6345
6346 while (buf) {
6347 func = strsep(&buf, ",");
6348 /* we allow only one expression at a time */
6349 ret = ftrace_graph_set_hash(hash, func);
6350 if (ret)
6351 printk(KERN_DEBUG "ftrace: function %s not "
6352 "traceable\n", func);
6353 }
6354
6355 if (enable)
6356 ftrace_graph_hash = hash;
6357 else
6358 ftrace_graph_notrace_hash = hash;
6359 }
6360 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6361
6362 void __init
6363 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
6364 {
6365 char *func;
6366
6367 ftrace_ops_init(ops);
6368
6369 while (buf) {
6370 func = strsep(&buf, ",");
6371 ftrace_set_regex(ops, func, strlen(func), 0, enable);
6372 }
6373 }
6374
6375 static void __init set_ftrace_early_filters(void)
6376 {
6377 if (ftrace_filter_buf[0])
6378 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
6379 if (ftrace_notrace_buf[0])
6380 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
6381 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6382 if (ftrace_graph_buf[0])
6383 set_ftrace_early_graph(ftrace_graph_buf, 1);
6384 if (ftrace_graph_notrace_buf[0])
6385 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
6386 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6387 }
6388
6389 int ftrace_regex_release(struct inode *inode, struct file *file)
6390 {
6391 struct seq_file *m = (struct seq_file *)file->private_data;
6392 struct ftrace_iterator *iter;
6393 struct ftrace_hash **orig_hash;
6394 struct trace_parser *parser;
6395 int filter_hash;
6396
6397 if (file->f_mode & FMODE_READ) {
6398 iter = m->private;
6399 seq_release(inode, file);
6400 } else
6401 iter = file->private_data;
6402
6403 parser = &iter->parser;
6404 if (trace_parser_loaded(parser)) {
6405 int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
6406
6407 ftrace_process_regex(iter, parser->buffer,
6408 parser->idx, enable);
6409 }
6410
6411 trace_parser_put(parser);
6412
6413 mutex_lock(&iter->ops->func_hash->regex_lock);
6414
6415 if (file->f_mode & FMODE_WRITE) {
6416 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
6417
6418 if (filter_hash) {
6419 orig_hash = &iter->ops->func_hash->filter_hash;
6420 if (iter->tr) {
6421 if (list_empty(&iter->tr->mod_trace))
6422 iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
6423 else
6424 iter->hash->flags |= FTRACE_HASH_FL_MOD;
6425 }
6426 } else
6427 orig_hash = &iter->ops->func_hash->notrace_hash;
6428
6429 mutex_lock(&ftrace_lock);
6430 ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
6431 iter->hash, filter_hash);
6432 mutex_unlock(&ftrace_lock);
6433 } else {
6434 /* For read only, the hash is the ops hash */
6435 iter->hash = NULL;
6436 }
6437
6438 mutex_unlock(&iter->ops->func_hash->regex_lock);
6439 free_ftrace_hash(iter->hash);
6440 if (iter->tr)
6441 trace_array_put(iter->tr);
6442 kfree(iter);
6443
6444 return 0;
6445 }
6446
6447 static const struct file_operations ftrace_avail_fops = {
6448 .open = ftrace_avail_open,
6449 .read = seq_read,
6450 .llseek = seq_lseek,
6451 .release = seq_release_private,
6452 };
6453
6454 static const struct file_operations ftrace_enabled_fops = {
6455 .open = ftrace_enabled_open,
6456 .read = seq_read,
6457 .llseek = seq_lseek,
6458 .release = seq_release_private,
6459 };
6460
6461 static const struct file_operations ftrace_touched_fops = {
6462 .open = ftrace_touched_open,
6463 .read = seq_read,
6464 .llseek = seq_lseek,
6465 .release = seq_release_private,
6466 };
6467
6468 static const struct file_operations ftrace_avail_addrs_fops = {
6469 .open = ftrace_avail_addrs_open,
6470 .read = seq_read,
6471 .llseek = seq_lseek,
6472 .release = seq_release_private,
6473 };
6474
6475 static const struct file_operations ftrace_filter_fops = {
6476 .open = ftrace_filter_open,
6477 .read = seq_read,
6478 .write = ftrace_filter_write,
6479 .llseek = tracing_lseek,
6480 .release = ftrace_regex_release,
6481 };
6482
6483 static const struct file_operations ftrace_notrace_fops = {
6484 .open = ftrace_notrace_open,
6485 .read = seq_read,
6486 .write = ftrace_notrace_write,
6487 .llseek = tracing_lseek,
6488 .release = ftrace_regex_release,
6489 };
6490
6491 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6492
6493 static DEFINE_MUTEX(graph_lock);
6494
6495 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
6496 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
6497
6498 enum graph_filter_type {
6499 GRAPH_FILTER_NOTRACE = 0,
6500 GRAPH_FILTER_FUNCTION,
6501 };
6502
6503 #define FTRACE_GRAPH_EMPTY ((void *)1)
6504
6505 struct ftrace_graph_data {
6506 struct ftrace_hash *hash;
6507 struct ftrace_func_entry *entry;
6508 int idx; /* for hash table iteration */
6509 enum graph_filter_type type;
6510 struct ftrace_hash *new_hash;
6511 const struct seq_operations *seq_ops;
6512 struct trace_parser parser;
6513 };
6514
6515 static void *
6516 __g_next(struct seq_file *m, loff_t *pos)
6517 {
6518 struct ftrace_graph_data *fgd = m->private;
6519 struct ftrace_func_entry *entry = fgd->entry;
6520 struct hlist_head *head;
6521 int i, idx = fgd->idx;
6522
6523 if (*pos >= fgd->hash->count)
6524 return NULL;
6525
6526 if (entry) {
6527 hlist_for_each_entry_continue(entry, hlist) {
6528 fgd->entry = entry;
6529 return entry;
6530 }
6531
6532 idx++;
6533 }
6534
6535 for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
6536 head = &fgd->hash->buckets[i];
6537 hlist_for_each_entry(entry, head, hlist) {
6538 fgd->entry = entry;
6539 fgd->idx = i;
6540 return entry;
6541 }
6542 }
6543 return NULL;
6544 }
6545
6546 static void *
6547 g_next(struct seq_file *m, void *v, loff_t *pos)
6548 {
6549 (*pos)++;
6550 return __g_next(m, pos);
6551 }
6552
6553 static void *g_start(struct seq_file *m, loff_t *pos)
6554 {
6555 struct ftrace_graph_data *fgd = m->private;
6556
6557 mutex_lock(&graph_lock);
6558
6559 if (fgd->type == GRAPH_FILTER_FUNCTION)
6560 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6561 lockdep_is_held(&graph_lock));
6562 else
6563 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6564 lockdep_is_held(&graph_lock));
6565
6566 /* Nothing, tell g_show to print all functions are enabled */
6567 if (ftrace_hash_empty(fgd->hash) && !*pos)
6568 return FTRACE_GRAPH_EMPTY;
6569
6570 fgd->idx = 0;
6571 fgd->entry = NULL;
6572 return __g_next(m, pos);
6573 }
6574
6575 static void g_stop(struct seq_file *m, void *p)
6576 {
6577 mutex_unlock(&graph_lock);
6578 }
6579
6580 static int g_show(struct seq_file *m, void *v)
6581 {
6582 struct ftrace_func_entry *entry = v;
6583
6584 if (!entry)
6585 return 0;
6586
6587 if (entry == FTRACE_GRAPH_EMPTY) {
6588 struct ftrace_graph_data *fgd = m->private;
6589
6590 if (fgd->type == GRAPH_FILTER_FUNCTION)
6591 seq_puts(m, "#### all functions enabled ####\n");
6592 else
6593 seq_puts(m, "#### no functions disabled ####\n");
6594 return 0;
6595 }
6596
6597 seq_printf(m, "%ps\n", (void *)entry->ip);
6598
6599 return 0;
6600 }
6601
6602 static const struct seq_operations ftrace_graph_seq_ops = {
6603 .start = g_start,
6604 .next = g_next,
6605 .stop = g_stop,
6606 .show = g_show,
6607 };
6608
6609 static int
6610 __ftrace_graph_open(struct inode *inode, struct file *file,
6611 struct ftrace_graph_data *fgd)
6612 {
6613 int ret;
6614 struct ftrace_hash *new_hash = NULL;
6615
6616 ret = security_locked_down(LOCKDOWN_TRACEFS);
6617 if (ret)
6618 return ret;
6619
6620 if (file->f_mode & FMODE_WRITE) {
6621 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6622
6623 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6624 return -ENOMEM;
6625
6626 if (file->f_flags & O_TRUNC)
6627 new_hash = alloc_ftrace_hash(size_bits);
6628 else
6629 new_hash = alloc_and_copy_ftrace_hash(size_bits,
6630 fgd->hash);
6631 if (!new_hash) {
6632 ret = -ENOMEM;
6633 goto out;
6634 }
6635 }
6636
6637 if (file->f_mode & FMODE_READ) {
6638 ret = seq_open(file, &ftrace_graph_seq_ops);
6639 if (!ret) {
6640 struct seq_file *m = file->private_data;
6641 m->private = fgd;
6642 } else {
6643 /* Failed */
6644 free_ftrace_hash(new_hash);
6645 new_hash = NULL;
6646 }
6647 } else
6648 file->private_data = fgd;
6649
6650 out:
6651 if (ret < 0 && file->f_mode & FMODE_WRITE)
6652 trace_parser_put(&fgd->parser);
6653
6654 fgd->new_hash = new_hash;
6655
6656 /*
6657 * All uses of fgd->hash must be taken with the graph_lock
6658 * held. The graph_lock is going to be released, so force
6659 * fgd->hash to be reinitialized when it is taken again.
6660 */
6661 fgd->hash = NULL;
6662
6663 return ret;
6664 }
6665
6666 static int
6667 ftrace_graph_open(struct inode *inode, struct file *file)
6668 {
6669 struct ftrace_graph_data *fgd;
6670 int ret;
6671
6672 if (unlikely(ftrace_disabled))
6673 return -ENODEV;
6674
6675 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6676 if (fgd == NULL)
6677 return -ENOMEM;
6678
6679 mutex_lock(&graph_lock);
6680
6681 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6682 lockdep_is_held(&graph_lock));
6683 fgd->type = GRAPH_FILTER_FUNCTION;
6684 fgd->seq_ops = &ftrace_graph_seq_ops;
6685
6686 ret = __ftrace_graph_open(inode, file, fgd);
6687 if (ret < 0)
6688 kfree(fgd);
6689
6690 mutex_unlock(&graph_lock);
6691 return ret;
6692 }
6693
6694 static int
6695 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6696 {
6697 struct ftrace_graph_data *fgd;
6698 int ret;
6699
6700 if (unlikely(ftrace_disabled))
6701 return -ENODEV;
6702
6703 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6704 if (fgd == NULL)
6705 return -ENOMEM;
6706
6707 mutex_lock(&graph_lock);
6708
6709 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6710 lockdep_is_held(&graph_lock));
6711 fgd->type = GRAPH_FILTER_NOTRACE;
6712 fgd->seq_ops = &ftrace_graph_seq_ops;
6713
6714 ret = __ftrace_graph_open(inode, file, fgd);
6715 if (ret < 0)
6716 kfree(fgd);
6717
6718 mutex_unlock(&graph_lock);
6719 return ret;
6720 }
6721
6722 static int
6723 ftrace_graph_release(struct inode *inode, struct file *file)
6724 {
6725 struct ftrace_graph_data *fgd;
6726 struct ftrace_hash *old_hash, *new_hash;
6727 struct trace_parser *parser;
6728 int ret = 0;
6729
6730 if (file->f_mode & FMODE_READ) {
6731 struct seq_file *m = file->private_data;
6732
6733 fgd = m->private;
6734 seq_release(inode, file);
6735 } else {
6736 fgd = file->private_data;
6737 }
6738
6739
6740 if (file->f_mode & FMODE_WRITE) {
6741
6742 parser = &fgd->parser;
6743
6744 if (trace_parser_loaded((parser))) {
6745 ret = ftrace_graph_set_hash(fgd->new_hash,
6746 parser->buffer);
6747 }
6748
6749 trace_parser_put(parser);
6750
6751 new_hash = __ftrace_hash_move(fgd->new_hash);
6752 if (!new_hash) {
6753 ret = -ENOMEM;
6754 goto out;
6755 }
6756
6757 mutex_lock(&graph_lock);
6758
6759 if (fgd->type == GRAPH_FILTER_FUNCTION) {
6760 old_hash = rcu_dereference_protected(ftrace_graph_hash,
6761 lockdep_is_held(&graph_lock));
6762 rcu_assign_pointer(ftrace_graph_hash, new_hash);
6763 } else {
6764 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6765 lockdep_is_held(&graph_lock));
6766 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6767 }
6768
6769 mutex_unlock(&graph_lock);
6770
6771 /*
6772 * We need to do a hard force of sched synchronization.
6773 * This is because we use preempt_disable() to do RCU, but
6774 * the function tracers can be called where RCU is not watching
6775 * (like before user_exit()). We can not rely on the RCU
6776 * infrastructure to do the synchronization, thus we must do it
6777 * ourselves.
6778 */
6779 if (old_hash != EMPTY_HASH)
6780 synchronize_rcu_tasks_rude();
6781
6782 free_ftrace_hash(old_hash);
6783 }
6784
6785 out:
6786 free_ftrace_hash(fgd->new_hash);
6787 kfree(fgd);
6788
6789 return ret;
6790 }
6791
6792 static int
6793 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6794 {
6795 struct ftrace_glob func_g;
6796 struct dyn_ftrace *rec;
6797 struct ftrace_page *pg;
6798 struct ftrace_func_entry *entry;
6799 int fail = 1;
6800 int not;
6801
6802 /* decode regex */
6803 func_g.type = filter_parse_regex(buffer, strlen(buffer),
6804 &func_g.search, &not);
6805
6806 func_g.len = strlen(func_g.search);
6807
6808 mutex_lock(&ftrace_lock);
6809
6810 if (unlikely(ftrace_disabled)) {
6811 mutex_unlock(&ftrace_lock);
6812 return -ENODEV;
6813 }
6814
6815 do_for_each_ftrace_rec(pg, rec) {
6816
6817 if (rec->flags & FTRACE_FL_DISABLED)
6818 continue;
6819
6820 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
6821 entry = ftrace_lookup_ip(hash, rec->ip);
6822
6823 if (!not) {
6824 fail = 0;
6825
6826 if (entry)
6827 continue;
6828 if (add_hash_entry(hash, rec->ip) == NULL)
6829 goto out;
6830 } else {
6831 if (entry) {
6832 free_hash_entry(hash, entry);
6833 fail = 0;
6834 }
6835 }
6836 }
6837 } while_for_each_ftrace_rec();
6838 out:
6839 mutex_unlock(&ftrace_lock);
6840
6841 if (fail)
6842 return -EINVAL;
6843
6844 return 0;
6845 }
6846
6847 static ssize_t
6848 ftrace_graph_write(struct file *file, const char __user *ubuf,
6849 size_t cnt, loff_t *ppos)
6850 {
6851 ssize_t read, ret = 0;
6852 struct ftrace_graph_data *fgd = file->private_data;
6853 struct trace_parser *parser;
6854
6855 if (!cnt)
6856 return 0;
6857
6858 /* Read mode uses seq functions */
6859 if (file->f_mode & FMODE_READ) {
6860 struct seq_file *m = file->private_data;
6861 fgd = m->private;
6862 }
6863
6864 parser = &fgd->parser;
6865
6866 read = trace_get_user(parser, ubuf, cnt, ppos);
6867
6868 if (read >= 0 && trace_parser_loaded(parser) &&
6869 !trace_parser_cont(parser)) {
6870
6871 ret = ftrace_graph_set_hash(fgd->new_hash,
6872 parser->buffer);
6873 trace_parser_clear(parser);
6874 }
6875
6876 if (!ret)
6877 ret = read;
6878
6879 return ret;
6880 }
6881
6882 static const struct file_operations ftrace_graph_fops = {
6883 .open = ftrace_graph_open,
6884 .read = seq_read,
6885 .write = ftrace_graph_write,
6886 .llseek = tracing_lseek,
6887 .release = ftrace_graph_release,
6888 };
6889
6890 static const struct file_operations ftrace_graph_notrace_fops = {
6891 .open = ftrace_graph_notrace_open,
6892 .read = seq_read,
6893 .write = ftrace_graph_write,
6894 .llseek = tracing_lseek,
6895 .release = ftrace_graph_release,
6896 };
6897 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6898
6899 void ftrace_create_filter_files(struct ftrace_ops *ops,
6900 struct dentry *parent)
6901 {
6902
6903 trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
6904 ops, &ftrace_filter_fops);
6905
6906 trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
6907 ops, &ftrace_notrace_fops);
6908 }
6909
6910 /*
6911 * The name "destroy_filter_files" is really a misnomer. Although
6912 * in the future, it may actually delete the files, but this is
6913 * really intended to make sure the ops passed in are disabled
6914 * and that when this function returns, the caller is free to
6915 * free the ops.
6916 *
6917 * The "destroy" name is only to match the "create" name that this
6918 * should be paired with.
6919 */
6920 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6921 {
6922 mutex_lock(&ftrace_lock);
6923 if (ops->flags & FTRACE_OPS_FL_ENABLED)
6924 ftrace_shutdown(ops, 0);
6925 ops->flags |= FTRACE_OPS_FL_DELETED;
6926 ftrace_free_filter(ops);
6927 mutex_unlock(&ftrace_lock);
6928 }
6929
6930 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6931 {
6932
6933 trace_create_file("available_filter_functions", TRACE_MODE_READ,
6934 d_tracer, NULL, &ftrace_avail_fops);
6935
6936 trace_create_file("available_filter_functions_addrs", TRACE_MODE_READ,
6937 d_tracer, NULL, &ftrace_avail_addrs_fops);
6938
6939 trace_create_file("enabled_functions", TRACE_MODE_READ,
6940 d_tracer, NULL, &ftrace_enabled_fops);
6941
6942 trace_create_file("touched_functions", TRACE_MODE_READ,
6943 d_tracer, NULL, &ftrace_touched_fops);
6944
6945 ftrace_create_filter_files(&global_ops, d_tracer);
6946
6947 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6948 trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
6949 NULL,
6950 &ftrace_graph_fops);
6951 trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
6952 NULL,
6953 &ftrace_graph_notrace_fops);
6954 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6955
6956 return 0;
6957 }
6958
6959 static int ftrace_cmp_ips(const void *a, const void *b)
6960 {
6961 const unsigned long *ipa = a;
6962 const unsigned long *ipb = b;
6963
6964 if (*ipa > *ipb)
6965 return 1;
6966 if (*ipa < *ipb)
6967 return -1;
6968 return 0;
6969 }
6970
6971 #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
6972 static void test_is_sorted(unsigned long *start, unsigned long count)
6973 {
6974 int i;
6975
6976 for (i = 1; i < count; i++) {
6977 if (WARN(start[i - 1] > start[i],
6978 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
6979 (void *)start[i - 1], start[i - 1],
6980 (void *)start[i], start[i]))
6981 break;
6982 }
6983 if (i == count)
6984 pr_info("ftrace section at %px sorted properly\n", start);
6985 }
6986 #else
6987 static void test_is_sorted(unsigned long *start, unsigned long count)
6988 {
6989 }
6990 #endif
6991
6992 static int ftrace_process_locs(struct module *mod,
6993 unsigned long *start,
6994 unsigned long *end)
6995 {
6996 struct ftrace_page *pg_unuse = NULL;
6997 struct ftrace_page *start_pg;
6998 struct ftrace_page *pg;
6999 struct dyn_ftrace *rec;
7000 unsigned long skipped = 0;
7001 unsigned long count;
7002 unsigned long *p;
7003 unsigned long addr;
7004 unsigned long flags = 0; /* Shut up gcc */
7005 int ret = -ENOMEM;
7006
7007 count = end - start;
7008
7009 if (!count)
7010 return 0;
7011
7012 /*
7013 * Sorting mcount in vmlinux at build time depends on
7014 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount_loc in
7015 * modules cannot be sorted at build time.
7016 */
7017 if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
7018 sort(start, count, sizeof(*start),
7019 ftrace_cmp_ips, NULL);
7020 } else {
7021 test_is_sorted(start, count);
7022 }
7023
7024 start_pg = ftrace_allocate_pages(count);
7025 if (!start_pg)
7026 return -ENOMEM;
7027
7028 mutex_lock(&ftrace_lock);
7029
7030 /*
7031 * The core kernel and each module need their own pages, as
7032 * modules will free them when they are removed.
7033 * Force a new page to be allocated for modules.
7034 */
7035 if (!mod) {
7036 WARN_ON(ftrace_pages || ftrace_pages_start);
7037 /* First initialization */
7038 ftrace_pages = ftrace_pages_start = start_pg;
7039 } else {
7040 if (!ftrace_pages)
7041 goto out;
7042
7043 if (WARN_ON(ftrace_pages->next)) {
7044 /* Hmm, we have free pages? */
7045 while (ftrace_pages->next)
7046 ftrace_pages = ftrace_pages->next;
7047 }
7048
7049 ftrace_pages->next = start_pg;
7050 }
7051
7052 p = start;
7053 pg = start_pg;
7054 while (p < end) {
7055 unsigned long end_offset;
7056 addr = ftrace_call_adjust(*p++);
7057 /*
7058 * Some architecture linkers will pad between
7059 * the different mcount_loc sections of different
7060 * object files to satisfy alignments.
7061 * Skip any NULL pointers.
7062 */
7063 if (!addr) {
7064 skipped++;
7065 continue;
7066 }
7067
7068 end_offset = (pg->index+1) * sizeof(pg->records[0]);
7069 if (end_offset > PAGE_SIZE << pg->order) {
7070 /* We should have allocated enough */
7071 if (WARN_ON(!pg->next))
7072 break;
7073 pg = pg->next;
7074 }
7075
7076 rec = &pg->records[pg->index++];
7077 rec->ip = addr;
7078 }
7079
7080 if (pg->next) {
7081 pg_unuse = pg->next;
7082 pg->next = NULL;
7083 }
7084
7085 /* Assign the last page to ftrace_pages */
7086 ftrace_pages = pg;
7087
7088 /*
7089 * We only need to disable interrupts on start up
7090 * because we are modifying code that an interrupt
7091 * may execute, and the modification is not atomic.
7092 * But for modules, nothing runs the code we modify
7093 * until we are finished with it, and there's no
7094 * reason to cause large interrupt latencies while we do it.
7095 */
7096 if (!mod)
7097 local_irq_save(flags);
7098 ftrace_update_code(mod, start_pg);
7099 if (!mod)
7100 local_irq_restore(flags);
7101 ret = 0;
7102 out:
7103 mutex_unlock(&ftrace_lock);
7104
7105 /* We should have used all pages unless we skipped some */
7106 if (pg_unuse) {
7107 WARN_ON(!skipped);
7108 /* Need to synchronize with ftrace_location_range() */
7109 synchronize_rcu();
7110 ftrace_free_pages(pg_unuse);
7111 }
7112 return ret;
7113 }
7114
7115 struct ftrace_mod_func {
7116 struct list_head list;
7117 char *name;
7118 unsigned long ip;
7119 unsigned int size;
7120 };
7121
7122 struct ftrace_mod_map {
7123 struct rcu_head rcu;
7124 struct list_head list;
7125 struct module *mod;
7126 unsigned long start_addr;
7127 unsigned long end_addr;
7128 struct list_head funcs;
7129 unsigned int num_funcs;
7130 };
7131
7132 static int ftrace_get_trampoline_kallsym(unsigned int symnum,
7133 unsigned long *value, char *type,
7134 char *name, char *module_name,
7135 int *exported)
7136 {
7137 struct ftrace_ops *op;
7138
7139 list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
7140 if (!op->trampoline || symnum--)
7141 continue;
7142 *value = op->trampoline;
7143 *type = 't';
7144 strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
7145 strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
7146 *exported = 0;
7147 return 0;
7148 }
7149
7150 return -ERANGE;
7151 }
7152
7153 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
7154 /*
7155 * Check if the current ops references the given ip.
7156 *
7157 * If the ops traces all functions, then it was already accounted for.
7158 * If the ops does not trace the current record function, skip it.
7159 * If the ops ignores the function via notrace filter, skip it.
7160 */
7161 static bool
7162 ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
7163 {
7164 /* If ops isn't enabled, ignore it */
7165 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
7166 return false;
7167
7168 /* If ops traces all then it includes this function */
7169 if (ops_traces_mod(ops))
7170 return true;
7171
7172 /* The function must be in the filter */
7173 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
7174 !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
7175 return false;
7176
7177 /* If in notrace hash, we ignore it too */
7178 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
7179 return false;
7180
7181 return true;
7182 }
7183 #endif
7184
7185 #ifdef CONFIG_MODULES
7186
7187 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
7188
7189 static LIST_HEAD(ftrace_mod_maps);
7190
7191 static int referenced_filters(struct dyn_ftrace *rec)
7192 {
7193 struct ftrace_ops *ops;
7194 int cnt = 0;
7195
7196 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
7197 if (ops_references_ip(ops, rec->ip)) {
7198 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
7199 continue;
7200 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
7201 continue;
7202 cnt++;
7203 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
7204 rec->flags |= FTRACE_FL_REGS;
7205 if (cnt == 1 && ops->trampoline)
7206 rec->flags |= FTRACE_FL_TRAMP;
7207 else
7208 rec->flags &= ~FTRACE_FL_TRAMP;
7209 }
7210 }
7211
7212 return cnt;
7213 }
7214
7215 static void
7216 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
7217 {
7218 struct ftrace_func_entry *entry;
7219 struct dyn_ftrace *rec;
7220 int i;
7221
7222 if (ftrace_hash_empty(hash))
7223 return;
7224
7225 for (i = 0; i < pg->index; i++) {
7226 rec = &pg->records[i];
7227 entry = __ftrace_lookup_ip(hash, rec->ip);
7228 /*
7229 * Do not allow this rec to match again.
7230 * Yeah, it may waste some memory, but will be removed
7231 * if/when the hash is modified again.
7232 */
7233 if (entry)
7234 entry->ip = 0;
7235 }
7236 }
7237
7238 /* Clear any records from hashes */
7239 static void clear_mod_from_hashes(struct ftrace_page *pg)
7240 {
7241 struct trace_array *tr;
7242
7243 mutex_lock(&trace_types_lock);
7244 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7245 if (!tr->ops || !tr->ops->func_hash)
7246 continue;
7247 mutex_lock(&tr->ops->func_hash->regex_lock);
7248 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
7249 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
7250 mutex_unlock(&tr->ops->func_hash->regex_lock);
7251 }
7252 mutex_unlock(&trace_types_lock);
7253 }
7254
7255 static void ftrace_free_mod_map(struct rcu_head *rcu)
7256 {
7257 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
7258 struct ftrace_mod_func *mod_func;
7259 struct ftrace_mod_func *n;
7260
7261 /* All the contents of mod_map are no longer visible to readers */
7262 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
7263 kfree(mod_func->name);
7264 list_del(&mod_func->list);
7265 kfree(mod_func);
7266 }
7267
7268 kfree(mod_map);
7269 }
7270
7271 void ftrace_release_mod(struct module *mod)
7272 {
7273 struct ftrace_mod_map *mod_map;
7274 struct ftrace_mod_map *n;
7275 struct dyn_ftrace *rec;
7276 struct ftrace_page **last_pg;
7277 struct ftrace_page *tmp_page = NULL;
7278 struct ftrace_page *pg;
7279
7280 mutex_lock(&ftrace_lock);
7281
7282 if (ftrace_disabled)
7283 goto out_unlock;
7284
7285 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
7286 if (mod_map->mod == mod) {
7287 list_del_rcu(&mod_map->list);
7288 call_rcu(&mod_map->rcu, ftrace_free_mod_map);
7289 break;
7290 }
7291 }
7292
7293 /*
7294 * Each module has its own ftrace_pages, remove
7295 * them from the list.
7296 */
7297 last_pg = &ftrace_pages_start;
7298 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
7299 rec = &pg->records[0];
7300 if (within_module(rec->ip, mod)) {
7301 /*
7302 * As core pages are first, the first
7303 * page should never be a module page.
7304 */
7305 if (WARN_ON(pg == ftrace_pages_start))
7306 goto out_unlock;
7307
7308 /* Check if we are deleting the last page */
7309 if (pg == ftrace_pages)
7310 ftrace_pages = next_to_ftrace_page(last_pg);
7311
7312 ftrace_update_tot_cnt -= pg->index;
7313 *last_pg = pg->next;
7314
7315 pg->next = tmp_page;
7316 tmp_page = pg;
7317 } else
7318 last_pg = &pg->next;
7319 }
7320 out_unlock:
7321 mutex_unlock(&ftrace_lock);
7322
7323 /* Need to synchronize with ftrace_location_range() */
7324 if (tmp_page)
7325 synchronize_rcu();
7326 for (pg = tmp_page; pg; pg = tmp_page) {
7327
7328 /* Needs to be called outside of ftrace_lock */
7329 clear_mod_from_hashes(pg);
7330
7331 if (pg->records) {
7332 free_pages((unsigned long)pg->records, pg->order);
7333 ftrace_number_of_pages -= 1 << pg->order;
7334 }
7335 tmp_page = pg->next;
7336 kfree(pg);
7337 ftrace_number_of_groups--;
7338 }
7339 }
7340
7341 void ftrace_module_enable(struct module *mod)
7342 {
7343 struct dyn_ftrace *rec;
7344 struct ftrace_page *pg;
7345
7346 mutex_lock(&ftrace_lock);
7347
7348 if (ftrace_disabled)
7349 goto out_unlock;
7350
7351 /*
7352 * If the tracing is enabled, go ahead and enable the record.
7353 *
7354 * The reason not to enable the record immediately is the
7355 * inherent check of ftrace_make_nop/ftrace_make_call for
7356 * correct previous instructions. Doing the NOP
7357 * conversion first puts the module into the correct state, thus
7358 * passing the ftrace_make_call check.
7359 *
7360 * We also delay this to after the module code already set the
7361 * text to read-only, as we now need to set it back to read-write
7362 * so that we can modify the text.
7363 */
7364 if (ftrace_start_up)
7365 ftrace_arch_code_modify_prepare();
7366
7367 do_for_each_ftrace_rec(pg, rec) {
7368 int cnt;
7369 /*
7370 * do_for_each_ftrace_rec() is a double loop.
7371 * Module text shares the pg. If a record is
7372 * not part of this module, then skip this pg,
7373 * which is what the "break" will do.
7374 */
7375 if (!within_module(rec->ip, mod))
7376 break;
7377
7378 /* Weak functions should still be ignored */
7379 if (!test_for_valid_rec(rec)) {
7380 /* Clear all other flags. Should not be enabled anyway */
7381 rec->flags = FTRACE_FL_DISABLED;
7382 continue;
7383 }
7384
7385 cnt = 0;
7386
7387 /*
7388 * When adding a module, we need to check if tracers are
7389 * currently enabled. If they are, and they can trace this record,
7390 * we need to enable the module functions as well as update the
7391 * reference counts for those function records.
7392 */
7393 if (ftrace_start_up)
7394 cnt += referenced_filters(rec);
7395
7396 rec->flags &= ~FTRACE_FL_DISABLED;
7397 rec->flags += cnt;
7398
7399 if (ftrace_start_up && cnt) {
7400 int failed = __ftrace_replace_code(rec, 1);
7401 if (failed) {
7402 ftrace_bug(failed, rec);
7403 goto out_loop;
7404 }
7405 }
7406
7407 } while_for_each_ftrace_rec();
7408
7409 out_loop:
7410 if (ftrace_start_up)
7411 ftrace_arch_code_modify_post_process();
7412
7413 out_unlock:
7414 mutex_unlock(&ftrace_lock);
7415
7416 process_cached_mods(mod->name);
7417 }
7418
7419 void ftrace_module_init(struct module *mod)
7420 {
7421 int ret;
7422
7423 if (ftrace_disabled || !mod->num_ftrace_callsites)
7424 return;
7425
7426 ret = ftrace_process_locs(mod, mod->ftrace_callsites,
7427 mod->ftrace_callsites + mod->num_ftrace_callsites);
7428 if (ret)
7429 pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
7430 mod->name);
7431 }
7432
7433 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7434 struct dyn_ftrace *rec)
7435 {
7436 struct ftrace_mod_func *mod_func;
7437 unsigned long symsize;
7438 unsigned long offset;
7439 char str[KSYM_SYMBOL_LEN];
7440 char *modname;
7441 const char *ret;
7442
7443 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
7444 if (!ret)
7445 return;
7446
7447 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
7448 if (!mod_func)
7449 return;
7450
7451 mod_func->name = kstrdup(str, GFP_KERNEL);
7452 if (!mod_func->name) {
7453 kfree(mod_func);
7454 return;
7455 }
7456
7457 mod_func->ip = rec->ip - offset;
7458 mod_func->size = symsize;
7459
7460 mod_map->num_funcs++;
7461
7462 list_add_rcu(&mod_func->list, &mod_map->funcs);
7463 }
7464
7465 static struct ftrace_mod_map *
7466 allocate_ftrace_mod_map(struct module *mod,
7467 unsigned long start, unsigned long end)
7468 {
7469 struct ftrace_mod_map *mod_map;
7470
7471 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
7472 if (!mod_map)
7473 return NULL;
7474
7475 mod_map->mod = mod;
7476 mod_map->start_addr = start;
7477 mod_map->end_addr = end;
7478 mod_map->num_funcs = 0;
7479
7480 INIT_LIST_HEAD_RCU(&mod_map->funcs);
7481
7482 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
7483
7484 return mod_map;
7485 }
7486
7487 static int
7488 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
7489 unsigned long addr, unsigned long *size,
7490 unsigned long *off, char *sym)
7491 {
7492 struct ftrace_mod_func *found_func = NULL;
7493 struct ftrace_mod_func *mod_func;
7494
7495 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7496 if (addr >= mod_func->ip &&
7497 addr < mod_func->ip + mod_func->size) {
7498 found_func = mod_func;
7499 break;
7500 }
7501 }
7502
7503 if (found_func) {
7504 if (size)
7505 *size = found_func->size;
7506 if (off)
7507 *off = addr - found_func->ip;
7508 return strscpy(sym, found_func->name, KSYM_NAME_LEN);
7509 }
7510
7511 return 0;
7512 }
7513
7514 int
7515 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7516 unsigned long *off, char **modname, char *sym)
7517 {
7518 struct ftrace_mod_map *mod_map;
7519 int ret = 0;
7520
7521 /* mod_map is freed via call_rcu() */
7522 preempt_disable();
7523 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7524 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7525 if (ret) {
7526 if (modname)
7527 *modname = mod_map->mod->name;
7528 break;
7529 }
7530 }
7531 preempt_enable();
7532
7533 return ret;
7534 }
7535
7536 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7537 char *type, char *name,
7538 char *module_name, int *exported)
7539 {
7540 struct ftrace_mod_map *mod_map;
7541 struct ftrace_mod_func *mod_func;
7542 int ret;
7543
7544 preempt_disable();
7545 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7546
7547 if (symnum >= mod_map->num_funcs) {
7548 symnum -= mod_map->num_funcs;
7549 continue;
7550 }
7551
7552 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7553 if (symnum > 1) {
7554 symnum--;
7555 continue;
7556 }
7557
7558 *value = mod_func->ip;
7559 *type = 'T';
7560 strscpy(name, mod_func->name, KSYM_NAME_LEN);
7561 strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7562 *exported = 1;
7563 preempt_enable();
7564 return 0;
7565 }
7566 WARN_ON(1);
7567 break;
7568 }
7569 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7570 module_name, exported);
7571 preempt_enable();
7572 return ret;
7573 }
7574
7575 #else
7576 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7577 struct dyn_ftrace *rec) { }
7578 static inline struct ftrace_mod_map *
7579 allocate_ftrace_mod_map(struct module *mod,
7580 unsigned long start, unsigned long end)
7581 {
7582 return NULL;
7583 }
7584 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7585 char *type, char *name, char *module_name,
7586 int *exported)
7587 {
7588 int ret;
7589
7590 preempt_disable();
7591 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7592 module_name, exported);
7593 preempt_enable();
7594 return ret;
7595 }
7596 #endif /* CONFIG_MODULES */
7597
7598 struct ftrace_init_func {
7599 struct list_head list;
7600 unsigned long ip;
7601 };
7602
7603 /* Clear any init ips from hashes */
7604 static void
7605 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
7606 {
7607 struct ftrace_func_entry *entry;
7608
7609 entry = ftrace_lookup_ip(hash, func->ip);
7610 /*
7611 * Do not allow this rec to match again.
7612 * Yeah, it may waste some memory, but will be removed
7613 * if/when the hash is modified again.
7614 */
7615 if (entry)
7616 entry->ip = 0;
7617 }
7618
7619 static void
7620 clear_func_from_hashes(struct ftrace_init_func *func)
7621 {
7622 struct trace_array *tr;
7623
7624 mutex_lock(&trace_types_lock);
7625 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7626 if (!tr->ops || !tr->ops->func_hash)
7627 continue;
7628 mutex_lock(&tr->ops->func_hash->regex_lock);
7629 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7630 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7631 mutex_unlock(&tr->ops->func_hash->regex_lock);
7632 }
7633 mutex_unlock(&trace_types_lock);
7634 }
7635
7636 static void add_to_clear_hash_list(struct list_head *clear_list,
7637 struct dyn_ftrace *rec)
7638 {
7639 struct ftrace_init_func *func;
7640
7641 func = kmalloc(sizeof(*func), GFP_KERNEL);
7642 if (!func) {
7643 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
7644 return;
7645 }
7646
7647 func->ip = rec->ip;
7648 list_add(&func->list, clear_list);
7649 }
7650
7651 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
7652 {
7653 unsigned long start = (unsigned long)(start_ptr);
7654 unsigned long end = (unsigned long)(end_ptr);
7655 struct ftrace_page **last_pg = &ftrace_pages_start;
7656 struct ftrace_page *tmp_page = NULL;
7657 struct ftrace_page *pg;
7658 struct dyn_ftrace *rec;
7659 struct dyn_ftrace key;
7660 struct ftrace_mod_map *mod_map = NULL;
7661 struct ftrace_init_func *func, *func_next;
7662 LIST_HEAD(clear_hash);
7663
7664 key.ip = start;
7665 key.flags = end; /* overload flags, as it is unsigned long */
7666
7667 mutex_lock(&ftrace_lock);
7668
7669 /*
7670 * If we are freeing module init memory, then check if
7671 * any tracer is active. If so, we need to save a mapping of
7672 * the module functions being freed with the address.
7673 */
7674 if (mod && ftrace_ops_list != &ftrace_list_end)
7675 mod_map = allocate_ftrace_mod_map(mod, start, end);
7676
7677 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7678 if (end < pg->records[0].ip ||
7679 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7680 continue;
7681 again:
7682 rec = bsearch(&key, pg->records, pg->index,
7683 sizeof(struct dyn_ftrace),
7684 ftrace_cmp_recs);
7685 if (!rec)
7686 continue;
7687
7688 /* rec will be cleared from hashes after ftrace_lock unlock */
7689 add_to_clear_hash_list(&clear_hash, rec);
7690
7691 if (mod_map)
7692 save_ftrace_mod_rec(mod_map, rec);
7693
7694 pg->index--;
7695 ftrace_update_tot_cnt--;
7696 if (!pg->index) {
7697 *last_pg = pg->next;
7698 pg->next = tmp_page;
7699 tmp_page = pg;
7700 pg = container_of(last_pg, struct ftrace_page, next);
7701 if (!(*last_pg))
7702 ftrace_pages = pg;
7703 continue;
7704 }
7705 memmove(rec, rec + 1,
7706 (pg->index - (rec - pg->records)) * sizeof(*rec));
7707 /* More than one function may be in this block */
7708 goto again;
7709 }
7710 mutex_unlock(&ftrace_lock);
7711
7712 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7713 clear_func_from_hashes(func);
7714 kfree(func);
7715 }
7716 /* Need to synchronize with ftrace_location_range() */
7717 if (tmp_page) {
7718 synchronize_rcu();
7719 ftrace_free_pages(tmp_page);
7720 }
7721 }
7722
7723 void __init ftrace_free_init_mem(void)
7724 {
7725 void *start = (void *)(&__init_begin);
7726 void *end = (void *)(&__init_end);
7727
7728 ftrace_boot_snapshot();
7729
7730 ftrace_free_mem(NULL, start, end);
7731 }
7732
7733 int __init __weak ftrace_dyn_arch_init(void)
7734 {
7735 return 0;
7736 }
7737
7738 void __init ftrace_init(void)
7739 {
7740 extern unsigned long __start_mcount_loc[];
7741 extern unsigned long __stop_mcount_loc[];
7742 unsigned long count, flags;
7743 int ret;
7744
7745 local_irq_save(flags);
7746 ret = ftrace_dyn_arch_init();
7747 local_irq_restore(flags);
7748 if (ret)
7749 goto failed;
7750
7751 count = __stop_mcount_loc - __start_mcount_loc;
7752 if (!count) {
7753 pr_info("ftrace: No functions to be traced?\n");
7754 goto failed;
7755 }
7756
7757 pr_info("ftrace: allocating %ld entries in %ld pages\n",
7758 count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
7759
7760 ret = ftrace_process_locs(NULL,
7761 __start_mcount_loc,
7762 __stop_mcount_loc);
7763 if (ret) {
7764 pr_warn("ftrace: failed to allocate entries for functions\n");
7765 goto failed;
7766 }
7767
7768 pr_info("ftrace: allocated %ld pages with %ld groups\n",
7769 ftrace_number_of_pages, ftrace_number_of_groups);
7770
7771 last_ftrace_enabled = ftrace_enabled = 1;
7772
7773 set_ftrace_early_filters();
7774
7775 return;
7776 failed:
7777 ftrace_disabled = 1;
7778 }
7779
7780 /* Do nothing if arch does not support this */
7781 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7782 {
7783 }
7784
7785 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7786 {
7787 unsigned long trampoline = ops->trampoline;
7788
7789 arch_ftrace_update_trampoline(ops);
7790 if (ops->trampoline && ops->trampoline != trampoline &&
7791 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
7792 /* Add to kallsyms before the perf events */
7793 ftrace_add_trampoline_to_kallsyms(ops);
7794 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7795 ops->trampoline, ops->trampoline_size, false,
7796 FTRACE_TRAMPOLINE_SYM);
7797 /*
7798 * Record the perf text poke event after the ksymbol register
7799 * event.
7800 */
7801 perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7802 (void *)ops->trampoline,
7803 ops->trampoline_size);
7804 }
7805 }
7806
7807 void ftrace_init_trace_array(struct trace_array *tr)
7808 {
7809 INIT_LIST_HEAD(&tr->func_probes);
7810 INIT_LIST_HEAD(&tr->mod_trace);
7811 INIT_LIST_HEAD(&tr->mod_notrace);
7812 }
7813 #else
7814
7815 struct ftrace_ops global_ops = {
7816 .func = ftrace_stub,
7817 .flags = FTRACE_OPS_FL_INITIALIZED |
7818 FTRACE_OPS_FL_PID,
7819 };
7820
7821 static int __init ftrace_nodyn_init(void)
7822 {
7823 ftrace_enabled = 1;
7824 return 0;
7825 }
7826 core_initcall(ftrace_nodyn_init);
7827
7828 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
7829 static inline void ftrace_startup_all(int command) { }
7830
7831 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7832 {
7833 }
7834
7835 #endif /* CONFIG_DYNAMIC_FTRACE */
7836
7837 __init void ftrace_init_global_array_ops(struct trace_array *tr)
7838 {
7839 tr->ops = &global_ops;
7840 tr->ops->private = tr;
7841 ftrace_init_trace_array(tr);
7842 init_array_fgraph_ops(tr, tr->ops);
7843 }
7844
7845 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
7846 {
7847 /* If we filter on pids, update to use the pid function */
7848 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
7849 if (WARN_ON(tr->ops->func != ftrace_stub))
7850 printk("ftrace ops had %pS for function\n",
7851 tr->ops->func);
7852 }
7853 tr->ops->func = func;
7854 tr->ops->private = tr;
7855 }
7856
7857 void ftrace_reset_array_ops(struct trace_array *tr)
7858 {
7859 tr->ops->func = ftrace_stub;
7860 }
7861
7862 static nokprobe_inline void
7863 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7864 struct ftrace_ops *ignored, struct ftrace_regs *fregs)
7865 {
7866 struct pt_regs *regs = ftrace_get_regs(fregs);
7867 struct ftrace_ops *op;
7868 int bit;
7869
7870 /*
7871 * The trace_test_and_set_recursion() call will disable preemption,
7872 * which is required since some of the ops may be dynamically
7873 * allocated; they must be freed only after a synchronize_rcu().
7874 */
7875 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7876 if (bit < 0)
7877 return;
7878
7879 do_for_each_ftrace_op(op, ftrace_ops_list) {
7880 /* Stub functions don't need to be called nor tested */
7881 if (op->flags & FTRACE_OPS_FL_STUB)
7882 continue;
7883 /*
7884 * Check the following for each ops before calling their func:
7885 * if RCU flag is set, then rcu_is_watching() must be true
7886 * Otherwise test if the ip matches the ops filter
7887 *
7888 * If any of the above fails then the op->func() is not executed.
7889 */
7890 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7891 ftrace_ops_test(op, ip, regs)) {
7892 if (FTRACE_WARN_ON(!op->func)) {
7893 pr_warn("op=%p %pS\n", op, op);
7894 goto out;
7895 }
7896 op->func(ip, parent_ip, op, fregs);
7897 }
7898 } while_for_each_ftrace_op(op);
7899 out:
7900 trace_clear_recursion(bit);
7901 }
7902
7903 /*
7904 * Some archs only support passing ip and parent_ip. Even though
7905 * the list function ignores the op parameter, we do not want any
7906 * C side effects, where a function is called without the caller
7907 * sending a third parameter.
7908 * Archs are to support both the regs and ftrace_ops at the same time.
7909 * If they support ftrace_ops, it is assumed they support regs.
7910 * If callbacks want to use regs, they must either check for regs
7911 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
7912 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
7913 * An architecture can pass partial regs with ftrace_ops and still
7914 * set the ARCH_SUPPORTS_FTRACE_OPS.
7915 *
7916 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
7917 * arch_ftrace_ops_list_func.
7918 */
7919 #if ARCH_SUPPORTS_FTRACE_OPS
7920 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7921 struct ftrace_ops *op, struct ftrace_regs *fregs)
7922 {
7923 kmsan_unpoison_memory(fregs, sizeof(*fregs));
7924 __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
7925 }
7926 #else
7927 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
7928 {
7929 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
7930 }
7931 #endif
7932 NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
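
/*
 * Illustrative sketch (not part of the original file): per the comment
 * above, a callback that wants pt_regs must tolerate architectures that
 * pass only partial or no regs. The callback name is hypothetical.
 */
static void notrace __maybe_unused example_regs_cb(unsigned long ip,
						   unsigned long parent_ip,
						   struct ftrace_ops *op,
						   struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	/* NULL unless the arch saved a full pt_regs for this call */
	if (!regs)
		return;

	/* safe to inspect the full register state here */
}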
7933
7934 /*
7935 * If there's only one function registered but it does not support
7936 * recursion or needs RCU protection, then this function will be called
7937 * by the mcount trampoline.
7938 */
7939 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7940 struct ftrace_ops *op, struct ftrace_regs *fregs)
7941 {
7942 int bit;
7943
7944 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7945 if (bit < 0)
7946 return;
7947
7948 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7949 op->func(ip, parent_ip, op, fregs);
7950
7951 trace_clear_recursion(bit);
7952 }
7953 NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7954
7955 /**
7956 * ftrace_ops_get_func - get the function a trampoline should call
7957 * @ops: the ops to get the function for
7958 *
7959 * Normally the mcount trampoline will call the ops->func, but there
7960 * are times that it should not. For example, if the ops does not
7961 * have its own recursion protection, then it should call the
7962 * ftrace_ops_assist_func() instead.
7963 *
7964 * Returns: the function that the trampoline should call for @ops.
7965 */
7966 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7967 {
7968 /*
7969 * If the function does not handle recursion or needs to be RCU safe,
7970 * then we need to call the assist handler.
7971 */
7972 if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7973 FTRACE_OPS_FL_RCU))
7974 return ftrace_ops_assist_func;
7975
7976 return ops->func;
7977 }
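
/*
 * Illustrative sketch (not part of the original file): arch code that
 * builds a private trampoline would pick its call target like this.
 * The helper name is hypothetical.
 */
static void __maybe_unused example_pick_trampoline_target(struct ftrace_ops *ops)
{
	ftrace_func_t func = ftrace_ops_get_func(ops);

	/*
	 * 'func' is either ops->func directly, or ftrace_ops_assist_func()
	 * when the ops still needs recursion or RCU protection.
	 */
	(void)func;
}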
7978
7979 static void
7980 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
7981 struct task_struct *prev,
7982 struct task_struct *next,
7983 unsigned int prev_state)
7984 {
7985 struct trace_array *tr = data;
7986 struct trace_pid_list *pid_list;
7987 struct trace_pid_list *no_pid_list;
7988
7989 pid_list = rcu_dereference_sched(tr->function_pids);
7990 no_pid_list = rcu_dereference_sched(tr->function_no_pids);
7991
7992 if (trace_ignore_this_task(pid_list, no_pid_list, next))
7993 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7994 FTRACE_PID_IGNORE);
7995 else
7996 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7997 next->pid);
7998 }
7999
8000 static void
8001 ftrace_pid_follow_sched_process_fork(void *data,
8002 struct task_struct *self,
8003 struct task_struct *task)
8004 {
8005 struct trace_pid_list *pid_list;
8006 struct trace_array *tr = data;
8007
8008 pid_list = rcu_dereference_sched(tr->function_pids);
8009 trace_filter_add_remove_task(pid_list, self, task);
8010
8011 pid_list = rcu_dereference_sched(tr->function_no_pids);
8012 trace_filter_add_remove_task(pid_list, self, task);
8013 }
8014
8015 static void
8016 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
8017 {
8018 struct trace_pid_list *pid_list;
8019 struct trace_array *tr = data;
8020
8021 pid_list = rcu_dereference_sched(tr->function_pids);
8022 trace_filter_add_remove_task(pid_list, NULL, task);
8023
8024 pid_list = rcu_dereference_sched(tr->function_no_pids);
8025 trace_filter_add_remove_task(pid_list, NULL, task);
8026 }
8027
8028 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
8029 {
8030 if (enable) {
8031 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
8032 tr);
8033 register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
8034 tr);
8035 } else {
8036 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
8037 tr);
8038 unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
8039 tr);
8040 }
8041 }
8042
8043 static void clear_ftrace_pids(struct trace_array *tr, int type)
8044 {
8045 struct trace_pid_list *pid_list;
8046 struct trace_pid_list *no_pid_list;
8047 int cpu;
8048
8049 pid_list = rcu_dereference_protected(tr->function_pids,
8050 lockdep_is_held(&ftrace_lock));
8051 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
8052 lockdep_is_held(&ftrace_lock));
8053
8054 /* Make sure there's something to do */
8055 if (!pid_type_enabled(type, pid_list, no_pid_list))
8056 return;
8057
8058 /* See if the pids still need to be checked after this */
8059 if (!still_need_pid_events(type, pid_list, no_pid_list)) {
8060 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
8061 for_each_possible_cpu(cpu)
8062 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
8063 }
8064
8065 if (type & TRACE_PIDS)
8066 rcu_assign_pointer(tr->function_pids, NULL);
8067
8068 if (type & TRACE_NO_PIDS)
8069 rcu_assign_pointer(tr->function_no_pids, NULL);
8070
8071 /* Wait till all users are no longer using pid filtering */
8072 synchronize_rcu();
8073
8074 if ((type & TRACE_PIDS) && pid_list)
8075 trace_pid_list_free(pid_list);
8076
8077 if ((type & TRACE_NO_PIDS) && no_pid_list)
8078 trace_pid_list_free(no_pid_list);
8079 }
8080
8081 void ftrace_clear_pids(struct trace_array *tr)
8082 {
8083 mutex_lock(&ftrace_lock);
8084
8085 clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
8086
8087 mutex_unlock(&ftrace_lock);
8088 }
8089
8090 static void ftrace_pid_reset(struct trace_array *tr, int type)
8091 {
8092 mutex_lock(&ftrace_lock);
8093 clear_ftrace_pids(tr, type);
8094
8095 ftrace_update_pid_func();
8096 ftrace_startup_all(0);
8097
8098 mutex_unlock(&ftrace_lock);
8099 }
8100
8101 /* Greater than any max PID */
8102 #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
8103
8104 static void *fpid_start(struct seq_file *m, loff_t *pos)
8105 __acquires(RCU)
8106 {
8107 struct trace_pid_list *pid_list;
8108 struct trace_array *tr = m->private;
8109
8110 mutex_lock(&ftrace_lock);
8111 rcu_read_lock_sched();
8112
8113 pid_list = rcu_dereference_sched(tr->function_pids);
8114
8115 if (!pid_list)
8116 return !(*pos) ? FTRACE_NO_PIDS : NULL;
8117
8118 return trace_pid_start(pid_list, pos);
8119 }
8120
8121 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
8122 {
8123 struct trace_array *tr = m->private;
8124 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
8125
8126 if (v == FTRACE_NO_PIDS) {
8127 (*pos)++;
8128 return NULL;
8129 }
8130 return trace_pid_next(pid_list, v, pos);
8131 }
8132
8133 static void fpid_stop(struct seq_file *m, void *p)
8134 __releases(RCU)
8135 {
8136 rcu_read_unlock_sched();
8137 mutex_unlock(&ftrace_lock);
8138 }
8139
8140 static int fpid_show(struct seq_file *m, void *v)
8141 {
8142 if (v == FTRACE_NO_PIDS) {
8143 seq_puts(m, "no pid\n");
8144 return 0;
8145 }
8146
8147 return trace_pid_show(m, v);
8148 }
8149
8150 static const struct seq_operations ftrace_pid_sops = {
8151 .start = fpid_start,
8152 .next = fpid_next,
8153 .stop = fpid_stop,
8154 .show = fpid_show,
8155 };
8156
8157 static void *fnpid_start(struct seq_file *m, loff_t *pos)
8158 __acquires(RCU)
8159 {
8160 struct trace_pid_list *pid_list;
8161 struct trace_array *tr = m->private;
8162
8163 mutex_lock(&ftrace_lock);
8164 rcu_read_lock_sched();
8165
8166 pid_list = rcu_dereference_sched(tr->function_no_pids);
8167
8168 if (!pid_list)
8169 return !(*pos) ? FTRACE_NO_PIDS : NULL;
8170
8171 return trace_pid_start(pid_list, pos);
8172 }
8173
8174 static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
8175 {
8176 struct trace_array *tr = m->private;
8177 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
8178
8179 if (v == FTRACE_NO_PIDS) {
8180 (*pos)++;
8181 return NULL;
8182 }
8183 return trace_pid_next(pid_list, v, pos);
8184 }
8185
8186 static const struct seq_operations ftrace_no_pid_sops = {
8187 .start = fnpid_start,
8188 .next = fnpid_next,
8189 .stop = fpid_stop,
8190 .show = fpid_show,
8191 };
8192
8193 static int pid_open(struct inode *inode, struct file *file, int type)
8194 {
8195 const struct seq_operations *seq_ops;
8196 struct trace_array *tr = inode->i_private;
8197 struct seq_file *m;
8198 int ret = 0;
8199
8200 ret = tracing_check_open_get_tr(tr);
8201 if (ret)
8202 return ret;
8203
8204 if ((file->f_mode & FMODE_WRITE) &&
8205 (file->f_flags & O_TRUNC))
8206 ftrace_pid_reset(tr, type);
8207
8208 switch (type) {
8209 case TRACE_PIDS:
8210 seq_ops = &ftrace_pid_sops;
8211 break;
8212 case TRACE_NO_PIDS:
8213 seq_ops = &ftrace_no_pid_sops;
8214 break;
8215 default:
8216 trace_array_put(tr);
8217 WARN_ON_ONCE(1);
8218 return -EINVAL;
8219 }
8220
8221 ret = seq_open(file, seq_ops);
8222 if (ret < 0) {
8223 trace_array_put(tr);
8224 } else {
8225 m = file->private_data;
8226 /* copy tr over to seq ops */
8227 m->private = tr;
8228 }
8229
8230 return ret;
8231 }
8232
8233 static int
8234 ftrace_pid_open(struct inode *inode, struct file *file)
8235 {
8236 return pid_open(inode, file, TRACE_PIDS);
8237 }
8238
8239 static int
8240 ftrace_no_pid_open(struct inode *inode, struct file *file)
8241 {
8242 return pid_open(inode, file, TRACE_NO_PIDS);
8243 }
8244
8245 static void ignore_task_cpu(void *data)
8246 {
8247 struct trace_array *tr = data;
8248 struct trace_pid_list *pid_list;
8249 struct trace_pid_list *no_pid_list;
8250
8251 /*
8252 * This function is called by on_each_cpu() while
8253 * ftrace_lock is held.
8254 */
8255 pid_list = rcu_dereference_protected(tr->function_pids,
8256 mutex_is_locked(&ftrace_lock));
8257 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
8258 mutex_is_locked(&ftrace_lock));
8259
8260 if (trace_ignore_this_task(pid_list, no_pid_list, current))
8261 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8262 FTRACE_PID_IGNORE);
8263 else
8264 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8265 current->pid);
8266 }
8267
8268 static ssize_t
8269 pid_write(struct file *filp, const char __user *ubuf,
8270 size_t cnt, loff_t *ppos, int type)
8271 {
8272 struct seq_file *m = filp->private_data;
8273 struct trace_array *tr = m->private;
8274 struct trace_pid_list *filtered_pids;
8275 struct trace_pid_list *other_pids;
8276 struct trace_pid_list *pid_list;
8277 ssize_t ret;
8278
8279 if (!cnt)
8280 return 0;
8281
8282 mutex_lock(&ftrace_lock);
8283
8284 switch (type) {
8285 case TRACE_PIDS:
8286 filtered_pids = rcu_dereference_protected(tr->function_pids,
8287 lockdep_is_held(&ftrace_lock));
8288 other_pids = rcu_dereference_protected(tr->function_no_pids,
8289 lockdep_is_held(&ftrace_lock));
8290 break;
8291 case TRACE_NO_PIDS:
8292 filtered_pids = rcu_dereference_protected(tr->function_no_pids,
8293 lockdep_is_held(&ftrace_lock));
8294 other_pids = rcu_dereference_protected(tr->function_pids,
8295 lockdep_is_held(&ftrace_lock));
8296 break;
8297 default:
8298 ret = -EINVAL;
8299 WARN_ON_ONCE(1);
8300 goto out;
8301 }
8302
8303 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
8304 if (ret < 0)
8305 goto out;
8306
8307 switch (type) {
8308 case TRACE_PIDS:
8309 rcu_assign_pointer(tr->function_pids, pid_list);
8310 break;
8311 case TRACE_NO_PIDS:
8312 rcu_assign_pointer(tr->function_no_pids, pid_list);
8313 break;
8314 }
8315
8316
8317 if (filtered_pids) {
8318 synchronize_rcu();
8319 trace_pid_list_free(filtered_pids);
8320 } else if (pid_list && !other_pids) {
8321 /* Register a probe to set whether to ignore the tracing of a task */
8322 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
8323 }
8324
8325 /*
8326 * Ignoring of pids is done at task switch. But we have to
8327 * check for those tasks that are currently running.
8328 * Always do this in case a pid was appended or removed.
8329 */
8330 on_each_cpu(ignore_task_cpu, tr, 1);
8331
8332 ftrace_update_pid_func();
8333 ftrace_startup_all(0);
8334 out:
8335 mutex_unlock(&ftrace_lock);
8336
8337 if (ret > 0)
8338 *ppos += ret;
8339
8340 return ret;
8341 }
8342
8343 static ssize_t
8344 ftrace_pid_write(struct file *filp, const char __user *ubuf,
8345 size_t cnt, loff_t *ppos)
8346 {
8347 return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
8348 }
8349
8350 static ssize_t
8351 ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
8352 size_t cnt, loff_t *ppos)
8353 {
8354 return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
8355 }
8356
8357 static int
8358 ftrace_pid_release(struct inode *inode, struct file *file)
8359 {
8360 struct trace_array *tr = inode->i_private;
8361
8362 trace_array_put(tr);
8363
8364 return seq_release(inode, file);
8365 }
8366
8367 static const struct file_operations ftrace_pid_fops = {
8368 .open = ftrace_pid_open,
8369 .write = ftrace_pid_write,
8370 .read = seq_read,
8371 .llseek = tracing_lseek,
8372 .release = ftrace_pid_release,
8373 };
8374
8375 static const struct file_operations ftrace_no_pid_fops = {
8376 .open = ftrace_no_pid_open,
8377 .write = ftrace_no_pid_write,
8378 .read = seq_read,
8379 .llseek = tracing_lseek,
8380 .release = ftrace_pid_release,
8381 };
8382
8383 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8384 {
8385 trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
8386 tr, &ftrace_pid_fops);
8387 trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
8388 d_tracer, tr, &ftrace_no_pid_fops);
8389 }
8390
8391 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
8392 struct dentry *d_tracer)
8393 {
8394 /* Only the top level directory has the dyn_tracefs and profile */
8395 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
8396
8397 ftrace_init_dyn_tracefs(d_tracer);
8398 ftrace_profile_tracefs(d_tracer);
8399 }
8400
8401 /**
8402 * ftrace_kill - kill ftrace
8403 *
8404 * This function should be used by panic code. It stops ftrace
8405 * but in a not so nice way. To stop tracing cleanly from a
8406 * non-atomic context, unregister the ftrace_ops instead.
8407 */
8408 void ftrace_kill(void)
8409 {
8410 ftrace_disabled = 1;
8411 ftrace_enabled = 0;
8412 ftrace_trace_function = ftrace_stub;
8413 kprobe_ftrace_kill();
8414 }
8415
8416 /**
8417 * ftrace_is_dead - Test if ftrace is dead or not.
8418 *
8419 * Returns: 1 if ftrace is "dead", zero otherwise.
8420 */
8421 int ftrace_is_dead(void)
8422 {
8423 return ftrace_disabled;
8424 }
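
/*
 * Illustrative sketch (not part of the original file): users of the
 * tracing infrastructure typically bail out early once ftrace has been
 * killed. The function name is hypothetical.
 */
static int __maybe_unused example_tracer_setup(void)
{
	if (ftrace_is_dead())
		return -ENODEV;

	/* ... continue registering callbacks ... */
	return 0;
}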
8425
8426 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
8427 /*
8428 * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
8429 * it doesn't conflict with any direct ftrace_ops. If there is an existing
8430 * direct ftrace_ops on a kernel function being patched, call its ops_func
8431 * with FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER to enable sharing.
8432 *
8433 * @ops: ftrace_ops being registered.
8434 *
8435 * Returns:
8436 * 0 on success;
8437 * Negative on failure.
8438 */
8439 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8440 {
8441 struct ftrace_func_entry *entry;
8442 struct ftrace_hash *hash;
8443 struct ftrace_ops *op;
8444 int size, i, ret;
8445
8446 lockdep_assert_held_once(&direct_mutex);
8447
8448 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8449 return 0;
8450
8451 hash = ops->func_hash->filter_hash;
8452 size = 1 << hash->size_bits;
8453 for (i = 0; i < size; i++) {
8454 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8455 unsigned long ip = entry->ip;
8456 bool found_op = false;
8457
8458 mutex_lock(&ftrace_lock);
8459 do_for_each_ftrace_op(op, ftrace_ops_list) {
8460 if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8461 continue;
8462 if (ops_references_ip(op, ip)) {
8463 found_op = true;
8464 break;
8465 }
8466 } while_for_each_ftrace_op(op);
8467 mutex_unlock(&ftrace_lock);
8468
8469 if (found_op) {
8470 if (!op->ops_func)
8471 return -EBUSY;
8472
8473 ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
8474 if (ret)
8475 return ret;
8476 }
8477 }
8478 }
8479
8480 return 0;
8481 }
8482
8483 /*
8484 * Similar to prepare_direct_functions_for_ipmodify, clean up after ops
8485 * with IPMODIFY is unregistered. The cleanup is optional for most DIRECT
8486 * ops.
8487 */
8488 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8489 {
8490 struct ftrace_func_entry *entry;
8491 struct ftrace_hash *hash;
8492 struct ftrace_ops *op;
8493 int size, i;
8494
8495 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8496 return;
8497
8498 mutex_lock(&direct_mutex);
8499
8500 hash = ops->func_hash->filter_hash;
8501 size = 1 << hash->size_bits;
8502 for (i = 0; i < size; i++) {
8503 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8504 unsigned long ip = entry->ip;
8505 bool found_op = false;
8506
8507 mutex_lock(&ftrace_lock);
8508 do_for_each_ftrace_op(op, ftrace_ops_list) {
8509 if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8510 continue;
8511 if (ops_references_ip(op, ip)) {
8512 found_op = true;
8513 break;
8514 }
8515 } while_for_each_ftrace_op(op);
8516 mutex_unlock(&ftrace_lock);
8517
8518 /* The cleanup is optional, ignore any errors */
8519 if (found_op && op->ops_func)
8520 op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
8521 }
8522 }
8523 mutex_unlock(&direct_mutex);
8524 }
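
/*
 * Illustrative sketch (not part of the original file): a DIRECT ops can
 * provide an ops_func so that a newly registered IPMODIFY ops may ask it
 * to share the function entry. The handler below is hypothetical and
 * simply accepts both requests.
 */
static int __maybe_unused example_direct_ops_func(struct ftrace_ops *op,
						  enum ftrace_ops_cmd cmd)
{
	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		/* switch this ops' trampoline to a shareable variant */
		return 0;
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		/* optional cleanup once the IPMODIFY peer goes away */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}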
8525
8526 #define lock_direct_mutex() mutex_lock(&direct_mutex)
8527 #define unlock_direct_mutex() mutex_unlock(&direct_mutex)
8528
8529 #else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8530
8531 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8532 {
8533 return 0;
8534 }
8535
8536 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8537 {
8538 }
8539
8540 #define lock_direct_mutex() do { } while (0)
8541 #define unlock_direct_mutex() do { } while (0)
8542
8543 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8544
8545 /*
8546 * Similar to register_ftrace_function, except we don't lock direct_mutex.
8547 */
8548 static int register_ftrace_function_nolock(struct ftrace_ops *ops)
8549 {
8550 int ret;
8551
8552 ftrace_ops_init(ops);
8553
8554 mutex_lock(&ftrace_lock);
8555
8556 ret = ftrace_startup(ops, 0);
8557
8558 mutex_unlock(&ftrace_lock);
8559
8560 return ret;
8561 }
8562
8563 /**
8564 * register_ftrace_function - register a function for profiling
8565 * @ops: ops structure that holds the function for profiling.
8566 *
8567 * Register a function to be called by all functions in the
8568 * kernel.
8569 *
8570 * Note: @ops->func and all the functions it calls must be labeled
8571 * with "notrace", otherwise it will go into a
8572 * recursive loop.
8573 */
8574 int register_ftrace_function(struct ftrace_ops *ops)
8575 {
8576 int ret;
8577
8578 lock_direct_mutex();
8579 ret = prepare_direct_functions_for_ipmodify(ops);
8580 if (ret < 0)
8581 goto out_unlock;
8582
8583 ret = register_ftrace_function_nolock(ops);
8584
8585 out_unlock:
8586 unlock_direct_mutex();
8587 return ret;
8588 }
8589 EXPORT_SYMBOL_GPL(register_ftrace_function);
8590
8591 /**
8592 * unregister_ftrace_function - unregister a function for profiling.
8593 * @ops: ops structure that holds the function to unregister
8594 *
8595 * Unregister a function that was added to be called by ftrace profiling.
8596 */
8597 int unregister_ftrace_function(struct ftrace_ops *ops)
8598 {
8599 int ret;
8600
8601 mutex_lock(&ftrace_lock);
8602 ret = ftrace_shutdown(ops, 0);
8603 mutex_unlock(&ftrace_lock);
8604
8605 cleanup_direct_functions_after_ipmodify(ops);
8606 return ret;
8607 }
8608 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
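
/*
 * Illustrative sketch (not part of the original file): minimal use of
 * the register/unregister pair from a module. The callback and ops
 * names are hypothetical; note the callback is marked notrace, as the
 * kernel-doc above requires.
 */
static void notrace example_trace_cb(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	/* runs for every function that matches the ops' filter hash */
}

static struct ftrace_ops __maybe_unused example_trace_ops = {
	.func = example_trace_cb,
};

/*
 * register_ftrace_function(&example_trace_ops) starts tracing;
 * unregister_ftrace_function(&example_trace_ops) tears it down again.
 */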
8609
8610 static int symbols_cmp(const void *a, const void *b)
8611 {
8612 const char **str_a = (const char **) a;
8613 const char **str_b = (const char **) b;
8614
8615 return strcmp(*str_a, *str_b);
8616 }
8617
8618 struct kallsyms_data {
8619 unsigned long *addrs;
8620 const char **syms;
8621 size_t cnt;
8622 size_t found;
8623 };
8624
8625 /* This function gets called for all kernel and module symbols
8626 * and returns 1 in case we resolved all the requested symbols,
8627 * 0 otherwise.
8628 */
8629 static int kallsyms_callback(void *data, const char *name, unsigned long addr)
8630 {
8631 struct kallsyms_data *args = data;
8632 const char **sym;
8633 int idx;
8634
8635 sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8636 if (!sym)
8637 return 0;
8638
8639 idx = sym - args->syms;
8640 if (args->addrs[idx])
8641 return 0;
8642
8643 if (!ftrace_location(addr))
8644 return 0;
8645
8646 args->addrs[idx] = addr;
8647 args->found++;
8648 return args->found == args->cnt ? 1 : 0;
8649 }
8650
8651 /**
8652 * ftrace_lookup_symbols - Lookup addresses for array of symbols
8653 *
8654 * @sorted_syms: array of pointers to the symbols to resolve,
8655 * must be alphabetically sorted
8656 * @cnt: number of symbols/addresses in the @sorted_syms/@addrs arrays
8657 * @addrs: array for storing resulting addresses
8658 *
8659 * This function looks up the addresses of the symbols provided in the
8660 * @sorted_syms array (must be alphabetically sorted) and stores them in the
8661 * @addrs array, which needs to be big enough to store at least @cnt
8662 * addresses.
8663 *
8664 * Returns: 0 if all provided symbols are found, -ESRCH otherwise.
8665 */
8666 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
8667 {
8668 struct kallsyms_data args;
8669 int found_all;
8670
8671 memset(addrs, 0, sizeof(*addrs) * cnt);
8672 args.addrs = addrs;
8673 args.syms = sorted_syms;
8674 args.cnt = cnt;
8675 args.found = 0;
8676
8677 found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
8678 if (found_all)
8679 return 0;
8680 found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
8681 return found_all ? 0 : -ESRCH;
8682 }
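
/*
 * Illustrative sketch (not part of the original file): resolving a
 * small, alphabetically sorted set of symbols. The symbol names and the
 * wrapper are hypothetical; the caller must size the address array to
 * hold @cnt entries.
 */
static int __maybe_unused example_resolve_symbols(void)
{
	static const char *syms[] = { "schedule", "vfs_read" };	/* sorted */
	unsigned long addrs[ARRAY_SIZE(syms)];

	/* returns 0 only if every symbol was found at an ftrace location */
	return ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
}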
8683
8684 #ifdef CONFIG_SYSCTL
8685
8686 #ifdef CONFIG_DYNAMIC_FTRACE
8687 static void ftrace_startup_sysctl(void)
8688 {
8689 int command;
8690
8691 if (unlikely(ftrace_disabled))
8692 return;
8693
8694 /* Force update next time */
8695 saved_ftrace_func = NULL;
8696 /* ftrace_start_up is true if we want ftrace running */
8697 if (ftrace_start_up) {
8698 command = FTRACE_UPDATE_CALLS;
8699 if (ftrace_graph_active)
8700 command |= FTRACE_START_FUNC_RET;
8701 ftrace_startup_enable(command);
8702 }
8703 }
8704
8705 static void ftrace_shutdown_sysctl(void)
8706 {
8707 int command;
8708
8709 if (unlikely(ftrace_disabled))
8710 return;
8711
8712 /* ftrace_start_up is true if ftrace is running */
8713 if (ftrace_start_up) {
8714 command = FTRACE_DISABLE_CALLS;
8715 if (ftrace_graph_active)
8716 command |= FTRACE_STOP_FUNC_RET;
8717 ftrace_run_update_code(command);
8718 }
8719 }
8720 #else
8721 # define ftrace_startup_sysctl() do { } while (0)
8722 # define ftrace_shutdown_sysctl() do { } while (0)
8723 #endif /* CONFIG_DYNAMIC_FTRACE */
8724
8725 static bool is_permanent_ops_registered(void)
8726 {
8727 struct ftrace_ops *op;
8728
8729 do_for_each_ftrace_op(op, ftrace_ops_list) {
8730 if (op->flags & FTRACE_OPS_FL_PERMANENT)
8731 return true;
8732 } while_for_each_ftrace_op(op);
8733
8734 return false;
8735 }
8736
8737 static int
8738 ftrace_enable_sysctl(const struct ctl_table *table, int write,
8739 void *buffer, size_t *lenp, loff_t *ppos)
8740 {
8741 int ret = -ENODEV;
8742
8743 mutex_lock(&ftrace_lock);
8744
8745 if (unlikely(ftrace_disabled))
8746 goto out;
8747
8748 ret = proc_dointvec(table, write, buffer, lenp, ppos);
8749
8750 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
8751 goto out;
8752
8753 if (ftrace_enabled) {
8754
8755 /* we are starting ftrace again */
8756 if (rcu_dereference_protected(ftrace_ops_list,
8757 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
8758 update_ftrace_function();
8759
8760 ftrace_startup_sysctl();
8761
8762 } else {
8763 if (is_permanent_ops_registered()) {
8764 ftrace_enabled = true;
8765 ret = -EBUSY;
8766 goto out;
8767 }
8768
8769 /* stopping ftrace calls (just send to ftrace_stub) */
8770 ftrace_trace_function = ftrace_stub;
8771
8772 ftrace_shutdown_sysctl();
8773 }
8774
8775 last_ftrace_enabled = !!ftrace_enabled;
8776 out:
8777 mutex_unlock(&ftrace_lock);
8778 return ret;
8779 }
8780
8781 static struct ctl_table ftrace_sysctls[] = {
8782 {
8783 .procname = "ftrace_enabled",
8784 .data = &ftrace_enabled,
8785 .maxlen = sizeof(int),
8786 .mode = 0644,
8787 .proc_handler = ftrace_enable_sysctl,
8788 },
8789 };
8790
8791 static int __init ftrace_sysctl_init(void)
8792 {
8793 register_sysctl_init("kernel", ftrace_sysctls);
8794 return 0;
8795 }
8796 late_initcall(ftrace_sysctl_init);
8797 #endif
8798