
1 // SPDX-License-Identifier: GPL-2.0
3 * Infrastructure for profiling code inserted by 'gcc -pg'.
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
8 * Originally ported from the -rt patch by:
13 * Copyright (C) 2004-2006 Ingo Molnar
107 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private) in ftrace_pids_enabled()
110 tr = ops->private; in ftrace_pids_enabled()
112 return tr->function_pids != NULL || tr->function_no_pids != NULL; in ftrace_pids_enabled()
163 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { in ftrace_ops_init()
164 mutex_init(&ops->local_hash.regex_lock); in ftrace_ops_init()
165 INIT_LIST_HEAD(&ops->subop_list); in ftrace_ops_init()
166 ops->func_hash = &ops->local_hash; in ftrace_ops_init()
167 ops->flags |= FTRACE_OPS_FL_INITIALIZED; in ftrace_ops_init()
176 struct trace_array *tr = op->private; in ftrace_pid_func()
180 pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); in ftrace_pid_func()
184 pid != current->pid) in ftrace_pid_func()
188 op->saved_func(ip, parent_ip, op, fregs); in ftrace_pid_func()
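
/*
 * A minimal sketch of the user-visible side of the PID filtering that
 * ftrace_pid_func() above implements: userspace writes PIDs into the tracefs
 * "set_ftrace_pid" file, which is what tr->function_pids and the per-CPU
 * ftrace_ignore_pid value reflect.  Assumes tracefs is mounted at
 * /sys/kernel/tracing.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/set_ftrace_pid", "w");

	if (!f) {
		perror("set_ftrace_pid");
		return 1;
	}
	/* Restrict the function tracer to this process only. */
	fprintf(f, "%d\n", getpid());
	fclose(f);

	/* ... run the workload to be traced ... */
	return 0;
}
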
203 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || in ftrace_ops_get_list_func()
231 } else if (rcu_dereference_protected(ftrace_ops_list->next, in update_ftrace_function()
290 rcu_assign_pointer(ops->next, *list); in add_ftrace_ops()
295 * the ops->next pointer is valid before another CPU sees in add_ftrace_ops()
312 rcu_dereference_protected(ops->next, in remove_ftrace_ops()
318 for (p = list; *p != &ftrace_list_end; p = &(*p)->next) in remove_ftrace_ops()
323 return -1; in remove_ftrace_ops()
325 *p = (*p)->next; in remove_ftrace_ops()
333 if (ops->flags & FTRACE_OPS_FL_DELETED) in __register_ftrace_function()
334 return -EINVAL; in __register_ftrace_function()
336 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) in __register_ftrace_function()
337 return -EBUSY; in __register_ftrace_function()
345 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && in __register_ftrace_function()
346 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) in __register_ftrace_function()
347 return -EINVAL; in __register_ftrace_function()
349 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) in __register_ftrace_function()
350 ops->flags |= FTRACE_OPS_FL_SAVE_REGS; in __register_ftrace_function()
352 if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT)) in __register_ftrace_function()
353 return -EBUSY; in __register_ftrace_function()
356 ops->flags |= FTRACE_OPS_FL_DYNAMIC; in __register_ftrace_function()
361 ops->saved_func = ops->func; in __register_ftrace_function()
364 ops->func = ftrace_pid_func; in __register_ftrace_function()
378 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) in __unregister_ftrace_function()
379 return -EBUSY; in __unregister_ftrace_function()
389 ops->func = ops->saved_func; in __unregister_ftrace_function()
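
/*
 * A minimal module-style sketch of the client side of
 * __register_ftrace_function()/__unregister_ftrace_function() above, i.e. the
 * exported register_ftrace_function()/unregister_ftrace_function() API.
 * Assumes a recent kernel where callbacks take a struct ftrace_regs pointer;
 * "my_callback" and "my_ops" are names invented for the example.
 */
#include <linux/ftrace.h>
#include <linux/module.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Called for every traced function that passes this ops' hashes. */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	/* Ask for full pt_regs only where the architecture supports it. */
	.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
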
403 if (op->flags & FTRACE_OPS_FL_PID) { in ftrace_update_pid_func()
404 op->func = ftrace_pids_enabled(op) ? in ftrace_update_pid_func()
405 ftrace_pid_func : op->saved_func; in ftrace_update_pid_func()
441 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
448 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
468 if ((void *)rec >= (void *)&pg->records[pg->index]) { in function_stat_next()
469 pg = pg->next; in function_stat_next()
472 rec = &pg->records[0]; in function_stat_next()
473 if (!rec->counter) in function_stat_next()
485 if (!stat || !stat->start) in function_stat_start()
488 return function_stat_next(&stat->start->records[0], 0); in function_stat_start()
498 if (a->time < b->time) in function_stat_cmp()
499 return -1; in function_stat_cmp()
500 if (a->time > b->time) in function_stat_cmp()
512 if (a->counter < b->counter) in function_stat_cmp()
513 return -1; in function_stat_cmp()
514 if (a->counter > b->counter) in function_stat_cmp()
526 " -------- " in function_stat_headers()
527 "--- ---- --- ---\n"); in function_stat_headers()
530 " -------- ---\n"); in function_stat_headers()
548 if (unlikely(rec->counter == 0)) in function_stat_show()
549 return -EBUSY; in function_stat_show()
552 avg = div64_ul(rec->time, rec->counter); in function_stat_show()
557 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); in function_stat_show()
558 seq_printf(m, " %-30.30s %10lu", str, rec->counter); in function_stat_show()
565 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) in function_stat_show()
567 * Divide only by 1000 for ns^2 -> us^2 conversion. in function_stat_show()
571 stddev_denom = rec->counter * (rec->counter - 1) * 1000; in function_stat_show()
573 stddev = rec->counter * rec->time_squared - in function_stat_show()
574 rec->time * rec->time; in function_stat_show()
579 trace_print_graph_duration(rec->time, &s); in function_stat_show()
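
/*
 * A standalone sketch of the bookkeeping function_stat_show() above relies on:
 * the sample variance is recovered from just two running sums via
 *   s^2 = (n * sum(x_i^2) - (sum x_i)^2) / (n * (n - 1)).
 * The extra divide by 1000 in ftrace.c is only the ns^2 -> us^2 conversion and
 * is omitted here; the sample values are made up.
 */
#include <stdio.h>

int main(void)
{
	const double samples[] = { 120.0, 130.0, 110.0, 140.0, 125.0 };
	const int n = sizeof(samples) / sizeof(samples[0]);
	double sum = 0.0, sum_sq = 0.0;	/* rec->time and rec->time_squared analogues */

	for (int i = 0; i < n; i++) {
		sum += samples[i];
		sum_sq += samples[i] * samples[i];
	}

	printf("avg = %.2f  s^2 = %.2f\n",
	       sum / n,
	       (n * sum_sq - sum * sum) / ((double)n * (n - 1)));
	return 0;
}
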
595 pg = stat->pages = stat->start; in ftrace_profile_reset()
598 memset(pg->records, 0, PROFILE_RECORDS_SIZE); in ftrace_profile_reset()
599 pg->index = 0; in ftrace_profile_reset()
600 pg = pg->next; in ftrace_profile_reset()
603 memset(stat->hash, 0, in ftrace_profile_reset()
615 if (stat->pages) in ftrace_profile_pages_init()
618 stat->pages = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
619 if (!stat->pages) in ftrace_profile_pages_init()
620 return -ENOMEM; in ftrace_profile_pages_init()
635 pg = stat->start = stat->pages; in ftrace_profile_pages_init()
640 pg->next = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
641 if (!pg->next) in ftrace_profile_pages_init()
643 pg = pg->next; in ftrace_profile_pages_init()
649 pg = stat->start; in ftrace_profile_pages_init()
653 pg = pg->next; in ftrace_profile_pages_init()
657 stat->pages = NULL; in ftrace_profile_pages_init()
658 stat->start = NULL; in ftrace_profile_pages_init()
660 return -ENOMEM; in ftrace_profile_pages_init()
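
/*
 * A standalone sketch of the capacity arithmetic behind
 * ftrace_profile_pages_init() above: each zeroed page holds a small header
 * followed by as many records as fit in the remainder.  Mock struct layouts
 * and a fixed 4096-byte page stand in for the real ftrace_profile types.
 */
#include <stddef.h>
#include <stdio.h>

#define MOCK_PAGE_SIZE	4096UL

struct profile_rec {			/* stand-in for struct ftrace_profile */
	unsigned long ip;
	unsigned long counter;
	unsigned long long time;
	unsigned long long time_squared;
};

struct profile_page {			/* stand-in for struct ftrace_profile_page */
	struct profile_page *next;
	unsigned long index;
	struct profile_rec records[];
};

int main(void)
{
	size_t records_size = MOCK_PAGE_SIZE - offsetof(struct profile_page, records);

	printf("%zu bytes of records per page, %zu records per page\n",
	       records_size, records_size / sizeof(struct profile_rec));
	return 0;
}
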
670 if (stat->hash) { in ftrace_profile_init_cpu()
682 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL); in ftrace_profile_init_cpu()
684 if (!stat->hash) in ftrace_profile_init_cpu()
685 return -ENOMEM; in ftrace_profile_init_cpu()
689 kfree(stat->hash); in ftrace_profile_init_cpu()
690 stat->hash = NULL; in ftrace_profile_init_cpu()
691 return -ENOMEM; in ftrace_profile_init_cpu()
720 hhd = &stat->hash[key]; in ftrace_find_profiled_func()
726 if (rec->ip == ip) in ftrace_find_profiled_func()
738 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); in ftrace_add_profile()
739 hlist_add_head_rcu(&rec->node, &stat->hash[key]); in ftrace_add_profile()
751 if (atomic_inc_return(&stat->disabled) != 1) in ftrace_profile_alloc()
762 if (stat->pages->index == PROFILES_PER_PAGE) { in ftrace_profile_alloc()
763 if (!stat->pages->next) in ftrace_profile_alloc()
765 stat->pages = stat->pages->next; in ftrace_profile_alloc()
768 rec = &stat->pages->records[stat->pages->index++]; in ftrace_profile_alloc()
769 rec->ip = ip; in ftrace_profile_alloc()
773 atomic_dec(&stat->disabled); in ftrace_profile_alloc()
791 if (!stat->hash || !ftrace_profile_enabled) in function_profile_call()
801 rec->counter++; in function_profile_call()
824 function_profile_call(trace->func, 0, NULL, NULL); in profile_graph_entry()
827 if (!current->ret_stack) in profile_graph_entry()
830 profile_data = fgraph_reserve_data(gops->idx, sizeof(*profile_data)); in profile_graph_entry()
834 profile_data->subtime = 0; in profile_graph_entry()
835 profile_data->sleeptime = current->ftrace_sleeptime; in profile_graph_entry()
836 profile_data->calltime = trace_clock_local(); in profile_graph_entry()
855 if (!stat->hash || !ftrace_profile_enabled) in profile_graph_return()
858 profile_data = fgraph_retrieve_data(gops->idx, &size); in profile_graph_return()
861 if (!profile_data || !profile_data->calltime) in profile_graph_return()
864 calltime = rettime - profile_data->calltime; in profile_graph_return()
867 if (current->ftrace_sleeptime) in profile_graph_return()
868 calltime -= current->ftrace_sleeptime - profile_data->sleeptime; in profile_graph_return()
875 parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1); in profile_graph_return()
877 parent_data->subtime += calltime; in profile_graph_return()
879 if (profile_data->subtime && profile_data->subtime < calltime) in profile_graph_return()
880 calltime -= profile_data->subtime; in profile_graph_return()
885 rec = ftrace_find_profiled_func(stat, trace->func); in profile_graph_return()
887 rec->time += calltime; in profile_graph_return()
888 rec->time_squared += calltime * calltime; in profile_graph_return()
1012 stat->stat = function_stats; in ftrace_profile_tracefs()
1013 stat->stat.name = name; in ftrace_profile_tracefs()
1014 ret = register_stat_tracer(&stat->stat); in ftrace_profile_tracefs()
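
/*
 * A minimal userspace sketch of the files ftrace_profile_tracefs() above
 * creates: "function_profile_enabled" switches the profiler on, and the
 * per-CPU statistics land under "trace_stat/function<cpu>".  Assumes tracefs
 * is mounted at /sys/kernel/tracing and only CPU 0 is read.
 */
#include <stdio.h>

int main(void)
{
	FILE *ctl = fopen("/sys/kernel/tracing/function_profile_enabled", "w");
	char line[256];

	if (!ctl) {
		perror("function_profile_enabled");
		return 1;
	}
	fputs("1\n", ctl);		/* start profiling */
	fclose(ctl);

	/* ... let the workload run for a while ... */

	FILE *stat = fopen("/sys/kernel/tracing/trace_stat/function0", "r");
	if (!stat) {
		perror("trace_stat/function0");
		return 1;
	}
	while (fgets(line, sizeof(line), stat))
		fputs(line, stdout);	/* Function / Hit / Time / Avg / s^2 columns */
	fclose(stat);
	return 0;
}
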
1098 if (op->trampoline && op->trampoline_size) in ftrace_ops_trampoline()
1099 if (addr >= op->trampoline && in ftrace_ops_trampoline()
1100 addr < op->trampoline + op->trampoline_size) { in ftrace_ops_trampoline()
1137 if (hash->size_bits > 0) in ftrace_hash_key()
1138 return hash_long(ip, hash->size_bits); in ftrace_hash_key()
1152 hhd = &hash->buckets[key]; in __ftrace_lookup_ip()
1155 if (entry->ip == ip) in __ftrace_lookup_ip()
1162 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1186 key = ftrace_hash_key(hash, entry->ip); in __add_hash_entry()
1187 hhd = &hash->buckets[key]; in __add_hash_entry()
1188 hlist_add_head(&entry->hlist, hhd); in __add_hash_entry()
1189 hash->count++; in __add_hash_entry()
1201 entry->ip = ip; in add_hash_entry()
1211 hlist_del(&entry->hlist); in free_hash_entry()
1213 hash->count--; in free_hash_entry()
1220 hlist_del_rcu(&entry->hlist); in remove_hash_entry()
1221 hash->count--; in remove_hash_entry()
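
/*
 * A standalone sketch of the data structure __ftrace_lookup_ip() and
 * __add_hash_entry() above operate on: a power-of-two array of singly linked
 * buckets keyed by instruction address.  Plain libc only; a trivial hash
 * stands in for hash_long().
 */
#include <stdio.h>
#include <stdlib.h>

#define SIZE_BITS	4
#define NR_BUCKETS	(1u << SIZE_BITS)

struct entry {
	unsigned long ip;
	struct entry *next;
};

static struct entry *buckets[NR_BUCKETS];

static unsigned int hash_ip(unsigned long ip)
{
	return (unsigned int)(ip ^ (ip >> SIZE_BITS)) & (NR_BUCKETS - 1);
}

static struct entry *lookup_ip(unsigned long ip)
{
	for (struct entry *e = buckets[hash_ip(ip)]; e; e = e->next)
		if (e->ip == ip)	/* mirrors the hlist walk in __ftrace_lookup_ip() */
			return e;
	return NULL;
}

static struct entry *add_ip(unsigned long ip)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return NULL;
	e->ip = ip;
	e->next = buckets[hash_ip(ip)];	/* head insertion, like hlist_add_head() */
	buckets[hash_ip(ip)] = e;
	return e;
}

int main(void)
{
	add_ip(0x81000100UL);
	printf("%d %d\n",
	       lookup_ip(0x81000100UL) != NULL,		/* 1 */
	       lookup_ip(0x81000200UL) != NULL);	/* 0 */
	return 0;
}
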
1229 int size = 1 << hash->size_bits; in ftrace_hash_clear()
1232 if (!hash->count) in ftrace_hash_clear()
1236 hhd = &hash->buckets[i]; in ftrace_hash_clear()
1240 FTRACE_WARN_ON(hash->count); in ftrace_hash_clear()
1245 list_del(&ftrace_mod->list); in free_ftrace_mod()
1246 kfree(ftrace_mod->module); in free_ftrace_mod()
1247 kfree(ftrace_mod->func); in free_ftrace_mod()
1270 kfree(hash->buckets); in free_ftrace_hash()
1286 call_rcu(&hash->rcu, __free_ftrace_hash_rcu); in free_ftrace_hash_rcu()
1290 * ftrace_free_filter - remove all filters for an ftrace_ops
1296 free_ftrace_hash(ops->func_hash->filter_hash); in ftrace_free_filter()
1297 free_ftrace_hash(ops->func_hash->notrace_hash); in ftrace_free_filter()
1311 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); in alloc_ftrace_hash()
1313 if (!hash->buckets) { in alloc_ftrace_hash()
1318 hash->size_bits = size_bits; in alloc_ftrace_hash()
1329 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; in ftrace_add_mod()
1333 return -ENOMEM; in ftrace_add_mod()
1335 INIT_LIST_HEAD(&ftrace_mod->list); in ftrace_add_mod()
1336 ftrace_mod->func = kstrdup(func, GFP_KERNEL); in ftrace_add_mod()
1337 ftrace_mod->module = kstrdup(module, GFP_KERNEL); in ftrace_add_mod()
1338 ftrace_mod->enable = enable; in ftrace_add_mod()
1340 if (!ftrace_mod->func || !ftrace_mod->module) in ftrace_add_mod()
1343 list_add(&ftrace_mod->list, mod_head); in ftrace_add_mod()
1350 return -ENOMEM; in ftrace_add_mod()
1366 new_hash->flags = hash->flags; in alloc_and_copy_ftrace_hash()
1372 size = 1 << hash->size_bits; in alloc_and_copy_ftrace_hash()
1374 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in alloc_and_copy_ftrace_hash()
1375 if (add_hash_entry(new_hash, entry->ip) == NULL) in alloc_and_copy_ftrace_hash()
1380 FTRACE_WARN_ON(new_hash->count != hash->count); in alloc_and_copy_ftrace_hash()
1422 new_hash->flags = src->flags; in __move_hash()
1424 size = 1 << src->size_bits; in __move_hash()
1426 hhd = &src->buckets[i]; in __move_hash()
1439 int size = src->count; in __ftrace_hash_move()
1451 * ftrace_hash_move - move a new hash to a filter and do updates
1478 if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable) in ftrace_hash_move()
1479 return -EINVAL; in ftrace_hash_move()
1483 return -ENOMEM; in ftrace_hash_move()
1517 return (ftrace_hash_empty(hash->filter_hash) || in hash_contains_ip()
1518 __ftrace_lookup_ip(hash->filter_hash, ip)) && in hash_contains_ip()
1519 (ftrace_hash_empty(hash->notrace_hash) || in hash_contains_ip()
1520 !__ftrace_lookup_ip(hash->notrace_hash, ip)); in hash_contains_ip()
1525 * the ops->func or not.
1527 * It's a match if the ip is in the ops->filter_hash or
1530 * the ip is not in the ops->notrace_hash.
1547 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) in ftrace_ops_test()
1551 rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash); in ftrace_ops_test()
1552 rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash); in ftrace_ops_test()
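
/*
 * A standalone truth-table sketch of the predicate hash_contains_ip() and
 * ftrace_ops_test() above compute: an ops traces an ip when the ip is in the
 * filter hash (or the filter hash is empty, meaning "all functions") and is
 * not in the notrace hash.  Each hash is modelled as a plain array here.
 */
#include <stdbool.h>
#include <stdio.h>

static bool in_set(const unsigned long *set, int n, unsigned long ip)
{
	for (int i = 0; i < n; i++)
		if (set[i] == ip)
			return true;
	return false;
}

static bool ops_traces_ip(const unsigned long *filter, int nfilter,
			  const unsigned long *notrace, int nnotrace,
			  unsigned long ip)
{
	/* empty filter == trace everything; notrace always wins */
	return (nfilter == 0 || in_set(filter, nfilter, ip)) &&
	       !in_set(notrace, nnotrace, ip);
}

int main(void)
{
	unsigned long filter[] = { 0x1000, 0x2000 };
	unsigned long notrace[] = { 0x2000 };

	printf("%d %d %d\n",
	       ops_traces_ip(filter, 2, notrace, 1, 0x1000),	/* 1 */
	       ops_traces_ip(filter, 2, notrace, 1, 0x2000),	/* 0 */
	       ops_traces_ip(NULL, 0, notrace, 1, 0x3000));	/* 1 */
	return 0;
}
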
1567 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1569 for (_____i = 0; _____i < pg->index; _____i++) { \
1570 rec = &pg->records[_____i];
1582 if (key->flags < rec->ip) in ftrace_cmp_recs()
1583 return -1; in ftrace_cmp_recs()
1584 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE) in ftrace_cmp_recs()
1598 for (pg = ftrace_pages_start; pg; pg = pg->next) { in lookup_rec()
1599 if (pg->index == 0 || in lookup_rec()
1600 end < pg->records[0].ip || in lookup_rec()
1601 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) in lookup_rec()
1603 rec = bsearch(&key, pg->records, pg->index, in lookup_rec()
1613 * ftrace_location_range - return the first address of a traced location
1619 * Returns: rec->ip if the related ftrace location is at least partly within
1632 ip = rec->ip; in ftrace_location_range()
1639 * ftrace_location - return the ftrace location
1660 loc = ftrace_location_range(ip, ip + size - 1); in ftrace_location()
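
/*
 * A standalone sketch of the "range as key" bsearch used by lookup_rec() and
 * ftrace_location_range() above: the comparator matches any record whose
 * [ip, ip + insn size) overlaps the queried [start, end].  A mock table and a
 * 5-byte call-site size stand in for the real dyn_ftrace records and
 * MCOUNT_INSN_SIZE.
 */
#include <stdio.h>
#include <stdlib.h>

#define INSN_SIZE	5UL

struct rec { unsigned long ip; };
struct range_key { unsigned long start, end; };

static int cmp_recs(const void *a, const void *b)
{
	const struct range_key *key = a;
	const struct rec *rec = b;

	if (key->end < rec->ip)
		return -1;			/* whole range below this record */
	if (key->start >= rec->ip + INSN_SIZE)
		return 1;			/* whole range above this record */
	return 0;				/* overlap: found */
}

int main(void)
{
	struct rec recs[] = { { 0x1000 }, { 0x1040 }, { 0x1080 } };	/* sorted by ip */
	struct range_key key = { .start = 0x1042, .end = 0x1042 };
	struct rec *found = bsearch(&key, recs, 3, sizeof(recs[0]), cmp_recs);

	printf("hit at %#lx\n", found ? found->ip : 0UL);	/* 0x1040 */
	return 0;
}
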
1666 * ftrace_text_reserved - return true if range contains an ftrace location
1692 ops != &ftrace_list_end; ops = ops->next) { in test_rec_ops_needs_regs()
1693 /* pass rec in as regs to have non-NULL val */ in test_rec_ops_needs_regs()
1694 if (ftrace_ops_test(ops, rec->ip, rec)) { in test_rec_ops_needs_regs()
1695 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { in test_rec_ops_needs_regs()
1720 return rec->flags & FTRACE_FL_DISABLED && in skip_record()
1721 !(rec->flags & FTRACE_FL_ENABLED); in skip_record()
1746 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in __ftrace_hash_rec_update()
1753 hash = ops->func_hash->filter_hash; in __ftrace_hash_rec_update()
1754 notrace_hash = ops->func_hash->notrace_hash; in __ftrace_hash_rec_update()
1771 if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip)) in __ftrace_hash_rec_update()
1774 in_hash = !!ftrace_lookup_ip(hash, rec->ip); in __ftrace_hash_rec_update()
1775 in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip); in __ftrace_hash_rec_update()
1788 rec->flags++; in __ftrace_hash_rec_update()
1792 if (ops->flags & FTRACE_OPS_FL_DIRECT) in __ftrace_hash_rec_update()
1793 rec->flags |= FTRACE_FL_DIRECT; in __ftrace_hash_rec_update()
1800 if (ftrace_rec_count(rec) == 1 && ops->trampoline) in __ftrace_hash_rec_update()
1801 rec->flags |= FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1809 rec->flags &= ~FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1815 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) in __ftrace_hash_rec_update()
1816 rec->flags |= FTRACE_FL_REGS; in __ftrace_hash_rec_update()
1820 rec->flags--; in __ftrace_hash_rec_update()
1828 if (ops->flags & FTRACE_OPS_FL_DIRECT) in __ftrace_hash_rec_update()
1829 rec->flags &= ~FTRACE_FL_DIRECT; in __ftrace_hash_rec_update()
1838 rec->flags & FTRACE_FL_REGS && in __ftrace_hash_rec_update()
1839 ops->flags & FTRACE_OPS_FL_SAVE_REGS) { in __ftrace_hash_rec_update()
1841 rec->flags &= ~FTRACE_FL_REGS; in __ftrace_hash_rec_update()
1853 rec->flags |= FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1855 rec->flags &= ~FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1864 * If the rec has a single associated ops, and ops->func can be in __ftrace_hash_rec_update()
1869 ftrace_ops_get_func(ops) == ops->func) in __ftrace_hash_rec_update()
1870 rec->flags |= FTRACE_FL_CALL_OPS; in __ftrace_hash_rec_update()
1872 rec->flags &= ~FTRACE_FL_CALL_OPS; in __ftrace_hash_rec_update()
1880 if (!all && count == hash->count) in __ftrace_hash_rec_update()
1916 * ops->hash = new_hash
1930 if (ops->func_hash != &global_ops.local_hash) in ftrace_hash_rec_update_modify()
1941 if (op->func_hash == &global_ops.local_hash) in ftrace_hash_rec_update_modify()
1958 * or no update is needed, -EBUSY if it detects a conflict of the flag
1959 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1961 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1962 * - If the hash is EMPTY_HASH, it hits nothing
1963 * - Anything else hits the recs which match the hash entries.
1968 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
1982 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in __ftrace_hash_update_ipmodify()
1985 is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
1986 is_direct = ops->flags & FTRACE_OPS_FL_DIRECT; in __ftrace_hash_update_ipmodify()
2001 return -EINVAL; in __ftrace_hash_update_ipmodify()
2003 /* Update rec->flags */ in __ftrace_hash_update_ipmodify()
2006 if (rec->flags & FTRACE_FL_DISABLED) in __ftrace_hash_update_ipmodify()
2010 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); in __ftrace_hash_update_ipmodify()
2011 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); in __ftrace_hash_update_ipmodify()
2016 if (rec->flags & FTRACE_FL_IPMODIFY) { in __ftrace_hash_update_ipmodify()
2023 FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT); in __ftrace_hash_update_ipmodify()
2031 if (!ops->ops_func) in __ftrace_hash_update_ipmodify()
2032 return -EBUSY; in __ftrace_hash_update_ipmodify()
2033 ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF); in __ftrace_hash_update_ipmodify()
2037 rec->flags |= FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
2040 rec->flags &= ~FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
2052 if (rec->flags & FTRACE_FL_DISABLED) in __ftrace_hash_update_ipmodify()
2056 return -EBUSY; in __ftrace_hash_update_ipmodify()
2058 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); in __ftrace_hash_update_ipmodify()
2059 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); in __ftrace_hash_update_ipmodify()
2064 rec->flags &= ~FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
2066 rec->flags |= FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
2069 return -EBUSY; in __ftrace_hash_update_ipmodify()
2074 struct ftrace_hash *hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_enable()
2085 struct ftrace_hash *hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_disable()
2096 struct ftrace_hash *old_hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_update()
2144 * ftrace_bug - report and shutdown function tracer
2151 * EFAULT - if the problem happens on reading the @ip address
2152 * EINVAL - if what is read at @ip is not what was expected
2153 * EPERM - if the problem happens on writing to the @ip address
2157 unsigned long ip = rec ? rec->ip : 0; in ftrace_bug()
2159 pr_info("------------[ ftrace bug ]------------\n"); in ftrace_bug()
2162 case -EFAULT: in ftrace_bug()
2166 case -EINVAL: in ftrace_bug()
2176 case -EPERM: in ftrace_bug()
2188 pr_info("ftrace record flags: %lx\n", rec->flags); in ftrace_bug()
2190 rec->flags & FTRACE_FL_REGS ? " R" : " ", in ftrace_bug()
2191 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " "); in ftrace_bug()
2192 if (rec->flags & FTRACE_FL_TRAMP_EN) { in ftrace_bug()
2197 (void *)ops->trampoline, in ftrace_bug()
2198 (void *)ops->func); in ftrace_bug()
2242 if (!(rec->flags & FTRACE_FL_REGS) != in ftrace_check_record()
2243 !(rec->flags & FTRACE_FL_REGS_EN)) in ftrace_check_record()
2246 if (!(rec->flags & FTRACE_FL_TRAMP) != in ftrace_check_record()
2247 !(rec->flags & FTRACE_FL_TRAMP_EN)) in ftrace_check_record()
2262 if (!(rec->flags & FTRACE_FL_DIRECT) != in ftrace_check_record()
2263 !(rec->flags & FTRACE_FL_DIRECT_EN)) in ftrace_check_record()
2265 } else if (rec->flags & FTRACE_FL_DIRECT_EN) { in ftrace_check_record()
2275 if (!(rec->flags & FTRACE_FL_CALL_OPS) != in ftrace_check_record()
2276 !(rec->flags & FTRACE_FL_CALL_OPS_EN)) in ftrace_check_record()
2278 } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) { in ftrace_check_record()
2284 if ((rec->flags & FTRACE_FL_ENABLED) == flag) in ftrace_check_record()
2289 flag ^= rec->flags & FTRACE_FL_ENABLED; in ftrace_check_record()
2292 rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED; in ftrace_check_record()
2294 if (rec->flags & FTRACE_FL_REGS) in ftrace_check_record()
2295 rec->flags |= FTRACE_FL_REGS_EN; in ftrace_check_record()
2297 rec->flags &= ~FTRACE_FL_REGS_EN; in ftrace_check_record()
2300 if (rec->flags & FTRACE_FL_TRAMP) in ftrace_check_record()
2301 rec->flags |= FTRACE_FL_TRAMP_EN; in ftrace_check_record()
2303 rec->flags &= ~FTRACE_FL_TRAMP_EN; in ftrace_check_record()
2307 if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY)) in ftrace_check_record()
2308 rec->flags |= FTRACE_FL_MODIFIED; in ftrace_check_record()
2317 if (rec->flags & FTRACE_FL_DIRECT) in ftrace_check_record()
2318 rec->flags |= FTRACE_FL_DIRECT_EN; in ftrace_check_record()
2320 rec->flags &= ~FTRACE_FL_DIRECT_EN; in ftrace_check_record()
2326 rec->flags &= ~FTRACE_FL_DIRECT_EN; in ftrace_check_record()
2332 if (rec->flags & FTRACE_FL_CALL_OPS) in ftrace_check_record()
2333 rec->flags |= FTRACE_FL_CALL_OPS_EN; in ftrace_check_record()
2335 rec->flags &= ~FTRACE_FL_CALL_OPS_EN; in ftrace_check_record()
2341 rec->flags &= ~FTRACE_FL_CALL_OPS_EN; in ftrace_check_record()
2351 * from the save regs, to a non-save regs function or in ftrace_check_record()
2366 rec->flags &= FTRACE_NOCLEAR_FLAGS; in ftrace_check_record()
2372 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | in ftrace_check_record()
2382 * ftrace_update_record - set a record that now is tracing or not
2395 * ftrace_test_record - check if the record has been enabled or not
2412 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_any()
2416 if (!op->trampoline) in ftrace_find_tramp_ops_any()
2419 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_any()
2430 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_any_other()
2434 if (op == op_exclude || !op->trampoline) in ftrace_find_tramp_ops_any_other()
2437 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_any_other()
2448 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_next()
2452 if (!op->trampoline) in ftrace_find_tramp_ops_next()
2455 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_next()
2466 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_curr()
2475 if (hash_contains_ip(ip, &removed_ops->old_hash)) in ftrace_find_tramp_ops_curr()
2499 if (!op->trampoline) in ftrace_find_tramp_ops_curr()
2506 if (op->flags & FTRACE_OPS_FL_ADDING) in ftrace_find_tramp_ops_curr()
2515 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && in ftrace_find_tramp_ops_curr()
2516 hash_contains_ip(ip, &op->old_hash)) in ftrace_find_tramp_ops_curr()
2523 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && in ftrace_find_tramp_ops_curr()
2524 hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_curr()
2536 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_new()
2539 /* pass rec in as regs to have non-NULL val */ in ftrace_find_tramp_ops_new()
2540 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_new()
2551 unsigned long ip = rec->ip; in ftrace_find_unique_ops()
2555 if (hash_contains_ip(ip, op->func_hash)) { in ftrace_find_unique_ops()
2583 return entry->direct; in ftrace_find_rec_direct()
2589 unsigned long addr = READ_ONCE(ops->direct_call); in call_direct_funcs()
2599 * ftrace_get_addr_new - Get the call address to set to
2613 if ((rec->flags & FTRACE_FL_DIRECT) && in ftrace_get_addr_new()
2615 addr = ftrace_find_rec_direct(rec->ip); in ftrace_get_addr_new()
2622 if (rec->flags & FTRACE_FL_TRAMP) { in ftrace_get_addr_new()
2624 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { in ftrace_get_addr_new()
2626 (void *)rec->ip, (void *)rec->ip, rec->flags); in ftrace_get_addr_new()
2630 return ops->trampoline; in ftrace_get_addr_new()
2633 if (rec->flags & FTRACE_FL_REGS) in ftrace_get_addr_new()
2640 * ftrace_get_addr_curr - Get the call address that is already there
2655 if (rec->flags & FTRACE_FL_DIRECT_EN) { in ftrace_get_addr_curr()
2656 addr = ftrace_find_rec_direct(rec->ip); in ftrace_get_addr_curr()
2663 if (rec->flags & FTRACE_FL_TRAMP_EN) { in ftrace_get_addr_curr()
2667 (void *)rec->ip, (void *)rec->ip); in ftrace_get_addr_curr()
2671 return ops->trampoline; in ftrace_get_addr_curr()
2674 if (rec->flags & FTRACE_FL_REGS_EN) in ftrace_get_addr_curr()
2713 return -1; /* unknown ftrace bug */ in __ftrace_replace_code()
2749 * ftrace_rec_iter_start - start up iterating over traced functions
2766 iter->pg = ftrace_pages_start; in ftrace_rec_iter_start()
2767 iter->index = 0; in ftrace_rec_iter_start()
2770 while (iter->pg && !iter->pg->index) in ftrace_rec_iter_start()
2771 iter->pg = iter->pg->next; in ftrace_rec_iter_start()
2773 if (!iter->pg) in ftrace_rec_iter_start()
2780 * ftrace_rec_iter_next - get the next record to process.
2787 iter->index++; in ftrace_rec_iter_next()
2789 if (iter->index >= iter->pg->index) { in ftrace_rec_iter_next()
2790 iter->pg = iter->pg->next; in ftrace_rec_iter_next()
2791 iter->index = 0; in ftrace_rec_iter_next()
2794 while (iter->pg && !iter->pg->index) in ftrace_rec_iter_next()
2795 iter->pg = iter->pg->next; in ftrace_rec_iter_next()
2798 if (!iter->pg) in ftrace_rec_iter_next()
2805 * ftrace_rec_iter_record - get the record at the iterator location
2812 return &iter->pg->records[iter->index]; in ftrace_rec_iter_record()
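
/*
 * A sketch of the loop shape the ftrace_rec_iter_*() helpers above exist for,
 * as used from architecture code that must touch every patch site.
 * "patch_one_site" is a placeholder for the arch-specific code modification.
 */
#include <linux/ftrace.h>

static void patch_one_site(unsigned long ip)
{
	/* arch-specific: rewrite the nop/call at ip */
}

static void patch_all_sites(void)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		patch_one_site(rec->ip);
	}
}
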
2919 * ftrace_run_stop_machine - go back to the stop machine method
2931 * arch_ftrace_update_code - modify the code to trace or not trace
2960 ops->flags |= FTRACE_OPS_FL_MODIFYING; in ftrace_run_modify_code()
2961 ops->old_hash.filter_hash = old_hash->filter_hash; in ftrace_run_modify_code()
2962 ops->old_hash.notrace_hash = old_hash->notrace_hash; in ftrace_run_modify_code()
2964 ops->old_hash.filter_hash = NULL; in ftrace_run_modify_code()
2965 ops->old_hash.notrace_hash = NULL; in ftrace_run_modify_code()
2966 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; in ftrace_run_modify_code()
2982 list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); in ftrace_add_trampoline_to_kallsyms()
2988 list_del_rcu(&ops->list); in ftrace_remove_trampoline_from_kallsyms()
3002 if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && in ftrace_trampoline_free()
3003 ops->trampoline) { in ftrace_trampoline_free()
3008 perf_event_text_poke((void *)ops->trampoline, in ftrace_trampoline_free()
3009 (void *)ops->trampoline, in ftrace_trampoline_free()
3010 ops->trampoline_size, NULL, 0); in ftrace_trampoline_free()
3012 ops->trampoline, ops->trampoline_size, in ftrace_trampoline_free()
3046 return -ENODEV; in ftrace_startup()
3062 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; in ftrace_startup()
3068 ftrace_start_up--; in ftrace_startup()
3069 ops->flags &= ~FTRACE_OPS_FL_ENABLED; in ftrace_startup()
3070 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) in ftrace_startup()
3087 return -ENODEV; in ftrace_startup()
3090 ops->flags &= ~FTRACE_OPS_FL_ADDING; in ftrace_startup()
3100 return -ENODEV; in ftrace_shutdown()
3106 ftrace_start_up--; in ftrace_shutdown()
3120 ops->flags &= ~FTRACE_OPS_FL_ENABLED; in ftrace_shutdown()
3134 ops->flags |= FTRACE_OPS_FL_REMOVING; in ftrace_shutdown()
3138 ops->old_hash.filter_hash = ops->func_hash->filter_hash; in ftrace_shutdown()
3139 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; in ftrace_shutdown()
3153 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS)) in ftrace_shutdown()
3155 (void *)rec->ip, rec->flags); in ftrace_shutdown()
3159 ops->old_hash.filter_hash = NULL; in ftrace_shutdown()
3160 ops->old_hash.notrace_hash = NULL; in ftrace_shutdown()
3163 ops->flags &= ~FTRACE_OPS_FL_REMOVING; in ftrace_shutdown()
3170 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { in ftrace_shutdown()
3202 return alloc_and_copy_ftrace_hash(src->size_bits, src); in copy_hash()
3234 return -ENOMEM; in append_hash()
3244 size = 1 << new_hash->size_bits; in append_hash()
3246 hlist_for_each_entry(entry, &new_hash->buckets[i], hlist) { in append_hash()
3248 if (!__ftrace_lookup_ip(*hash, entry->ip) && in append_hash()
3249 add_hash_entry(*hash, entry->ip) == NULL) in append_hash()
3250 return -ENOMEM; in append_hash()
3279 size = 1 << new_hash1->size_bits; in intersect_hash()
3281 hlist_for_each_entry(entry, &new_hash1->buckets[i], hlist) { in intersect_hash()
3283 if (__ftrace_lookup_ip(new_hash2, entry->ip) && in intersect_hash()
3284 add_hash_entry(*hash, entry->ip) == NULL) in intersect_hash()
3285 return -ENOMEM; in intersect_hash()
3296 /* Return a new hash that has a union of all @ops->filter_hash entries */
3304 if (ops->func_hash->filter_hash) in append_hashes()
3305 size_bits = ops->func_hash->filter_hash->size_bits; in append_hashes()
3309 list_for_each_entry(subops, &ops->subop_list, list) { in append_hashes()
3310 ret = append_hash(&new_hash, subops->func_hash->filter_hash, size_bits); in append_hashes()
3331 list_for_each_entry(subops, &ops->subop_list, list) { in intersect_hashes()
3335 size_bits = subops->func_hash->notrace_hash->size_bits; in intersect_hashes()
3336 new_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->notrace_hash); in intersect_hashes()
3341 size_bits = new_hash->size_bits; in intersect_hashes()
3344 ret = intersect_hash(&new_hash, next_hash, subops->func_hash->notrace_hash); in intersect_hashes()
3369 if (A->count != B->count) in ops_equal()
3372 size = 1 << A->size_bits; in ops_equal()
3374 hlist_for_each_entry(entry, &A->buckets[i], hlist) { in ops_equal()
3375 if (!__ftrace_lookup_ip(B, entry->ip)) in ops_equal()
3396 old_hash_ops.filter_hash = ops->func_hash->filter_hash; in __ftrace_hash_move_and_update_ops()
3397 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; in __ftrace_hash_move_and_update_ops()
3411 if (!ops_equal(filter_hash, ops->func_hash->filter_hash)) { in ftrace_update_ops()
3412 ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->filter_hash, in ftrace_update_ops()
3418 if (!ops_equal(notrace_hash, ops->func_hash->notrace_hash)) { in ftrace_update_ops()
3419 ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->notrace_hash, in ftrace_update_ops()
3429 * ftrace_startup_subops - enable tracing for subops of an ops
3448 return -ENODEV; in ftrace_startup_subops()
3453 if (WARN_ON_ONCE(subops->flags & FTRACE_OPS_FL_ENABLED)) in ftrace_startup_subops()
3454 return -EBUSY; in ftrace_startup_subops()
3457 if (!ops->func_hash->filter_hash) in ftrace_startup_subops()
3458 ops->func_hash->filter_hash = EMPTY_HASH; in ftrace_startup_subops()
3459 if (!ops->func_hash->notrace_hash) in ftrace_startup_subops()
3460 ops->func_hash->notrace_hash = EMPTY_HASH; in ftrace_startup_subops()
3461 if (!subops->func_hash->filter_hash) in ftrace_startup_subops()
3462 subops->func_hash->filter_hash = EMPTY_HASH; in ftrace_startup_subops()
3463 if (!subops->func_hash->notrace_hash) in ftrace_startup_subops()
3464 subops->func_hash->notrace_hash = EMPTY_HASH; in ftrace_startup_subops()
3467 if (list_empty(&ops->subop_list)) { in ftrace_startup_subops()
3469 filter_hash = copy_hash(subops->func_hash->filter_hash); in ftrace_startup_subops()
3470 notrace_hash = copy_hash(subops->func_hash->notrace_hash); in ftrace_startup_subops()
3474 return -ENOMEM; in ftrace_startup_subops()
3477 save_filter_hash = ops->func_hash->filter_hash; in ftrace_startup_subops()
3478 save_notrace_hash = ops->func_hash->notrace_hash; in ftrace_startup_subops()
3480 ops->func_hash->filter_hash = filter_hash; in ftrace_startup_subops()
3481 ops->func_hash->notrace_hash = notrace_hash; in ftrace_startup_subops()
3482 list_add(&subops->list, &ops->subop_list); in ftrace_startup_subops()
3485 list_del(&subops->list); in ftrace_startup_subops()
3486 ops->func_hash->filter_hash = save_filter_hash; in ftrace_startup_subops()
3487 ops->func_hash->notrace_hash = save_notrace_hash; in ftrace_startup_subops()
3493 subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP; in ftrace_startup_subops()
3494 subops->managed = ops; in ftrace_startup_subops()
3506 if (ftrace_hash_empty(ops->func_hash->filter_hash) || in ftrace_startup_subops()
3507 ftrace_hash_empty(subops->func_hash->filter_hash)) { in ftrace_startup_subops()
3510 size_bits = max(ops->func_hash->filter_hash->size_bits, in ftrace_startup_subops()
3511 subops->func_hash->filter_hash->size_bits); in ftrace_startup_subops()
3512 filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash); in ftrace_startup_subops()
3514 return -ENOMEM; in ftrace_startup_subops()
3515 ret = append_hash(&filter_hash, subops->func_hash->filter_hash, in ftrace_startup_subops()
3523 if (ftrace_hash_empty(ops->func_hash->notrace_hash) || in ftrace_startup_subops()
3524 ftrace_hash_empty(subops->func_hash->notrace_hash)) { in ftrace_startup_subops()
3527 size_bits = max(ops->func_hash->filter_hash->size_bits, in ftrace_startup_subops()
3528 subops->func_hash->filter_hash->size_bits); in ftrace_startup_subops()
3532 return -ENOMEM; in ftrace_startup_subops()
3535 ret = intersect_hash(&notrace_hash, ops->func_hash->filter_hash, in ftrace_startup_subops()
3536 subops->func_hash->filter_hash); in ftrace_startup_subops()
3544 list_add(&subops->list, &ops->subop_list); in ftrace_startup_subops()
3550 list_del(&subops->list); in ftrace_startup_subops()
3552 subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP; in ftrace_startup_subops()
3553 subops->managed = ops; in ftrace_startup_subops()
3559 * ftrace_shutdown_subops - Remove a subops from a manager ops
3577 return -ENODEV; in ftrace_shutdown_subops()
3579 if (WARN_ON_ONCE(!(subops->flags & FTRACE_OPS_FL_ENABLED))) in ftrace_shutdown_subops()
3580 return -EINVAL; in ftrace_shutdown_subops()
3582 list_del(&subops->list); in ftrace_shutdown_subops()
3584 if (list_empty(&ops->subop_list)) { in ftrace_shutdown_subops()
3589 list_add(&subops->list, &ops->subop_list); in ftrace_shutdown_subops()
3593 subops->flags &= ~FTRACE_OPS_FL_ENABLED; in ftrace_shutdown_subops()
3595 free_ftrace_hash(ops->func_hash->filter_hash); in ftrace_shutdown_subops()
3596 free_ftrace_hash(ops->func_hash->notrace_hash); in ftrace_shutdown_subops()
3597 ops->func_hash->filter_hash = EMPTY_HASH; in ftrace_shutdown_subops()
3598 ops->func_hash->notrace_hash = EMPTY_HASH; in ftrace_shutdown_subops()
3599 subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP); in ftrace_shutdown_subops()
3600 subops->managed = NULL; in ftrace_shutdown_subops()
3611 list_add(&subops->list, &ops->subop_list); in ftrace_shutdown_subops()
3612 return -ENOMEM; in ftrace_shutdown_subops()
3617 list_add(&subops->list, &ops->subop_list); in ftrace_shutdown_subops()
3619 subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP); in ftrace_shutdown_subops()
3620 subops->managed = NULL; in ftrace_shutdown_subops()
3632 struct ftrace_ops *ops = subops->managed; in ftrace_hash_move_and_update_subops()
3639 if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP)) in ftrace_hash_move_and_update_subops()
3640 return -EINVAL; in ftrace_hash_move_and_update_subops()
3647 return -ENOMEM; in ftrace_hash_move_and_update_subops()
3652 orig_hash = &ops->func_hash->filter_hash; in ftrace_hash_move_and_update_subops()
3655 orig_hash = &ops->func_hash->notrace_hash; in ftrace_hash_move_and_update_subops()
3687 return ftrace_hash_empty(ops->func_hash->filter_hash) && in ops_traces_mod()
3688 ftrace_hash_empty(ops->func_hash->notrace_hash); in ops_traces_mod()
3708 * read-only, the modification of enabling ftrace can fail if in ftrace_update_code()
3709 * the read-only is done while ftrace is converting the calls. in ftrace_update_code()
3712 * to read-only. in ftrace_update_code()
3717 for (pg = new_pgs; pg; pg = pg->next) { in ftrace_update_code()
3719 for (i = 0; i < pg->index; i++) { in ftrace_update_code()
3723 return -1; in ftrace_update_code()
3725 p = &pg->records[i]; in ftrace_update_code()
3726 p->flags = rec_flags; in ftrace_update_code()
3740 update_time = stop - start; in ftrace_update_code()
3757 return -EINVAL; in ftrace_allocate_records()
3761 order = fls(pages) - 1; in ftrace_allocate_records()
3764 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); in ftrace_allocate_records()
3766 if (!pg->records) { in ftrace_allocate_records()
3769 return -ENOMEM; in ftrace_allocate_records()
3770 order--; in ftrace_allocate_records()
3778 pg->order = order; in ftrace_allocate_records()
3791 if (pg->records) { in ftrace_free_pages()
3792 free_pages((unsigned long)pg->records, pg->order); in ftrace_free_pages()
3793 ftrace_number_of_pages -= 1 << pg->order; in ftrace_free_pages()
3795 pages = pg->next; in ftrace_free_pages()
3798 ftrace_number_of_groups--; in ftrace_free_pages()
3826 num_to_init -= cnt; in ftrace_allocate_pages()
3830 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); in ftrace_allocate_pages()
3831 if (!pg->next) in ftrace_allocate_pages()
3834 pg = pg->next; in ftrace_allocate_pages()
3868 struct ftrace_iterator *iter = m->private; in t_probe_next()
3869 struct trace_array *tr = iter->ops->private; in t_probe_next()
3878 iter->pos = *pos; in t_probe_next()
3883 func_probes = &tr->func_probes; in t_probe_next()
3887 if (!iter->probe) { in t_probe_next()
3888 next = func_probes->next; in t_probe_next()
3889 iter->probe = list_entry(next, struct ftrace_func_probe, list); in t_probe_next()
3892 if (iter->probe_entry) in t_probe_next()
3893 hnd = &iter->probe_entry->hlist; in t_probe_next()
3895 hash = iter->probe->ops.func_hash->filter_hash; in t_probe_next()
3904 size = 1 << hash->size_bits; in t_probe_next()
3907 if (iter->pidx >= size) { in t_probe_next()
3908 if (iter->probe->list.next == func_probes) in t_probe_next()
3910 next = iter->probe->list.next; in t_probe_next()
3911 iter->probe = list_entry(next, struct ftrace_func_probe, list); in t_probe_next()
3912 hash = iter->probe->ops.func_hash->filter_hash; in t_probe_next()
3913 size = 1 << hash->size_bits; in t_probe_next()
3914 iter->pidx = 0; in t_probe_next()
3917 hhd = &hash->buckets[iter->pidx]; in t_probe_next()
3920 iter->pidx++; in t_probe_next()
3926 hnd = hhd->first; in t_probe_next()
3928 hnd = hnd->next; in t_probe_next()
3930 iter->pidx++; in t_probe_next()
3938 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); in t_probe_next()
3945 struct ftrace_iterator *iter = m->private; in t_probe_start()
3949 if (!(iter->flags & FTRACE_ITER_DO_PROBES)) in t_probe_start()
3952 if (iter->mod_pos > *pos) in t_probe_start()
3955 iter->probe = NULL; in t_probe_start()
3956 iter->probe_entry = NULL; in t_probe_start()
3957 iter->pidx = 0; in t_probe_start()
3958 for (l = 0; l <= (*pos - iter->mod_pos); ) { in t_probe_start()
3967 iter->flags |= FTRACE_ITER_PROBE; in t_probe_start()
3979 probe = iter->probe; in t_probe_show()
3980 probe_entry = iter->probe_entry; in t_probe_show()
3983 return -EIO; in t_probe_show()
3985 probe_ops = probe->probe_ops; in t_probe_show()
3987 if (probe_ops->print) in t_probe_show()
3988 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); in t_probe_show()
3990 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, in t_probe_show()
3991 (void *)probe_ops->func); in t_probe_show()
3999 struct ftrace_iterator *iter = m->private; in t_mod_next()
4000 struct trace_array *tr = iter->tr; in t_mod_next()
4003 iter->pos = *pos; in t_mod_next()
4005 iter->mod_list = iter->mod_list->next; in t_mod_next()
4007 if (iter->mod_list == &tr->mod_trace || in t_mod_next()
4008 iter->mod_list == &tr->mod_notrace) { in t_mod_next()
4009 iter->flags &= ~FTRACE_ITER_MOD; in t_mod_next()
4013 iter->mod_pos = *pos; in t_mod_next()
4020 struct ftrace_iterator *iter = m->private; in t_mod_start()
4024 if (iter->func_pos > *pos) in t_mod_start()
4027 iter->mod_pos = iter->func_pos; in t_mod_start()
4030 if (!iter->tr) in t_mod_start()
4033 for (l = 0; l <= (*pos - iter->func_pos); ) { in t_mod_start()
4039 iter->flags &= ~FTRACE_ITER_MOD; in t_mod_start()
4044 iter->flags |= FTRACE_ITER_MOD; in t_mod_start()
4053 struct trace_array *tr = iter->tr; in t_mod_show()
4055 if (WARN_ON_ONCE(!iter->mod_list) || in t_mod_show()
4056 iter->mod_list == &tr->mod_trace || in t_mod_show()
4057 iter->mod_list == &tr->mod_notrace) in t_mod_show()
4058 return -EIO; in t_mod_show()
4060 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); in t_mod_show()
4062 if (ftrace_mod->func) in t_mod_show()
4063 seq_printf(m, "%s", ftrace_mod->func); in t_mod_show()
4067 seq_printf(m, ":mod:%s\n", ftrace_mod->module); in t_mod_show()
4075 struct ftrace_iterator *iter = m->private; in t_func_next()
4081 if (iter->idx >= iter->pg->index) { in t_func_next()
4082 if (iter->pg->next) { in t_func_next()
4083 iter->pg = iter->pg->next; in t_func_next()
4084 iter->idx = 0; in t_func_next()
4088 rec = &iter->pg->records[iter->idx++]; in t_func_next()
4089 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && in t_func_next()
4090 !ftrace_lookup_ip(iter->hash, rec->ip)) || in t_func_next()
4092 ((iter->flags & FTRACE_ITER_ENABLED) && in t_func_next()
4093 !(rec->flags & FTRACE_FL_ENABLED)) || in t_func_next()
4095 ((iter->flags & FTRACE_ITER_TOUCHED) && in t_func_next()
4096 !(rec->flags & FTRACE_FL_TOUCHED))) { in t_func_next()
4106 iter->pos = iter->func_pos = *pos; in t_func_next()
4107 iter->func = rec; in t_func_next()
4115 struct ftrace_iterator *iter = m->private; in t_next()
4122 if (iter->flags & FTRACE_ITER_PROBE) in t_next()
4125 if (iter->flags & FTRACE_ITER_MOD) in t_next()
4128 if (iter->flags & FTRACE_ITER_PRINTALL) { in t_next()
4144 iter->pos = 0; in reset_iter_read()
4145 iter->func_pos = 0; in reset_iter_read()
4146 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); in reset_iter_read()
4151 struct ftrace_iterator *iter = m->private; in t_start()
4163 if (*pos < iter->pos) in t_start()
4171 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && in t_start()
4172 ftrace_hash_empty(iter->hash)) { in t_start()
4173 iter->func_pos = 1; /* Account for the message */ in t_start()
4176 iter->flags |= FTRACE_ITER_PRINTALL; in t_start()
4178 iter->flags &= ~FTRACE_ITER_PROBE; in t_start()
4182 if (iter->flags & FTRACE_ITER_MOD) in t_start()
4190 iter->pg = ftrace_pages_start; in t_start()
4191 iter->idx = 0; in t_start()
4222 seq_printf(m, " ->%pS", ptr); in add_trampoline_func()
4242 ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str); in test_for_valid_rec()
4246 rec->flags |= FTRACE_FL_DISABLED; in test_for_valid_rec()
4309 return ret == NULL ? -1 : 0; in print_rec()
4326 struct ftrace_iterator *iter = m->private; in t_show()
4329 if (iter->flags & FTRACE_ITER_PROBE) in t_show()
4332 if (iter->flags & FTRACE_ITER_MOD) in t_show()
4335 if (iter->flags & FTRACE_ITER_PRINTALL) { in t_show()
4336 if (iter->flags & FTRACE_ITER_NOTRACE) in t_show()
4343 rec = iter->func; in t_show()
4348 if (iter->flags & FTRACE_ITER_ADDRS) in t_show()
4349 seq_printf(m, "%lx ", rec->ip); in t_show()
4351 if (print_rec(m, rec->ip)) { in t_show()
4353 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED)); in t_show()
4358 if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) { in t_show()
4363 rec->flags & FTRACE_FL_REGS ? " R" : " ", in t_show()
4364 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ", in t_show()
4365 rec->flags & FTRACE_FL_DIRECT ? " D" : " ", in t_show()
4366 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ", in t_show()
4367 rec->flags & FTRACE_FL_MODIFIED ? " M " : " "); in t_show()
4368 if (rec->flags & FTRACE_FL_TRAMP_EN) { in t_show()
4373 (void *)ops->trampoline, in t_show()
4374 (void *)ops->func); in t_show()
4383 if (rec->flags & FTRACE_FL_CALL_OPS_EN) { in t_show()
4387 ops, ops->func); in t_show()
4392 if (rec->flags & FTRACE_FL_DIRECT) { in t_show()
4395 direct = ftrace_find_rec_direct(rec->ip); in t_show()
4397 seq_printf(m, "\n\tdirect-->%pS", (void *)direct); in t_show()
4424 return -ENODEV; in ftrace_avail_open()
4428 return -ENOMEM; in ftrace_avail_open()
4430 iter->pg = ftrace_pages_start; in ftrace_avail_open()
4431 iter->ops = &global_ops; in ftrace_avail_open()
4452 return -ENOMEM; in ftrace_enabled_open()
4454 iter->pg = ftrace_pages_start; in ftrace_enabled_open()
4455 iter->flags = FTRACE_ITER_ENABLED; in ftrace_enabled_open()
4456 iter->ops = &global_ops; in ftrace_enabled_open()
4477 return -ENOMEM; in ftrace_touched_open()
4479 iter->pg = ftrace_pages_start; in ftrace_touched_open()
4480 iter->flags = FTRACE_ITER_TOUCHED; in ftrace_touched_open()
4481 iter->ops = &global_ops; in ftrace_touched_open()
4497 return -ENODEV; in ftrace_avail_addrs_open()
4501 return -ENOMEM; in ftrace_avail_addrs_open()
4503 iter->pg = ftrace_pages_start; in ftrace_avail_addrs_open()
4504 iter->flags = FTRACE_ITER_ADDRS; in ftrace_avail_addrs_open()
4505 iter->ops = &global_ops; in ftrace_avail_addrs_open()
4511 * ftrace_regex_open - initialize function tracer filter files
4535 struct trace_array *tr = ops->private; in ftrace_regex_open()
4536 int ret = -ENOMEM; in ftrace_regex_open()
4541 return -ENODEV; in ftrace_regex_open()
4544 return -ENODEV; in ftrace_regex_open()
4550 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) in ftrace_regex_open()
4553 iter->ops = ops; in ftrace_regex_open()
4554 iter->flags = flag; in ftrace_regex_open()
4555 iter->tr = tr; in ftrace_regex_open()
4557 mutex_lock(&ops->func_hash->regex_lock); in ftrace_regex_open()
4560 hash = ops->func_hash->notrace_hash; in ftrace_regex_open()
4561 mod_head = tr ? &tr->mod_notrace : NULL; in ftrace_regex_open()
4563 hash = ops->func_hash->filter_hash; in ftrace_regex_open()
4564 mod_head = tr ? &tr->mod_trace : NULL; in ftrace_regex_open()
4567 iter->mod_list = mod_head; in ftrace_regex_open()
4569 if (file->f_mode & FMODE_WRITE) { in ftrace_regex_open()
4572 if (file->f_flags & O_TRUNC) { in ftrace_regex_open()
4573 iter->hash = alloc_ftrace_hash(size_bits); in ftrace_regex_open()
4576 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); in ftrace_regex_open()
4579 if (!iter->hash) { in ftrace_regex_open()
4580 trace_parser_put(&iter->parser); in ftrace_regex_open()
4584 iter->hash = hash; in ftrace_regex_open()
4588 if (file->f_mode & FMODE_READ) { in ftrace_regex_open()
4589 iter->pg = ftrace_pages_start; in ftrace_regex_open()
4593 struct seq_file *m = file->private_data; in ftrace_regex_open()
4594 m->private = iter; in ftrace_regex_open()
4597 free_ftrace_hash(iter->hash); in ftrace_regex_open()
4598 trace_parser_put(&iter->parser); in ftrace_regex_open()
4601 file->private_data = iter; in ftrace_regex_open()
4604 mutex_unlock(&ops->func_hash->regex_lock); in ftrace_regex_open()
4619 struct ftrace_ops *ops = inode->i_private; in ftrace_filter_open()
4630 struct ftrace_ops *ops = inode->i_private; in ftrace_notrace_open()
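
/*
 * A minimal userspace sketch of the files ftrace_filter_open() and
 * ftrace_notrace_open() above serve: writing names or globs into
 * "set_ftrace_filter" programs the hash that ftrace_regex_open() copies and
 * ftrace_regex_write() parses.  Assumes tracefs is mounted at
 * /sys/kernel/tracing; the function names are just examples.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/set_ftrace_filter", "w");

	if (!f) {
		perror("set_ftrace_filter");
		return 1;
	}
	/* an O_TRUNC open resets the filter, as in ftrace_regex_open() above */
	fputs("vfs_read\n", f);		/* exact match */
	fputs("kmem_cache_*\n", f);	/* front-only glob */
	fclose(f);
	return 0;
}
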
4645 * If symbols in an architecture don't correspond exactly to the user-visible
4659 str = arch_ftrace_match_adjust(str, g->search); in ftrace_match()
4661 switch (g->type) { in ftrace_match()
4663 if (strcmp(str, g->search) == 0) in ftrace_match()
4667 if (strncmp(str, g->search, g->len) == 0) in ftrace_match()
4671 if (strstr(str, g->search)) in ftrace_match()
4676 if (slen >= g->len && in ftrace_match()
4677 memcmp(str + slen - g->len, g->search, g->len) == 0) in ftrace_match()
4681 if (glob_match(g->search, str)) in ftrace_match()
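
/*
 * A standalone sketch of the match types ftrace_match() above distinguishes
 * once the user's glob has been parsed: full, front-only ("foo*"), middle-only
 * ("*foo*") and end-only ("*foo") matches map onto plain libc calls.  The
 * MATCH_GLOB case uses the kernel's glob_match() and is omitted here.
 */
#include <stdio.h>
#include <string.h>

enum match_type { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

static int match(const char *str, const char *search, enum match_type type)
{
	size_t slen = strlen(str), glen = strlen(search);

	switch (type) {
	case MATCH_FULL:
		return strcmp(str, search) == 0;
	case MATCH_FRONT_ONLY:
		return strncmp(str, search, glen) == 0;
	case MATCH_MIDDLE_ONLY:
		return strstr(str, search) != NULL;
	case MATCH_END_ONLY:
		return slen >= glen &&
		       memcmp(str + slen - glen, search, glen) == 0;
	}
	return 0;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       match("vfs_read", "vfs_", MATCH_FRONT_ONLY),	/* 1 */
	       match("vfs_read", "_read", MATCH_END_ONLY),	/* 1 */
	       match("vfs_read", "s_re", MATCH_MIDDLE_ONLY),	/* 1 */
	       match("vfs_read", "vfs_write", MATCH_FULL));	/* 0 */
	return 0;
}
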
4695 entry = ftrace_lookup_ip(hash, rec->ip); in enter_record()
4706 if (add_hash_entry(hash, rec->ip) == NULL) in enter_record()
4707 ret = -ENOMEM; in enter_record()
4721 if (kstrtoul(func_g->search, 0, &index) || --index < 0) in add_rec_by_index()
4725 if (pg->index <= index) { in add_rec_by_index()
4726 index -= pg->index; in add_rec_by_index()
4730 rec = &pg->records[index]; in add_rec_by_index()
4744 return -1; in lookup_ip()
4762 if (lookup_ip(rec->ip, &modname, str)) { in ftrace_match_record()
4765 !(rec->flags & FTRACE_FL_DISABLED)); in ftrace_match_record()
4773 if (!mod_g->len) { in ftrace_match_record()
4791 if (!func_g->len) in ftrace_match_record()
4833 if (rec->flags & FTRACE_FL_DISABLED) in match_records()
4862 if (ops->flags & FTRACE_OPS_FL_ENABLED) { in ftrace_ops_update_code()
4872 if (ops->func_hash != &global_ops.local_hash) in ftrace_ops_update_code()
4876 if (op->func_hash == &global_ops.local_hash && in ftrace_ops_update_code()
4877 op->flags & FTRACE_OPS_FL_ENABLED) { in ftrace_ops_update_code()
4890 if (ops->flags & FTRACE_OPS_FL_SUBOP) in ftrace_hash_move_and_update_ops()
4900 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) { in ftrace_hash_move_and_update_ops()
4907 list_for_each_entry(subops, &op->subop_list, list) { in ftrace_hash_move_and_update_ops()
4908 if ((subops->flags & FTRACE_OPS_FL_ENABLED) && in ftrace_hash_move_and_update_ops()
4909 subops->func_hash == ops->func_hash) { in ftrace_hash_move_and_update_ops()
4923 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; in cache_mod()
4929 int ret = -EINVAL; in cache_mod()
4935 if (strcmp(ftrace_mod->module, module) != 0) in cache_mod()
4940 (ftrace_mod->func && in cache_mod()
4941 strcmp(ftrace_mod->func, func) == 0)) { in cache_mod()
4952 return -EINVAL; in cache_mod()
4967 mutex_lock(&ops->func_hash->regex_lock); in process_mod_list()
4970 orig_hash = &ops->func_hash->filter_hash; in process_mod_list()
4972 orig_hash = &ops->func_hash->notrace_hash; in process_mod_list()
4983 if (strcmp(ftrace_mod->module, mod) != 0) in process_mod_list()
4986 if (ftrace_mod->func) in process_mod_list()
4987 func = kstrdup(ftrace_mod->func, GFP_KERNEL); in process_mod_list()
4994 list_move(&ftrace_mod->list, &process_mods); in process_mod_list()
4997 kfree(ftrace_mod->func); in process_mod_list()
4998 ftrace_mod->func = func; in process_mod_list()
5005 func = ftrace_mod->func; in process_mod_list()
5013 new_hash->flags &= ~FTRACE_HASH_FL_MOD; in process_mod_list()
5022 mutex_unlock(&ops->func_hash->regex_lock); in process_mod_list()
5038 if (!list_empty(&tr->mod_trace)) in process_cached_mods()
5039 process_mod_list(&tr->mod_trace, tr->ops, mod, true); in process_cached_mods()
5040 if (!list_empty(&tr->mod_notrace)) in process_cached_mods()
5041 process_mod_list(&tr->mod_notrace, tr->ops, mod, false); in process_cached_mods()
5062 return -ENODEV; in ftrace_mod_callback()
5067 return -ENOMEM; in ftrace_mod_callback()
5104 probe_ops = probe->probe_ops; in function_trace_probe_call()
5112 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); in function_trace_probe_call()
5126 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
5144 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
5160 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_find_ip()
5165 return &map->data; in ftrace_func_mapper_find_ip()
5169 * ftrace_func_mapper_add_ip - Map some data to an ip
5182 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_add_ip()
5184 return -EBUSY; in ftrace_func_mapper_add_ip()
5188 return -ENOMEM; in ftrace_func_mapper_add_ip()
5190 map->entry.ip = ip; in ftrace_func_mapper_add_ip()
5191 map->data = data; in ftrace_func_mapper_add_ip()
5193 __add_hash_entry(&mapper->hash, &map->entry); in ftrace_func_mapper_add_ip()
5199 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
5215 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_remove_ip()
5220 data = map->data; in ftrace_func_mapper_remove_ip()
5222 remove_hash_entry(&mapper->hash, entry); in ftrace_func_mapper_remove_ip()
5229 * free_ftrace_func_mapper - free a mapping of ips and data
5247 if (free_func && mapper->hash.count) { in free_ftrace_func_mapper()
5248 size = 1 << mapper->hash.size_bits; in free_ftrace_func_mapper()
5250 hhd = &mapper->hash.buckets[i]; in free_ftrace_func_mapper()
5257 free_ftrace_hash(&mapper->hash); in free_ftrace_func_mapper()
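
/*
 * A sketch of how the ftrace_func_mapper helpers above are typically used by
 * probe code (in the style of kernel/trace/trace_functions.c) to keep per-ip
 * private data.  The prototypes live in kernel/trace/trace.h, so this would
 * sit inside kernel/trace/; "attach_count", "bump_count", "detach_count" and
 * the stored counter are invented for the example, and error handling is
 * trimmed.
 */
#include <linux/slab.h>

static struct ftrace_func_mapper *mapper;	/* set up via allocate_ftrace_func_mapper() */

static int attach_count(unsigned long ip)
{
	long *count = kzalloc(sizeof(*count), GFP_KERNEL);

	if (!count)
		return -ENOMEM;
	return ftrace_func_mapper_add_ip(mapper, ip, count);
}

static void bump_count(unsigned long ip)
{
	void **data = ftrace_func_mapper_find_ip(mapper, ip);

	if (data)			/* *data is the pointer handed to add_ip() */
		++*(long *)*data;
}

static void detach_count(unsigned long ip)
{
	kfree(ftrace_func_mapper_remove_ip(mapper, ip));
}
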
5266 WARN_ON(probe->ref <= 0); in release_probe()
5269 probe->ref--; in release_probe()
5271 if (!probe->ref) { in release_probe()
5272 probe_ops = probe->probe_ops; in release_probe()
5275 * the probe->data itself in release_probe()
5277 if (probe_ops->free) in release_probe()
5278 probe_ops->free(probe_ops, probe->tr, 0, probe->data); in release_probe()
5279 list_del(&probe->list); in release_probe()
5290 probe->ref++; in acquire_probe_locked()
5309 return -EINVAL; in register_ftrace_function_probe()
5313 return -EINVAL; in register_ftrace_function_probe()
5318 list_for_each_entry(iter, &tr->func_probes, list) { in register_ftrace_function_probe()
5319 if (iter->probe_ops == probe_ops) { in register_ftrace_function_probe()
5328 return -ENOMEM; in register_ftrace_function_probe()
5330 probe->probe_ops = probe_ops; in register_ftrace_function_probe()
5331 probe->ops.func = function_trace_probe_call; in register_ftrace_function_probe()
5332 probe->tr = tr; in register_ftrace_function_probe()
5333 ftrace_ops_init(&probe->ops); in register_ftrace_function_probe()
5334 list_add(&probe->list, &tr->func_probes); in register_ftrace_function_probe()
5342 * Note, there's a small window here that the func_hash->filter_hash in register_ftrace_function_probe()
5345 mutex_lock(&probe->ops.func_hash->regex_lock); in register_ftrace_function_probe()
5347 orig_hash = &probe->ops.func_hash->filter_hash; in register_ftrace_function_probe()
5352 ret = -ENOMEM; in register_ftrace_function_probe()
5360 ret = -EINVAL; in register_ftrace_function_probe()
5365 size = 1 << hash->size_bits; in register_ftrace_function_probe()
5367 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_function_probe()
5368 if (ftrace_lookup_ip(old_hash, entry->ip)) in register_ftrace_function_probe()
5375 if (probe_ops->init) { in register_ftrace_function_probe()
5376 ret = probe_ops->init(probe_ops, tr, in register_ftrace_function_probe()
5377 entry->ip, data, in register_ftrace_function_probe()
5378 &probe->data); in register_ftrace_function_probe()
5380 if (probe_ops->free && count) in register_ftrace_function_probe()
5381 probe_ops->free(probe_ops, tr, in register_ftrace_function_probe()
5382 0, probe->data); in register_ftrace_function_probe()
5383 probe->data = NULL; in register_ftrace_function_probe()
5395 ret = -EINVAL; in register_ftrace_function_probe()
5399 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, in register_ftrace_function_probe()
5405 probe->ref += count; in register_ftrace_function_probe()
5407 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) in register_ftrace_function_probe()
5408 ret = ftrace_startup(&probe->ops, 0); in register_ftrace_function_probe()
5416 mutex_unlock(&probe->ops.func_hash->regex_lock); in register_ftrace_function_probe()
5424 if (!probe_ops->free || !count) in register_ftrace_function_probe()
5429 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_function_probe()
5430 if (ftrace_lookup_ip(old_hash, entry->ip)) in register_ftrace_function_probe()
5432 probe_ops->free(probe_ops, tr, entry->ip, probe->data); in register_ftrace_function_probe()
5453 int i, ret = -ENODEV; in unregister_ftrace_function_probe_func()
5467 return -EINVAL; in unregister_ftrace_function_probe_func()
5472 list_for_each_entry(iter, &tr->func_probes, list) { in unregister_ftrace_function_probe_func()
5473 if (iter->probe_ops == probe_ops) { in unregister_ftrace_function_probe_func()
5481 ret = -EINVAL; in unregister_ftrace_function_probe_func()
5482 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) in unregister_ftrace_function_probe_func()
5489 mutex_lock(&probe->ops.func_hash->regex_lock); in unregister_ftrace_function_probe_func()
5491 orig_hash = &probe->ops.func_hash->filter_hash; in unregister_ftrace_function_probe_func()
5501 ret = -ENOMEM; in unregister_ftrace_function_probe_func()
5508 size = 1 << hash->size_bits; in unregister_ftrace_function_probe_func()
5510 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { in unregister_ftrace_function_probe_func()
5513 kallsyms_lookup(entry->ip, NULL, NULL, in unregister_ftrace_function_probe_func()
5520 hlist_add_head(&entry->hlist, &hhd); in unregister_ftrace_function_probe_func()
5526 ret = -EINVAL; in unregister_ftrace_function_probe_func()
5532 WARN_ON(probe->ref < count); in unregister_ftrace_function_probe_func()
5534 probe->ref -= count; in unregister_ftrace_function_probe_func()
5537 ftrace_shutdown(&probe->ops, 0); in unregister_ftrace_function_probe_func()
5539 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, in unregister_ftrace_function_probe_func()
5544 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, in unregister_ftrace_function_probe_func()
5549 hlist_del(&entry->hlist); in unregister_ftrace_function_probe_func()
5550 if (probe_ops->free) in unregister_ftrace_function_probe_func()
5551 probe_ops->free(probe_ops, tr, entry->ip, probe->data); in unregister_ftrace_function_probe_func()
5557 mutex_unlock(&probe->ops.func_hash->regex_lock); in unregister_ftrace_function_probe_func()
5573 list_for_each_entry_safe(probe, n, &tr->func_probes, list) in clear_ftrace_function_probes()
5574 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); in clear_ftrace_function_probes()
5590 if (strcmp(cmd->name, p->name) == 0) in register_ftrace_command()
5591 return -EBUSY; in register_ftrace_command()
5593 list_add(&cmd->list, &ftrace_commands); in register_ftrace_command()
5609 if (strcmp(cmd->name, p->name) == 0) { in unregister_ftrace_command()
5610 list_del_init(&p->list); in unregister_ftrace_command()
5615 return -ENODEV; in unregister_ftrace_command()
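/*
 * Illustrative sketch of a command hooked into the list managed above.  The
 * handler prototype mirrors the p->func(tr, hash, func, command, next, enable)
 * call in ftrace_process_regex() below; the my_* names are hypothetical, and
 * registration is normally done from core-kernel __init code rather than from
 * modules.
 */
static int my_cmd_func(struct trace_array *tr, struct ftrace_hash *hash,
		       char *func, char *cmd, char *param, int enable)
{
	/* invoked for "echo 'pattern:mycmd:param' > set_ftrace_filter" */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "mycmd",
	.func	= my_cmd_func,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
core_initcall(my_cmd_init);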
5621 struct ftrace_hash *hash = iter->hash; in ftrace_process_regex()
5622 struct trace_array *tr = iter->ops->private; in ftrace_process_regex()
5632 ret = -EINVAL; in ftrace_process_regex()
5645 if (strcmp(p->name, command) == 0) in ftrace_process_regex()
5646 return p->func(tr, hash, func, command, next, enable); in ftrace_process_regex()
5649 return -EINVAL; in ftrace_process_regex()
5663 if (file->f_mode & FMODE_READ) { in ftrace_regex_write()
5664 struct seq_file *m = file->private_data; in ftrace_regex_write()
5665 iter = m->private; in ftrace_regex_write()
5667 iter = file->private_data; in ftrace_regex_write()
5670 return -ENODEV; in ftrace_regex_write()
5672 /* iter->hash is a local copy, so we don't need regex_lock */ in ftrace_regex_write()
5674 parser = &iter->parser; in ftrace_regex_write()
5679 ret = ftrace_process_regex(iter, parser->buffer, in ftrace_regex_write()
5680 parser->idx, enable); in ftrace_regex_write()
5710 return -EINVAL; in __ftrace_match_addr()
5715 return -ENOENT; in __ftrace_match_addr()
5724 return entry ? 0 : -ENOMEM; in __ftrace_match_addr()
5757 return -ENODEV; in ftrace_set_hash()
5759 mutex_lock(&ops->func_hash->regex_lock); in ftrace_set_hash()
5762 orig_hash = &ops->func_hash->filter_hash; in ftrace_set_hash()
5764 orig_hash = &ops->func_hash->notrace_hash; in ftrace_set_hash()
5772 ret = -ENOMEM; in ftrace_set_hash()
5779 (*orig_hash)->flags |= FTRACE_HASH_FL_MOD; in ftrace_set_hash()
5785 ret = -EINVAL; in ftrace_set_hash()
5799 mutex_unlock(&ops->func_hash->regex_lock); in ftrace_set_hash()
5830 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) in check_direct_multi()
5831 return -EINVAL; in check_direct_multi()
5832 if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS) in check_direct_multi()
5833 return -EINVAL; in check_direct_multi()
5842 size = 1 << hash->size_bits; in remove_direct_functions_hash()
5844 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in remove_direct_functions_hash()
5845 del = __ftrace_lookup_ip(direct_functions, entry->ip); in remove_direct_functions_hash()
5846 if (del && del->direct == addr) { in remove_direct_functions_hash()
5862 * register_ftrace_direct - Call a custom trampoline directly
5877 * -EINVAL - The @ops object was already registered with this call or
5879 * -EBUSY - Another direct function is already attached (there can be only one)
5880 * -ENODEV - @ip does not point to an ftrace nop location (or not supported)
5881 * -ENOMEM - There was an allocation failure.
5887 int err = -EBUSY, size, i; in register_ftrace_direct()
5889 if (ops->func || ops->trampoline) in register_ftrace_direct()
5890 return -EINVAL; in register_ftrace_direct()
5891 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) in register_ftrace_direct()
5892 return -EINVAL; in register_ftrace_direct()
5893 if (ops->flags & FTRACE_OPS_FL_ENABLED) in register_ftrace_direct()
5894 return -EINVAL; in register_ftrace_direct()
5896 hash = ops->func_hash->filter_hash; in register_ftrace_direct()
5898 return -EINVAL; in register_ftrace_direct()
5903 size = 1 << hash->size_bits; in register_ftrace_direct()
5905 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_direct()
5906 if (ftrace_find_rec_direct(entry->ip)) in register_ftrace_direct()
5911 err = -ENOMEM; in register_ftrace_direct()
5914 size = hash->count + direct_functions->count; in register_ftrace_direct()
5922 size = 1 << direct_functions->size_bits; in register_ftrace_direct()
5924 hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) { in register_ftrace_direct()
5925 new = add_hash_entry(new_hash, entry->ip); in register_ftrace_direct()
5928 new->direct = entry->direct; in register_ftrace_direct()
5933 size = 1 << hash->size_bits; in register_ftrace_direct()
5935 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_direct()
5936 new = add_hash_entry(new_hash, entry->ip); in register_ftrace_direct()
5940 new->direct = addr; in register_ftrace_direct()
5941 entry->direct = addr; in register_ftrace_direct()
5949 ops->func = call_direct_funcs; in register_ftrace_direct()
5950 ops->flags = MULTI_FLAGS; in register_ftrace_direct()
5951 ops->trampoline = FTRACE_REGS_ADDR; in register_ftrace_direct()
5952 ops->direct_call = addr; in register_ftrace_direct()
5960 call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb); in register_ftrace_direct()
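/*
 * Usage sketch in the style of samples/ftrace/ftrace-direct.c.  my_tramp
 * stands in for an arch-specific assembly trampoline that preserves the
 * calling convention, and my_direct_attach()/target_ip are hypothetical.
 * Note that ftrace_set_filter_ip() is what initializes @direct_ops before
 * register_ftrace_direct() will accept it.
 */
extern void my_tramp(void);	/* assumed arch asm trampoline */

static struct ftrace_ops direct_ops;

static int my_direct_attach(unsigned long target_ip)
{
	int ret;

	/* pick the function(s) whose ftrace site gets the direct call */
	ret = ftrace_set_filter_ip(&direct_ops, target_ip, 0, 0);
	if (ret)
		return ret;

	/* point every filtered site at the custom trampoline */
	return register_ftrace_direct(&direct_ops, (unsigned long)my_tramp);
}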
5970 * unregister_ftrace_direct - Remove calls to custom trampoline
5982 * -EINVAL - The @ops object was not properly registered.
5987 struct ftrace_hash *hash = ops->func_hash->filter_hash; in unregister_ftrace_direct()
5991 return -EINVAL; in unregister_ftrace_direct()
5992 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in unregister_ftrace_direct()
5993 return -EINVAL; in unregister_ftrace_direct()
6001 ops->func = NULL; in unregister_ftrace_direct()
6002 ops->trampoline = 0; in unregister_ftrace_direct()
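/*
 * The matching teardown for the attach sketch above.  The third
 * "free_filters" argument follows the current in-tree prototype and is an
 * assumption here; older kernels took only (@ops, @addr).
 */
static int my_direct_detach(void)
{
	return unregister_ftrace_direct(&direct_ops, (unsigned long)my_tramp,
					true);
}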
6026 tmp_ops.func_hash = ops->func_hash; in __modify_ftrace_direct()
6039 hash = ops->func_hash->filter_hash; in __modify_ftrace_direct()
6040 size = 1 << hash->size_bits; in __modify_ftrace_direct()
6042 hlist_for_each_entry(iter, &hash->buckets[i], hlist) { in __modify_ftrace_direct()
6043 entry = __ftrace_lookup_ip(direct_functions, iter->ip); in __modify_ftrace_direct()
6046 entry->direct = addr; in __modify_ftrace_direct()
6050 WRITE_ONCE(ops->direct_call, addr); in __modify_ftrace_direct()
6061 * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
6076 * -EINVAL - The @ops object was not properly registered.
6081 return -EINVAL; in modify_ftrace_direct_nolock()
6082 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in modify_ftrace_direct_nolock()
6083 return -EINVAL; in modify_ftrace_direct_nolock()
6090 * modify_ftrace_direct - Modify an existing direct 'multi' call
6102 * -EINVAL - The @ops object was not properly registered.
6109 return -EINVAL; in modify_ftrace_direct()
6110 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in modify_ftrace_direct()
6111 return -EINVAL; in modify_ftrace_direct()
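/*
 * Live-switch sketch in the style of samples/ftrace/ftrace-direct-modify.c.
 * my_tramp2 is another assumed arch-specific trampoline; the _nolock variant
 * above is only for callers that already hold the relevant direct-call
 * locking, which is the point of providing both entry points.
 */
extern void my_tramp2(void);	/* assumed arch asm trampoline */

static int my_direct_switch(void)
{
	return modify_ftrace_direct(&direct_ops, (unsigned long)my_tramp2);
}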
6122 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
6144 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
6167 * ftrace_ops_set_global_filter - setup ops to use global filters
6175 if (ops->flags & FTRACE_OPS_FL_INITIALIZED) in ftrace_ops_set_global_filter()
6179 ops->func_hash = &global_ops.local_hash; in ftrace_ops_set_global_filter()
6189 struct trace_array *tr = ops->private; in ftrace_set_regex()
6197 return -EINVAL; in ftrace_set_regex()
6201 return -EINVAL; in ftrace_set_regex()
6204 len = command - func; in ftrace_set_regex()
6214 return -ENOMEM; in ftrace_set_regex()
6222 * ftrace_set_filter - set a function to filter on in ftrace
6226 * @reset: non-zero to reset all filters before applying this filter.
6244 * ftrace_set_notrace - set a function to not trace in ftrace
6248 * @reset: non-zero to reset all filters before applying this filter.
6266 * ftrace_set_global_filter - set a function to filter on with global tracers
6269 * @reset: non-zero to reset all filters before applying this filter.
6281 * ftrace_set_global_notrace - set a function to not trace with global tracers
6284 * @reset: non-zero to reset all filters before applying this filter.
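/*
 * Small sketch of the string-based filter API documented above, applied to a
 * caller-owned ftrace_ops before it is registered.  The my_* names and the
 * "kmem_cache_*"/"kmem_cache_free" patterns are only examples.
 */
static void my_filter_cb(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_ops *op, struct ftrace_regs *fregs)
{
}

static struct ftrace_ops my_filter_ops = {
	.func = my_filter_cb,
};

static int my_setup_filters(void)
{
	int ret;

	/* trace only the kmem_cache entry points... */
	ret = ftrace_set_filter(&my_filter_ops, "kmem_cache_*",
				strlen("kmem_cache_*"), 1);
	if (ret)
		return ret;

	/* ...except for this one */
	return ftrace_set_notrace(&my_filter_ops, "kmem_cache_free",
				  strlen("kmem_cache_free"), 0);
}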
6383 if (!ops->private) { in ftrace_set_early_filter()
6386 ops->private = tr; in ftrace_set_early_filter()
6412 struct seq_file *m = (struct seq_file *)file->private_data; in ftrace_regex_release()
6418 if (file->f_mode & FMODE_READ) { in ftrace_regex_release()
6419 iter = m->private; in ftrace_regex_release()
6422 iter = file->private_data; in ftrace_regex_release()
6424 parser = &iter->parser; in ftrace_regex_release()
6426 int enable = !(iter->flags & FTRACE_ITER_NOTRACE); in ftrace_regex_release()
6428 ftrace_process_regex(iter, parser->buffer, in ftrace_regex_release()
6429 parser->idx, enable); in ftrace_regex_release()
6434 mutex_lock(&iter->ops->func_hash->regex_lock); in ftrace_regex_release()
6436 if (file->f_mode & FMODE_WRITE) { in ftrace_regex_release()
6437 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); in ftrace_regex_release()
6440 orig_hash = &iter->ops->func_hash->filter_hash; in ftrace_regex_release()
6441 if (iter->tr) { in ftrace_regex_release()
6442 if (list_empty(&iter->tr->mod_trace)) in ftrace_regex_release()
6443 iter->hash->flags &= ~FTRACE_HASH_FL_MOD; in ftrace_regex_release()
6445 iter->hash->flags |= FTRACE_HASH_FL_MOD; in ftrace_regex_release()
6448 orig_hash = &iter->ops->func_hash->notrace_hash; in ftrace_regex_release()
6451 ftrace_hash_move_and_update_ops(iter->ops, orig_hash, in ftrace_regex_release()
6452 iter->hash, filter_hash); in ftrace_regex_release()
6456 iter->hash = NULL; in ftrace_regex_release()
6459 mutex_unlock(&iter->ops->func_hash->regex_lock); in ftrace_regex_release()
6460 free_ftrace_hash(iter->hash); in ftrace_regex_release()
6461 if (iter->tr) in ftrace_regex_release()
6462 trace_array_put(iter->tr); in ftrace_regex_release()
6539 struct ftrace_graph_data *fgd = m->private; in __g_next()
6540 struct ftrace_func_entry *entry = fgd->entry; in __g_next()
6542 int i, idx = fgd->idx; in __g_next()
6544 if (*pos >= fgd->hash->count) in __g_next()
6549 fgd->entry = entry; in __g_next()
6556 for (i = idx; i < 1 << fgd->hash->size_bits; i++) { in __g_next()
6557 head = &fgd->hash->buckets[i]; in __g_next()
6559 fgd->entry = entry; in __g_next()
6560 fgd->idx = i; in __g_next()
6576 struct ftrace_graph_data *fgd = m->private; in g_start()
6580 if (fgd->type == GRAPH_FILTER_FUNCTION) in g_start()
6581 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, in g_start()
6584 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, in g_start()
6588 if (ftrace_hash_empty(fgd->hash) && !*pos) in g_start()
6591 fgd->idx = 0; in g_start()
6592 fgd->entry = NULL; in g_start()
6609 struct ftrace_graph_data *fgd = m->private; in g_show()
6611 if (fgd->type == GRAPH_FILTER_FUNCTION) in g_show()
6618 seq_printf(m, "%ps\n", (void *)entry->ip); in g_show()
6641 if (file->f_mode & FMODE_WRITE) { in __ftrace_graph_open()
6644 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) in __ftrace_graph_open()
6645 return -ENOMEM; in __ftrace_graph_open()
6647 if (file->f_flags & O_TRUNC) in __ftrace_graph_open()
6651 fgd->hash); in __ftrace_graph_open()
6653 ret = -ENOMEM; in __ftrace_graph_open()
6658 if (file->f_mode & FMODE_READ) { in __ftrace_graph_open()
6661 struct seq_file *m = file->private_data; in __ftrace_graph_open()
6662 m->private = fgd; in __ftrace_graph_open()
6669 file->private_data = fgd; in __ftrace_graph_open()
6672 if (ret < 0 && file->f_mode & FMODE_WRITE) in __ftrace_graph_open()
6673 trace_parser_put(&fgd->parser); in __ftrace_graph_open()
6675 fgd->new_hash = new_hash; in __ftrace_graph_open()
6678 * All uses of fgd->hash must be taken with the graph_lock in __ftrace_graph_open()
6680 * fgd->hash to be reinitialized when it is taken again. in __ftrace_graph_open()
6682 fgd->hash = NULL; in __ftrace_graph_open()
6694 return -ENODEV; in ftrace_graph_open()
6698 return -ENOMEM; in ftrace_graph_open()
6702 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, in ftrace_graph_open()
6704 fgd->type = GRAPH_FILTER_FUNCTION; in ftrace_graph_open()
6705 fgd->seq_ops = &ftrace_graph_seq_ops; in ftrace_graph_open()
6722 return -ENODEV; in ftrace_graph_notrace_open()
6726 return -ENOMEM; in ftrace_graph_notrace_open()
6730 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, in ftrace_graph_notrace_open()
6732 fgd->type = GRAPH_FILTER_NOTRACE; in ftrace_graph_notrace_open()
6733 fgd->seq_ops = &ftrace_graph_seq_ops; in ftrace_graph_notrace_open()
6751 if (file->f_mode & FMODE_READ) { in ftrace_graph_release()
6752 struct seq_file *m = file->private_data; in ftrace_graph_release()
6754 fgd = m->private; in ftrace_graph_release()
6757 fgd = file->private_data; in ftrace_graph_release()
6761 if (file->f_mode & FMODE_WRITE) { in ftrace_graph_release()
6763 parser = &fgd->parser; in ftrace_graph_release()
6766 ret = ftrace_graph_set_hash(fgd->new_hash, in ftrace_graph_release()
6767 parser->buffer); in ftrace_graph_release()
6772 new_hash = __ftrace_hash_move(fgd->new_hash); in ftrace_graph_release()
6774 ret = -ENOMEM; in ftrace_graph_release()
6780 if (fgd->type == GRAPH_FILTER_FUNCTION) { in ftrace_graph_release()
6807 free_ftrace_hash(fgd->new_hash); in ftrace_graph_release()
6832 return -ENODEV; in ftrace_graph_set_hash()
6836 if (rec->flags & FTRACE_FL_DISABLED) in ftrace_graph_set_hash()
6840 entry = ftrace_lookup_ip(hash, rec->ip); in ftrace_graph_set_hash()
6847 if (add_hash_entry(hash, rec->ip) == NULL) in ftrace_graph_set_hash()
6858 return fail ? -EINVAL : 0; in ftrace_graph_set_hash()
6866 struct ftrace_graph_data *fgd = file->private_data; in ftrace_graph_write()
6873 if (file->f_mode & FMODE_READ) { in ftrace_graph_write()
6874 struct seq_file *m = file->private_data; in ftrace_graph_write()
6875 fgd = m->private; in ftrace_graph_write()
6878 parser = &fgd->parser; in ftrace_graph_write()
6885 ret = ftrace_graph_set_hash(fgd->new_hash, in ftrace_graph_write()
6886 parser->buffer); in ftrace_graph_write()
6937 if (ops->flags & FTRACE_OPS_FL_ENABLED) in ftrace_destroy_filter_files()
6939 ops->flags |= FTRACE_OPS_FL_DELETED; in ftrace_destroy_filter_files()
6981 return -1; in ftrace_cmp_ips()
6991 if (WARN(start[i - 1] > start[i], in test_is_sorted()
6993 (void *)start[i - 1], start[i - 1], in test_is_sorted()
7019 int ret = -ENOMEM; in ftrace_process_locs()
7021 count = end - start; in ftrace_process_locs()
7040 return -ENOMEM; in ftrace_process_locs()
7057 if (WARN_ON(ftrace_pages->next)) { in ftrace_process_locs()
7059 while (ftrace_pages->next) in ftrace_process_locs()
7060 ftrace_pages = ftrace_pages->next; in ftrace_process_locs()
7063 ftrace_pages->next = start_pg; in ftrace_process_locs()
7082 end_offset = (pg->index+1) * sizeof(pg->records[0]); in ftrace_process_locs()
7083 if (end_offset > PAGE_SIZE << pg->order) { in ftrace_process_locs()
7085 if (WARN_ON(!pg->next)) in ftrace_process_locs()
7087 pg = pg->next; in ftrace_process_locs()
7090 rec = &pg->records[pg->index++]; in ftrace_process_locs()
7091 rec->ip = addr; in ftrace_process_locs()
7094 if (pg->next) { in ftrace_process_locs()
7095 pg_unuse = pg->next; in ftrace_process_locs()
7096 pg->next = NULL; in ftrace_process_locs()
7154 if (!op->trampoline || symnum--) in ftrace_get_trampoline_kallsym()
7156 *value = op->trampoline; in ftrace_get_trampoline_kallsym()
7164 return -ERANGE; in ftrace_get_trampoline_kallsym()
7179 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in ops_references_ip()
7187 if (!ftrace_hash_empty(ops->func_hash->filter_hash) && in ops_references_ip()
7188 !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip)) in ops_references_ip()
7192 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip)) in ops_references_ip()
7210 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { in referenced_filters()
7211 if (ops_references_ip(ops, rec->ip)) { in referenced_filters()
7212 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) in referenced_filters()
7214 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) in referenced_filters()
7217 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) in referenced_filters()
7218 rec->flags |= FTRACE_FL_REGS; in referenced_filters()
7219 if (cnt == 1 && ops->trampoline) in referenced_filters()
7220 rec->flags |= FTRACE_FL_TRAMP; in referenced_filters()
7222 rec->flags &= ~FTRACE_FL_TRAMP; in referenced_filters()
7239 for (i = 0; i < pg->index; i++) { in clear_mod_from_hash()
7240 rec = &pg->records[i]; in clear_mod_from_hash()
7241 entry = __ftrace_lookup_ip(hash, rec->ip); in clear_mod_from_hash()
7248 entry->ip = 0; in clear_mod_from_hash()
7259 if (!tr->ops || !tr->ops->func_hash) in clear_mod_from_hashes()
7261 mutex_lock(&tr->ops->func_hash->regex_lock); in clear_mod_from_hashes()
7262 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); in clear_mod_from_hashes()
7263 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); in clear_mod_from_hashes()
7264 mutex_unlock(&tr->ops->func_hash->regex_lock); in clear_mod_from_hashes()
7276 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { in ftrace_free_mod_map()
7277 kfree(mod_func->name); in ftrace_free_mod_map()
7278 list_del(&mod_func->list); in ftrace_free_mod_map()
7300 if (mod_map->mod == mod) { in ftrace_release_mod()
7301 list_del_rcu(&mod_map->list); in ftrace_release_mod()
7302 call_rcu(&mod_map->rcu, ftrace_free_mod_map); in ftrace_release_mod()
7313 rec = &pg->records[0]; in ftrace_release_mod()
7314 if (within_module(rec->ip, mod)) { in ftrace_release_mod()
7326 ftrace_update_tot_cnt -= pg->index; in ftrace_release_mod()
7327 *last_pg = pg->next; in ftrace_release_mod()
7329 pg->next = tmp_page; in ftrace_release_mod()
7332 last_pg = &pg->next; in ftrace_release_mod()
7345 if (pg->records) { in ftrace_release_mod()
7346 free_pages((unsigned long)pg->records, pg->order); in ftrace_release_mod()
7347 ftrace_number_of_pages -= 1 << pg->order; in ftrace_release_mod()
7349 tmp_page = pg->next; in ftrace_release_mod()
7351 ftrace_number_of_groups--; in ftrace_release_mod()
7375 * text to read-only, as we now need to set it back to read-write in ftrace_module_enable()
7389 if (!within_module(rec->ip, mod)) in ftrace_module_enable()
7395 rec->flags = FTRACE_FL_DISABLED; in ftrace_module_enable()
7410 rec->flags &= ~FTRACE_FL_DISABLED; in ftrace_module_enable()
7411 rec->flags += cnt; in ftrace_module_enable()
7430 process_cached_mods(mod->name); in ftrace_module_enable()
7437 if (ftrace_disabled || !mod->num_ftrace_callsites) in ftrace_module_init()
7440 ret = ftrace_process_locs(mod, mod->ftrace_callsites, in ftrace_module_init()
7441 mod->ftrace_callsites + mod->num_ftrace_callsites); in ftrace_module_init()
7444 mod->name); in ftrace_module_init()
7457 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); in save_ftrace_mod_rec()
7465 mod_func->name = kstrdup(str, GFP_KERNEL); in save_ftrace_mod_rec()
7466 if (!mod_func->name) { in save_ftrace_mod_rec()
7471 mod_func->ip = rec->ip - offset; in save_ftrace_mod_rec()
7472 mod_func->size = symsize; in save_ftrace_mod_rec()
7474 mod_map->num_funcs++; in save_ftrace_mod_rec()
7476 list_add_rcu(&mod_func->list, &mod_map->funcs); in save_ftrace_mod_rec()
7489 mod_map->mod = mod; in allocate_ftrace_mod_map()
7490 mod_map->start_addr = start; in allocate_ftrace_mod_map()
7491 mod_map->end_addr = end; in allocate_ftrace_mod_map()
7492 mod_map->num_funcs = 0; in allocate_ftrace_mod_map()
7494 INIT_LIST_HEAD_RCU(&mod_map->funcs); in allocate_ftrace_mod_map()
7496 list_add_rcu(&mod_map->list, &ftrace_mod_maps); in allocate_ftrace_mod_map()
7509 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { in ftrace_func_address_lookup()
7510 if (addr >= mod_func->ip && in ftrace_func_address_lookup()
7511 addr < mod_func->ip + mod_func->size) { in ftrace_func_address_lookup()
7519 *size = found_func->size; in ftrace_func_address_lookup()
7521 *off = addr - found_func->ip; in ftrace_func_address_lookup()
7522 return strscpy(sym, found_func->name, KSYM_NAME_LEN); in ftrace_func_address_lookup()
7541 *modname = mod_map->mod->name; in ftrace_mod_address_lookup()
7561 if (symnum >= mod_map->num_funcs) { in ftrace_mod_get_kallsym()
7562 symnum -= mod_map->num_funcs; in ftrace_mod_get_kallsym()
7566 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { in ftrace_mod_get_kallsym()
7568 symnum--; in ftrace_mod_get_kallsym()
7572 *value = mod_func->ip; in ftrace_mod_get_kallsym()
7574 strscpy(name, mod_func->name, KSYM_NAME_LEN); in ftrace_mod_get_kallsym()
7575 strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); in ftrace_mod_get_kallsym()
7623 entry = ftrace_lookup_ip(hash, func->ip); in clear_func_from_hash()
7630 entry->ip = 0; in clear_func_from_hash()
7640 if (!tr->ops || !tr->ops->func_hash) in clear_func_from_hashes()
7642 mutex_lock(&tr->ops->func_hash->regex_lock); in clear_func_from_hashes()
7643 clear_func_from_hash(func, tr->ops->func_hash->filter_hash); in clear_func_from_hashes()
7644 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); in clear_func_from_hashes()
7645 mutex_unlock(&tr->ops->func_hash->regex_lock); in clear_func_from_hashes()
7661 func->ip = rec->ip; in add_to_clear_hash_list()
7662 list_add(&func->list, clear_list); in add_to_clear_hash_list()
7691 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { in ftrace_free_mem()
7692 if (end < pg->records[0].ip || in ftrace_free_mem()
7693 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) in ftrace_free_mem()
7696 rec = bsearch(&key, pg->records, pg->index, in ftrace_free_mem()
7708 pg->index--; in ftrace_free_mem()
7709 ftrace_update_tot_cnt--; in ftrace_free_mem()
7710 if (!pg->index) { in ftrace_free_mem()
7711 *last_pg = pg->next; in ftrace_free_mem()
7712 pg->next = tmp_page; in ftrace_free_mem()
7720 (pg->index - (rec - pg->records)) * sizeof(*rec)); in ftrace_free_mem()
7765 count = __stop_mcount_loc - __start_mcount_loc; in ftrace_init()
7801 unsigned long trampoline = ops->trampoline; in ftrace_update_trampoline()
7804 if (ops->trampoline && ops->trampoline != trampoline && in ftrace_update_trampoline()
7805 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { in ftrace_update_trampoline()
7809 ops->trampoline, ops->trampoline_size, false, in ftrace_update_trampoline()
7815 perf_event_text_poke((void *)ops->trampoline, NULL, 0, in ftrace_update_trampoline()
7816 (void *)ops->trampoline, in ftrace_update_trampoline()
7817 ops->trampoline_size); in ftrace_update_trampoline()
7823 if (tr->flags & TRACE_ARRAY_FL_MOD_INIT) in ftrace_init_trace_array()
7826 INIT_LIST_HEAD(&tr->func_probes); in ftrace_init_trace_array()
7827 INIT_LIST_HEAD(&tr->mod_trace); in ftrace_init_trace_array()
7828 INIT_LIST_HEAD(&tr->mod_notrace); in ftrace_init_trace_array()
7830 tr->flags |= TRACE_ARRAY_FL_MOD_INIT; in ftrace_init_trace_array()
7858 tr->ops = &global_ops; in ftrace_init_global_array_ops()
7862 init_array_fgraph_ops(tr, tr->ops); in ftrace_init_global_array_ops()
7868 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { in ftrace_init_array_ops()
7869 if (WARN_ON(tr->ops->func != ftrace_stub)) in ftrace_init_array_ops()
7871 tr->ops->func); in ftrace_init_array_ops()
7873 tr->ops->func = func; in ftrace_init_array_ops()
7874 tr->ops->private = tr; in ftrace_init_array_ops()
7879 tr->ops->func = ftrace_stub; in ftrace_reset_array_ops()
7901 if (op->flags & FTRACE_OPS_FL_STUB) in __ftrace_ops_list_func()
7908 * If any of the above fails then the op->func() is not executed. in __ftrace_ops_list_func()
7910 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && in __ftrace_ops_list_func()
7912 if (FTRACE_WARN_ON(!op->func)) { in __ftrace_ops_list_func()
7916 op->func(ip, parent_ip, op, fregs); in __ftrace_ops_list_func()
7968 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) in ftrace_ops_assist_func()
7969 op->func(ip, parent_ip, op, fregs); in ftrace_ops_assist_func()
7976 * ftrace_ops_get_func - get the function a trampoline should call
7979 * Normally the mcount trampoline will call the ops->func, but there
7992 if (ops->flags & (FTRACE_OPS_FL_RECURSION | in ftrace_ops_get_func()
7996 return ops->func; in ftrace_ops_get_func()
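/*
 * What the choice above means for a callback: an ops that sets
 * FTRACE_OPS_FL_RECURSION is wrapped by ftrace_ops_assist_func() and gets
 * recursion protection for free; one that does not set it is expected to
 * guard itself, roughly like this hypothetical callback (my_* names are
 * illustrative only):
 */
static void my_raw_cb(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;		/* recursed into ourselves; bail out */

	/* ... the real work goes here ... */

	ftrace_test_recursion_unlock(bit);
}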
8009 pid_list = rcu_dereference_sched(tr->function_pids); in ftrace_filter_pid_sched_switch_probe()
8010 no_pid_list = rcu_dereference_sched(tr->function_no_pids); in ftrace_filter_pid_sched_switch_probe()
8013 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ftrace_filter_pid_sched_switch_probe()
8016 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ftrace_filter_pid_sched_switch_probe()
8017 next->pid); in ftrace_filter_pid_sched_switch_probe()
8028 pid_list = rcu_dereference_sched(tr->function_pids); in ftrace_pid_follow_sched_process_fork()
8031 pid_list = rcu_dereference_sched(tr->function_no_pids); in ftrace_pid_follow_sched_process_fork()
8041 pid_list = rcu_dereference_sched(tr->function_pids); in ftrace_pid_follow_sched_process_exit()
8044 pid_list = rcu_dereference_sched(tr->function_no_pids); in ftrace_pid_follow_sched_process_exit()
8069 pid_list = rcu_dereference_protected(tr->function_pids, in clear_ftrace_pids()
8071 no_pid_list = rcu_dereference_protected(tr->function_no_pids, in clear_ftrace_pids()
8082 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; in clear_ftrace_pids()
8086 rcu_assign_pointer(tr->function_pids, NULL); in clear_ftrace_pids()
8089 rcu_assign_pointer(tr->function_no_pids, NULL); in clear_ftrace_pids()
8128 struct trace_array *tr = m->private; in fpid_start()
8133 pid_list = rcu_dereference_sched(tr->function_pids); in fpid_start()
8143 struct trace_array *tr = m->private; in fpid_next()
8144 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); in fpid_next()
8181 struct trace_array *tr = m->private; in fnpid_start()
8186 pid_list = rcu_dereference_sched(tr->function_no_pids); in fnpid_start()
8196 struct trace_array *tr = m->private; in fnpid_next()
8197 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids); in fnpid_next()
8216 struct trace_array *tr = inode->i_private; in pid_open()
8224 if ((file->f_mode & FMODE_WRITE) && in pid_open()
8225 (file->f_flags & O_TRUNC)) in pid_open()
8238 return -EINVAL; in pid_open()
8245 m = file->private_data; in pid_open()
8247 m->private = tr; in pid_open()
8275 pid_list = rcu_dereference_protected(tr->function_pids, in ignore_task_cpu()
8277 no_pid_list = rcu_dereference_protected(tr->function_no_pids, in ignore_task_cpu()
8281 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ignore_task_cpu()
8284 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ignore_task_cpu()
8285 current->pid); in ignore_task_cpu()
8292 struct seq_file *m = filp->private_data; in pid_write()
8293 struct trace_array *tr = m->private; in pid_write()
8306 filtered_pids = rcu_dereference_protected(tr->function_pids, in pid_write()
8308 other_pids = rcu_dereference_protected(tr->function_no_pids, in pid_write()
8312 filtered_pids = rcu_dereference_protected(tr->function_no_pids, in pid_write()
8314 other_pids = rcu_dereference_protected(tr->function_pids, in pid_write()
8319 return -EINVAL; in pid_write()
8328 rcu_assign_pointer(tr->function_pids, pid_list); in pid_write()
8331 rcu_assign_pointer(tr->function_no_pids, pid_list); in pid_write()
8376 struct trace_array *tr = inode->i_private; in ftrace_pid_release()
8411 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); in ftrace_init_tracefs_toplevel()
8418 * ftrace_kill - kill ftrace
8422 * from a non-atomic section, use ftrace_kill.
8433 * ftrace_is_dead - Test if ftrace is dead or not.
8464 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) in prepare_direct_functions_for_ipmodify()
8467 hash = ops->func_hash->filter_hash; in prepare_direct_functions_for_ipmodify()
8468 size = 1 << hash->size_bits; in prepare_direct_functions_for_ipmodify()
8470 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in prepare_direct_functions_for_ipmodify()
8471 unsigned long ip = entry->ip; in prepare_direct_functions_for_ipmodify()
8476 if (!(op->flags & FTRACE_OPS_FL_DIRECT)) in prepare_direct_functions_for_ipmodify()
8486 if (!op->ops_func) in prepare_direct_functions_for_ipmodify()
8487 return -EBUSY; in prepare_direct_functions_for_ipmodify()
8489 ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER); in prepare_direct_functions_for_ipmodify()
8511 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) in cleanup_direct_functions_after_ipmodify()
8516 hash = ops->func_hash->filter_hash; in cleanup_direct_functions_after_ipmodify()
8517 size = 1 << hash->size_bits; in cleanup_direct_functions_after_ipmodify()
8519 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in cleanup_direct_functions_after_ipmodify()
8520 unsigned long ip = entry->ip; in cleanup_direct_functions_after_ipmodify()
8525 if (!(op->flags & FTRACE_OPS_FL_DIRECT)) in cleanup_direct_functions_after_ipmodify()
8535 if (found_op && op->ops_func) in cleanup_direct_functions_after_ipmodify()
8536 op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER); in cleanup_direct_functions_after_ipmodify()
8580 * register_ftrace_function - register a function for profiling
8586 * Note: @ops->func and all the functions it calls must be labeled
8608 * unregister_ftrace_function - unregister a function for profiling.
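/*
 * The classic minimal consumer of the two entry points documented above.
 * The my_* names are hypothetical; as the note above says, the callback and
 * everything it calls must be notrace (or the ops can set
 * FTRACE_OPS_FL_RECURSION and let ftrace_ops_assist_func() guard it).
 */
static void notrace my_trace_cb(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op,
				struct ftrace_regs *fregs)
{
	/* keep this tiny: it runs on every traced function entry */
}

static struct ftrace_ops my_trace_ops = {
	.func	= my_trace_cb,
	.flags	= FTRACE_OPS_FL_RECURSION,
};

static int my_tracing_start(void)
{
	/* optional: narrow the scope before enabling */
	ftrace_set_filter(&my_trace_ops, "schedule", strlen("schedule"), 1);
	return register_ftrace_function(&my_trace_ops);
}

static void my_tracing_stop(void)
{
	unregister_ftrace_function(&my_trace_ops);
}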
8651 sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp); in kallsyms_callback()
8655 idx = sym - args->syms; in kallsyms_callback()
8656 if (args->addrs[idx]) in kallsyms_callback()
8662 args->addrs[idx] = addr; in kallsyms_callback()
8663 args->found++; in kallsyms_callback()
8664 return args->found == args->cnt ? 1 : 0; in kallsyms_callback()
8668 * ftrace_lookup_symbols - Lookup addresses for array of symbols
8680 * Returns: 0 if all provided symbols are found, -ESRCH otherwise.
8697 return found_all ? 0 : -ESRCH; in ftrace_lookup_symbols()
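/*
 * Sketch of resolving a pre-sorted batch of names, in the style of the BPF
 * kprobe-multi attach path.  The array must already be in sort order, since
 * kallsyms_callback() above bsearch()es it; my_resolve_pair() is a
 * hypothetical helper.
 */
static int my_resolve_pair(unsigned long addrs[2])
{
	static const char *syms[] = {	/* kept sorted */
		"schedule",
		"schedule_timeout",
	};

	return ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
}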
8746 if (op->flags & FTRACE_OPS_FL_PERMANENT) in is_permanent_ops_registered()
8762 return -ENODEV; in ftrace_enable_sysctl()
8781 return -EBUSY; in ftrace_enable_sysctl()