
1 // SPDX-License-Identifier: GPL-2.0
3 * Infrastructure for profiling code inserted by 'gcc -pg'.
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
8 * Originally ported from the -rt patch by:
13 * Copyright (C) 2004-2006 Ingo Molnar
107 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private) in ftrace_pids_enabled()
110 tr = ops->private; in ftrace_pids_enabled()
112 return tr->function_pids != NULL || tr->function_no_pids != NULL; in ftrace_pids_enabled()
163 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { in ftrace_ops_init()
164 mutex_init(&ops->local_hash.regex_lock); in ftrace_ops_init()
165 INIT_LIST_HEAD(&ops->subop_list); in ftrace_ops_init()
166 ops->func_hash = &ops->local_hash; in ftrace_ops_init()
167 ops->flags |= FTRACE_OPS_FL_INITIALIZED; in ftrace_ops_init()
176 struct trace_array *tr = op->private; in ftrace_pid_func()
180 pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); in ftrace_pid_func()
184 pid != current->pid) in ftrace_pid_func()
188 op->saved_func(ip, parent_ip, op, fregs); in ftrace_pid_func()
203 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || in ftrace_ops_get_list_func()
231 } else if (rcu_dereference_protected(ftrace_ops_list->next, in update_ftrace_function()
290 rcu_assign_pointer(ops->next, *list); in add_ftrace_ops()
295 * the ops->next pointer is valid before another CPU sees in add_ftrace_ops()
312 rcu_dereference_protected(ops->next, in remove_ftrace_ops()
318 for (p = list; *p != &ftrace_list_end; p = &(*p)->next) in remove_ftrace_ops()
323 return -1; in remove_ftrace_ops()
325 *p = (*p)->next; in remove_ftrace_ops()
333 if (ops->flags & FTRACE_OPS_FL_DELETED) in __register_ftrace_function()
334 return -EINVAL; in __register_ftrace_function()
336 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) in __register_ftrace_function()
337 return -EBUSY; in __register_ftrace_function()
345 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && in __register_ftrace_function()
346 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) in __register_ftrace_function()
347 return -EINVAL; in __register_ftrace_function()
349 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) in __register_ftrace_function()
350 ops->flags |= FTRACE_OPS_FL_SAVE_REGS; in __register_ftrace_function()
352 if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT)) in __register_ftrace_function()
353 return -EBUSY; in __register_ftrace_function()
356 ops->flags |= FTRACE_OPS_FL_DYNAMIC; in __register_ftrace_function()
361 ops->saved_func = ops->func; in __register_ftrace_function()
364 ops->func = ftrace_pid_func; in __register_ftrace_function()
378 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) in __unregister_ftrace_function()
379 return -EBUSY; in __unregister_ftrace_function()
389 ops->func = ops->saved_func; in __unregister_ftrace_function()
403 if (op->flags & FTRACE_OPS_FL_PID) { in ftrace_update_pid_func()
404 op->func = ftrace_pids_enabled(op) ? in ftrace_update_pid_func()
405 ftrace_pid_func : op->saved_func; in ftrace_update_pid_func()
441 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
448 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
468 if ((void *)rec >= (void *)&pg->records[pg->index]) { in function_stat_next()
469 pg = pg->next; in function_stat_next()
472 rec = &pg->records[0]; in function_stat_next()
473 if (!rec->counter) in function_stat_next()
485 if (!stat || !stat->start) in function_stat_start()
488 return function_stat_next(&stat->start->records[0], 0); in function_stat_start()
498 if (a->time < b->time) in function_stat_cmp()
499 return -1; in function_stat_cmp()
500 if (a->time > b->time) in function_stat_cmp()
512 if (a->counter < b->counter) in function_stat_cmp()
513 return -1; in function_stat_cmp()
514 if (a->counter > b->counter) in function_stat_cmp()
526 " -------- " in function_stat_headers()
527 "--- ---- --- ---\n"); in function_stat_headers()
530 " -------- ---\n"); in function_stat_headers()
548 if (unlikely(rec->counter == 0)) in function_stat_show()
549 return -EBUSY; in function_stat_show()
552 avg = div64_ul(rec->time, rec->counter); in function_stat_show()
557 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); in function_stat_show()
558 seq_printf(m, " %-30.30s %10lu", str, rec->counter); in function_stat_show()
565 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) in function_stat_show()
567 * Divide only by 1000 for ns^2 -> us^2 conversion. in function_stat_show()
571 stddev_denom = rec->counter * (rec->counter - 1) * 1000; in function_stat_show()
573 stddev = rec->counter * rec->time_squared - in function_stat_show()
574 rec->time * rec->time; in function_stat_show()
579 trace_print_graph_duration(rec->time, &s); in function_stat_show()
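
The record only accumulates rec->time and rec->time_squared; the average and s^2 = (n * Sum(x_i^2) - (Sum x_i)^2) / (n * (n - 1)) are computed at read time, as above. A standalone userspace sketch of the same arithmetic (the sample durations are hypothetical, not taken from the kernel):

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* per-call durations in ns -- made-up samples */
	unsigned long long x[] = { 120, 135, 128, 142, 131 };
	unsigned long long n = sizeof(x) / sizeof(x[0]);
	unsigned long long sum = 0, sum_sq = 0;

	for (unsigned long long i = 0; i < n; i++) {
		sum += x[i];		/* rec->time analogue         */
		sum_sq += x[i] * x[i];	/* rec->time_squared analogue */
	}

	/* s^2 = (n * sum(x_i^2) - (sum x_i)^2) / (n * (n - 1)) */
	double variance = (double)(n * sum_sq - sum * sum) / (n * (n - 1));

	printf("avg %llu ns, stddev %.2f ns\n", sum / n, sqrt(variance));
	return 0;
}
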
595 pg = stat->pages = stat->start; in ftrace_profile_reset()
598 memset(pg->records, 0, PROFILE_RECORDS_SIZE); in ftrace_profile_reset()
599 pg->index = 0; in ftrace_profile_reset()
600 pg = pg->next; in ftrace_profile_reset()
603 memset(stat->hash, 0, in ftrace_profile_reset()
615 if (stat->pages) in ftrace_profile_pages_init()
618 stat->pages = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
619 if (!stat->pages) in ftrace_profile_pages_init()
620 return -ENOMEM; in ftrace_profile_pages_init()
635 pg = stat->start = stat->pages; in ftrace_profile_pages_init()
640 pg->next = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
641 if (!pg->next) in ftrace_profile_pages_init()
643 pg = pg->next; in ftrace_profile_pages_init()
649 pg = stat->start; in ftrace_profile_pages_init()
653 pg = pg->next; in ftrace_profile_pages_init()
657 stat->pages = NULL; in ftrace_profile_pages_init()
658 stat->start = NULL; in ftrace_profile_pages_init()
660 return -ENOMEM; in ftrace_profile_pages_init()
670 if (stat->hash) { in ftrace_profile_init_cpu()
682 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL); in ftrace_profile_init_cpu()
684 if (!stat->hash) in ftrace_profile_init_cpu()
685 return -ENOMEM; in ftrace_profile_init_cpu()
689 kfree(stat->hash); in ftrace_profile_init_cpu()
690 stat->hash = NULL; in ftrace_profile_init_cpu()
691 return -ENOMEM; in ftrace_profile_init_cpu()
720 hhd = &stat->hash[key]; in ftrace_find_profiled_func()
726 if (rec->ip == ip) in ftrace_find_profiled_func()
738 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); in ftrace_add_profile()
739 hlist_add_head_rcu(&rec->node, &stat->hash[key]); in ftrace_add_profile()
751 if (atomic_inc_return(&stat->disabled) != 1) in ftrace_profile_alloc()
762 if (stat->pages->index == PROFILES_PER_PAGE) { in ftrace_profile_alloc()
763 if (!stat->pages->next) in ftrace_profile_alloc()
765 stat->pages = stat->pages->next; in ftrace_profile_alloc()
768 rec = &stat->pages->records[stat->pages->index++]; in ftrace_profile_alloc()
769 rec->ip = ip; in ftrace_profile_alloc()
773 atomic_dec(&stat->disabled); in ftrace_profile_alloc()
791 if (!stat->hash || !ftrace_profile_enabled) in function_profile_call()
801 rec->counter++; in function_profile_call()
824 function_profile_call(trace->func, 0, NULL, NULL); in profile_graph_entry()
827 if (!current->ret_stack) in profile_graph_entry()
830 profile_data = fgraph_reserve_data(gops->idx, sizeof(*profile_data)); in profile_graph_entry()
834 profile_data->subtime = 0; in profile_graph_entry()
835 profile_data->sleeptime = current->ftrace_sleeptime; in profile_graph_entry()
836 profile_data->calltime = trace_clock_local(); in profile_graph_entry()
855 if (!stat->hash || !ftrace_profile_enabled) in profile_graph_return()
858 profile_data = fgraph_retrieve_data(gops->idx, &size); in profile_graph_return()
861 if (!profile_data || !profile_data->calltime) in profile_graph_return()
864 calltime = rettime - profile_data->calltime; in profile_graph_return()
867 if (current->ftrace_sleeptime) in profile_graph_return()
868 calltime -= current->ftrace_sleeptime - profile_data->sleeptime; in profile_graph_return()
875 parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1); in profile_graph_return()
877 parent_data->subtime += calltime; in profile_graph_return()
879 if (profile_data->subtime && profile_data->subtime < calltime) in profile_graph_return()
880 calltime -= profile_data->subtime; in profile_graph_return()
885 rec = ftrace_find_profiled_func(stat, trace->func); in profile_graph_return()
887 rec->time += calltime; in profile_graph_return()
888 rec->time_squared += calltime * calltime; in profile_graph_return()
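
Putting the entry/return bookkeeping together: depending on the sleep-time and graph-time trace options, the sleep interval and the children's accumulated subtime are subtracted from the raw interval, and only what remains is charged to rec->time. A userspace sketch with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* hypothetical timestamps/intervals, in ns */
	unsigned long long calltime_at_entry = 10000, rettime = 11000;
	unsigned long long slept = 200;		/* slept inside the call  */
	unsigned long long subtime = 300;	/* time spent in children */

	unsigned long long calltime = rettime - calltime_at_entry;

	calltime -= slept;	/* applied when the sleep-time option is off */
	calltime -= subtime;	/* applied when the graph-time option is off */

	printf("charged to rec->time: %llu ns\n", calltime);	/* 500 */
	return 0;
}
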
1012 stat->stat = function_stats; in ftrace_profile_tracefs()
1013 stat->stat.name = name; in ftrace_profile_tracefs()
1014 ret = register_stat_tracer(&stat->stat); in ftrace_profile_tracefs()
1094 if (op->trampoline && op->trampoline_size) in ftrace_ops_trampoline()
1095 if (addr >= op->trampoline && in ftrace_ops_trampoline()
1096 addr < op->trampoline + op->trampoline_size) { in ftrace_ops_trampoline()
1133 if (hash->size_bits > 0) in ftrace_hash_key()
1134 return hash_long(ip, hash->size_bits); in ftrace_hash_key()
1148 hhd = &hash->buckets[key]; in __ftrace_lookup_ip()
1151 if (entry->ip == ip) in __ftrace_lookup_ip()
1158 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
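
The lookup hashes the ip into one of 1 << size_bits buckets and walks that chain comparing entry->ip. A minimal userspace sketch of the same structure (the types and the hash function are simplified stand-ins, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

#define SIZE_BITS 4
#define NBUCKETS  (1 << SIZE_BITS)

struct entry {
	unsigned long ip;
	struct entry *next;
};

static struct entry *buckets[NBUCKETS];

/* stand-in for hash_long(ip, size_bits) */
static unsigned int ip_key(unsigned long ip)
{
	return (ip >> 4) & (NBUCKETS - 1);
}

static struct entry *lookup_ip(unsigned long ip)
{
	for (struct entry *e = buckets[ip_key(ip)]; e; e = e->next)
		if (e->ip == ip)
			return e;
	return NULL;
}

static void add_ip(unsigned long ip)
{
	struct entry *e = calloc(1, sizeof(*e));

	e->ip = ip;
	e->next = buckets[ip_key(ip)];
	buckets[ip_key(ip)] = e;
}

int main(void)
{
	add_ip(0x81000010UL);
	printf("hit: %d  miss: %d\n",
	       lookup_ip(0x81000010UL) != NULL,
	       lookup_ip(0x81000020UL) != NULL);
	return 0;
}
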
1182 key = ftrace_hash_key(hash, entry->ip); in __add_hash_entry()
1183 hhd = &hash->buckets[key]; in __add_hash_entry()
1184 hlist_add_head(&entry->hlist, hhd); in __add_hash_entry()
1185 hash->count++; in __add_hash_entry()
1197 entry->ip = ip; in add_hash_entry()
1207 hlist_del(&entry->hlist); in free_hash_entry()
1209 hash->count--; in free_hash_entry()
1216 hlist_del_rcu(&entry->hlist); in remove_hash_entry()
1217 hash->count--; in remove_hash_entry()
1225 int size = 1 << hash->size_bits; in ftrace_hash_clear()
1228 if (!hash->count) in ftrace_hash_clear()
1232 hhd = &hash->buckets[i]; in ftrace_hash_clear()
1236 FTRACE_WARN_ON(hash->count); in ftrace_hash_clear()
1241 list_del(&ftrace_mod->list); in free_ftrace_mod()
1242 kfree(ftrace_mod->module); in free_ftrace_mod()
1243 kfree(ftrace_mod->func); in free_ftrace_mod()
1266 kfree(hash->buckets); in free_ftrace_hash()
1282 call_rcu(&hash->rcu, __free_ftrace_hash_rcu); in free_ftrace_hash_rcu()
1286 * ftrace_free_filter - remove all filters for an ftrace_ops
1292 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) in ftrace_free_filter()
1294 free_ftrace_hash(ops->func_hash->filter_hash); in ftrace_free_filter()
1295 free_ftrace_hash(ops->func_hash->notrace_hash); in ftrace_free_filter()
1296 ops->func_hash->filter_hash = EMPTY_HASH; in ftrace_free_filter()
1297 ops->func_hash->notrace_hash = EMPTY_HASH; in ftrace_free_filter()
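
ftrace_free_filter() may only run once the ops is no longer enabled (hence the WARN above); a sketch of the usual teardown order for a dynamically allocated ops, assuming it was registered earlier:

	unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	kfree(ops);
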
1311 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); in alloc_ftrace_hash()
1313 if (!hash->buckets) { in alloc_ftrace_hash()
1318 hash->size_bits = size_bits; in alloc_ftrace_hash()
1329 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; in ftrace_add_mod()
1333 return -ENOMEM; in ftrace_add_mod()
1335 INIT_LIST_HEAD(&ftrace_mod->list); in ftrace_add_mod()
1336 ftrace_mod->func = kstrdup(func, GFP_KERNEL); in ftrace_add_mod()
1337 ftrace_mod->module = kstrdup(module, GFP_KERNEL); in ftrace_add_mod()
1338 ftrace_mod->enable = enable; in ftrace_add_mod()
1340 if (!ftrace_mod->func || !ftrace_mod->module) in ftrace_add_mod()
1343 list_add(&ftrace_mod->list, mod_head); in ftrace_add_mod()
1350 return -ENOMEM; in ftrace_add_mod()
1366 new_hash->flags = hash->flags; in alloc_and_copy_ftrace_hash()
1372 size = 1 << hash->size_bits; in alloc_and_copy_ftrace_hash()
1374 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in alloc_and_copy_ftrace_hash()
1375 if (add_hash_entry(new_hash, entry->ip) == NULL) in alloc_and_copy_ftrace_hash()
1380 FTRACE_WARN_ON(new_hash->count != hash->count); in alloc_and_copy_ftrace_hash()
1422 new_hash->flags = src->flags; in __move_hash()
1424 size = 1 << src->size_bits; in __move_hash()
1426 hhd = &src->buckets[i]; in __move_hash()
1439 int size = src->count; in __ftrace_hash_move()
1451 * ftrace_hash_move - move a new hash to a filter and do updates
1478 if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable) in ftrace_hash_move()
1479 return -EINVAL; in ftrace_hash_move()
1483 return -ENOMEM; in ftrace_hash_move()
1517 return (ftrace_hash_empty(hash->filter_hash) || in hash_contains_ip()
1518 __ftrace_lookup_ip(hash->filter_hash, ip)) && in hash_contains_ip()
1519 (ftrace_hash_empty(hash->notrace_hash) || in hash_contains_ip()
1520 !__ftrace_lookup_ip(hash->notrace_hash, ip)); in hash_contains_ip()
1525 * the ops->func or not.
1527 * It's a match if the ip is in the ops->filter_hash or
1530 * the ip is not in the ops->notrace_hash.
1547 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) in ftrace_ops_test()
1551 rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash); in ftrace_ops_test()
1552 rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash); in ftrace_ops_test()
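
A record is called for this ops only when both checks in hash_contains_ip() pass: the filter hash is empty (match everything) or contains the ip, and the notrace hash is empty or does not contain it. A tiny standalone illustration of that predicate:

#include <stdio.h>

static int hash_contains(int filter_empty, int ip_in_filter,
			 int notrace_empty, int ip_in_notrace)
{
	return (filter_empty || ip_in_filter) &&
	       (notrace_empty || !ip_in_notrace);
}

int main(void)
{
	/* no filter set, but the ip is listed in notrace -> skipped */
	printf("%d\n", hash_contains(1, 0, 0, 1));	/* prints 0 */
	/* ip is in the filter and notrace is empty -> traced */
	printf("%d\n", hash_contains(0, 1, 1, 0));	/* prints 1 */
	return 0;
}
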
1567 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1569 for (_____i = 0; _____i < pg->index; _____i++) { \
1570 rec = &pg->records[_____i];
1582 if (key->flags < rec->ip) in ftrace_cmp_recs()
1583 return -1; in ftrace_cmp_recs()
1584 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE) in ftrace_cmp_recs()
1598 for (pg = ftrace_pages_start; pg; pg = pg->next) { in lookup_rec()
1599 if (pg->index == 0 || in lookup_rec()
1600 end < pg->records[0].ip || in lookup_rec()
1601 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) in lookup_rec()
1603 rec = bsearch(&key, pg->records, pg->index, in lookup_rec()
1613 * ftrace_location_range - return the first address of a traced location
1619 * Returns: rec->ip if the related ftrace location is at least partly within
1632 ip = rec->ip; in ftrace_location_range()
1639 * ftrace_location - return the ftrace location
1660 loc = ftrace_location_range(ip, ip + size - 1); in ftrace_location()
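
Callers such as kprobes use these helpers to ask whether an address falls inside a patched call site; a sketch (kernel context, addr is assumed to be in scope):

	unsigned long faddr;

	faddr = ftrace_location(addr);
	if (faddr)
		pr_debug("%pS is inside an ftrace call site starting at %pS\n",
			 (void *)addr, (void *)faddr);
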
1666 * ftrace_text_reserved - return true if range contains an ftrace location
1692 ops != &ftrace_list_end; ops = ops->next) { in test_rec_ops_needs_regs()
1693 /* pass rec in as regs to have non-NULL val */ in test_rec_ops_needs_regs()
1694 if (ftrace_ops_test(ops, rec->ip, rec)) { in test_rec_ops_needs_regs()
1695 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { in test_rec_ops_needs_regs()
1720 return rec->flags & FTRACE_FL_DISABLED && in skip_record()
1721 !(rec->flags & FTRACE_FL_ENABLED); in skip_record()
1746 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in __ftrace_hash_rec_update()
1753 hash = ops->func_hash->filter_hash; in __ftrace_hash_rec_update()
1754 notrace_hash = ops->func_hash->notrace_hash; in __ftrace_hash_rec_update()
1771 if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip)) in __ftrace_hash_rec_update()
1774 in_hash = !!ftrace_lookup_ip(hash, rec->ip); in __ftrace_hash_rec_update()
1775 in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip); in __ftrace_hash_rec_update()
1788 rec->flags++; in __ftrace_hash_rec_update()
1792 if (ops->flags & FTRACE_OPS_FL_DIRECT) in __ftrace_hash_rec_update()
1793 rec->flags |= FTRACE_FL_DIRECT; in __ftrace_hash_rec_update()
1800 if (ftrace_rec_count(rec) == 1 && ops->trampoline) in __ftrace_hash_rec_update()
1801 rec->flags |= FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1809 rec->flags &= ~FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1815 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) in __ftrace_hash_rec_update()
1816 rec->flags |= FTRACE_FL_REGS; in __ftrace_hash_rec_update()
1820 rec->flags--; in __ftrace_hash_rec_update()
1828 if (ops->flags & FTRACE_OPS_FL_DIRECT) in __ftrace_hash_rec_update()
1829 rec->flags &= ~FTRACE_FL_DIRECT; in __ftrace_hash_rec_update()
1838 rec->flags & FTRACE_FL_REGS && in __ftrace_hash_rec_update()
1839 ops->flags & FTRACE_OPS_FL_SAVE_REGS) { in __ftrace_hash_rec_update()
1841 rec->flags &= ~FTRACE_FL_REGS; in __ftrace_hash_rec_update()
1853 rec->flags |= FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1855 rec->flags &= ~FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1864 * If the rec has a single associated ops, and ops->func can be in __ftrace_hash_rec_update()
1869 ftrace_ops_get_func(ops) == ops->func) in __ftrace_hash_rec_update()
1870 rec->flags |= FTRACE_FL_CALL_OPS; in __ftrace_hash_rec_update()
1872 rec->flags &= ~FTRACE_FL_CALL_OPS; in __ftrace_hash_rec_update()
1880 if (!all && count == hash->count) in __ftrace_hash_rec_update()
1916 * ops->hash = new_hash
1930 if (ops->func_hash != &global_ops.local_hash) in ftrace_hash_rec_update_modify()
1941 if (op->func_hash == &global_ops.local_hash) in ftrace_hash_rec_update_modify()
1958 * or if no update is needed, -EBUSY if it detects a conflict of the flag
1959 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1961 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1962 * - If the hash is EMPTY_HASH, it hits nothing
1963 * - Anything else hits the recs which match the hash entries.
1968 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
1982 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in __ftrace_hash_update_ipmodify()
1985 is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
1986 is_direct = ops->flags & FTRACE_OPS_FL_DIRECT; in __ftrace_hash_update_ipmodify()
2001 return -EINVAL; in __ftrace_hash_update_ipmodify()
2003 /* Update rec->flags */ in __ftrace_hash_update_ipmodify()
2006 if (rec->flags & FTRACE_FL_DISABLED) in __ftrace_hash_update_ipmodify()
2010 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); in __ftrace_hash_update_ipmodify()
2011 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); in __ftrace_hash_update_ipmodify()
2016 if (rec->flags & FTRACE_FL_IPMODIFY) { in __ftrace_hash_update_ipmodify()
2023 FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT); in __ftrace_hash_update_ipmodify()
2031 if (!ops->ops_func) in __ftrace_hash_update_ipmodify()
2032 return -EBUSY; in __ftrace_hash_update_ipmodify()
2033 ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF); in __ftrace_hash_update_ipmodify()
2037 rec->flags |= FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
2040 rec->flags &= ~FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
2052 if (rec->flags & FTRACE_FL_DISABLED) in __ftrace_hash_update_ipmodify()
2056 return -EBUSY; in __ftrace_hash_update_ipmodify()
2058 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); in __ftrace_hash_update_ipmodify()
2059 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); in __ftrace_hash_update_ipmodify()
2064 rec->flags &= ~FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
2066 rec->flags |= FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
2069 return -EBUSY; in __ftrace_hash_update_ipmodify()
2074 struct ftrace_hash *hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_enable()
2085 struct ftrace_hash *hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_disable()
2096 struct ftrace_hash *old_hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_update()
2144 * ftrace_bug - report and shutdown function tracer
2151 * EFAULT - if the problem happens on reading the @ip address
2152 * EINVAL - if what is read at @ip is not what was expected
2153 * EPERM - if the problem happens on writing to the @ip address
2157 unsigned long ip = rec ? rec->ip : 0; in ftrace_bug()
2159 pr_info("------------[ ftrace bug ]------------\n"); in ftrace_bug()
2162 case -EFAULT: in ftrace_bug()
2166 case -EINVAL: in ftrace_bug()
2176 case -EPERM: in ftrace_bug()
2188 pr_info("ftrace record flags: %lx\n", rec->flags); in ftrace_bug()
2190 rec->flags & FTRACE_FL_REGS ? " R" : " ", in ftrace_bug()
2191 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " "); in ftrace_bug()
2192 if (rec->flags & FTRACE_FL_TRAMP_EN) { in ftrace_bug()
2197 (void *)ops->trampoline, in ftrace_bug()
2198 (void *)ops->func); in ftrace_bug()
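
The error codes above come straight from the arch patching helpers; a sketch of the calling pattern (mirroring how ftrace_replace_code() reports failures, with rec and addr assumed to be in scope):

	int ret = ftrace_make_call(rec, addr);

	if (ret) {
		/* ret is one of -EFAULT/-EINVAL/-EPERM from the arch code */
		ftrace_bug(ret, rec);
		return;
	}
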
2242 if (!(rec->flags & FTRACE_FL_REGS) != in ftrace_check_record()
2243 !(rec->flags & FTRACE_FL_REGS_EN)) in ftrace_check_record()
2246 if (!(rec->flags & FTRACE_FL_TRAMP) != in ftrace_check_record()
2247 !(rec->flags & FTRACE_FL_TRAMP_EN)) in ftrace_check_record()
2262 if (!(rec->flags & FTRACE_FL_DIRECT) != in ftrace_check_record()
2263 !(rec->flags & FTRACE_FL_DIRECT_EN)) in ftrace_check_record()
2265 } else if (rec->flags & FTRACE_FL_DIRECT_EN) { in ftrace_check_record()
2275 if (!(rec->flags & FTRACE_FL_CALL_OPS) != in ftrace_check_record()
2276 !(rec->flags & FTRACE_FL_CALL_OPS_EN)) in ftrace_check_record()
2278 } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) { in ftrace_check_record()
2284 if ((rec->flags & FTRACE_FL_ENABLED) == flag) in ftrace_check_record()
2289 flag ^= rec->flags & FTRACE_FL_ENABLED; in ftrace_check_record()
2292 rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED; in ftrace_check_record()
2294 if (rec->flags & FTRACE_FL_REGS) in ftrace_check_record()
2295 rec->flags |= FTRACE_FL_REGS_EN; in ftrace_check_record()
2297 rec->flags &= ~FTRACE_FL_REGS_EN; in ftrace_check_record()
2300 if (rec->flags & FTRACE_FL_TRAMP) in ftrace_check_record()
2301 rec->flags |= FTRACE_FL_TRAMP_EN; in ftrace_check_record()
2303 rec->flags &= ~FTRACE_FL_TRAMP_EN; in ftrace_check_record()
2307 if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY)) in ftrace_check_record()
2308 rec->flags |= FTRACE_FL_MODIFIED; in ftrace_check_record()
2317 if (rec->flags & FTRACE_FL_DIRECT) in ftrace_check_record()
2318 rec->flags |= FTRACE_FL_DIRECT_EN; in ftrace_check_record()
2320 rec->flags &= ~FTRACE_FL_DIRECT_EN; in ftrace_check_record()
2326 rec->flags &= ~FTRACE_FL_DIRECT_EN; in ftrace_check_record()
2332 if (rec->flags & FTRACE_FL_CALL_OPS) in ftrace_check_record()
2333 rec->flags |= FTRACE_FL_CALL_OPS_EN; in ftrace_check_record()
2335 rec->flags &= ~FTRACE_FL_CALL_OPS_EN; in ftrace_check_record()
2341 rec->flags &= ~FTRACE_FL_CALL_OPS_EN; in ftrace_check_record()
2351 * from the save regs, to a non-save regs function or in ftrace_check_record()
2366 rec->flags &= FTRACE_NOCLEAR_FLAGS; in ftrace_check_record()
2372 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | in ftrace_check_record()
2382 * ftrace_update_record - set a record that now is tracing or not
2395 * ftrace_test_record - check if the record has been enabled or not
2412 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_any()
2416 if (!op->trampoline) in ftrace_find_tramp_ops_any()
2419 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_any()
2430 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_any_other()
2434 if (op == op_exclude || !op->trampoline) in ftrace_find_tramp_ops_any_other()
2437 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_any_other()
2448 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_next()
2452 if (!op->trampoline) in ftrace_find_tramp_ops_next()
2455 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_next()
2466 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_curr()
2475 if (hash_contains_ip(ip, &removed_ops->old_hash)) in ftrace_find_tramp_ops_curr()
2499 if (!op->trampoline) in ftrace_find_tramp_ops_curr()
2506 if (op->flags & FTRACE_OPS_FL_ADDING) in ftrace_find_tramp_ops_curr()
2515 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && in ftrace_find_tramp_ops_curr()
2516 hash_contains_ip(ip, &op->old_hash)) in ftrace_find_tramp_ops_curr()
2523 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && in ftrace_find_tramp_ops_curr()
2524 hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_curr()
2536 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_new()
2539 /* pass rec in as regs to have non-NULL val */ in ftrace_find_tramp_ops_new()
2540 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_new()
2551 unsigned long ip = rec->ip; in ftrace_find_unique_ops()
2555 if (hash_contains_ip(ip, op->func_hash)) { in ftrace_find_unique_ops()
2583 return entry->direct; in ftrace_find_rec_direct()
2589 unsigned long addr = READ_ONCE(ops->direct_call); in call_direct_funcs()
2599 * ftrace_get_addr_new - Get the call address to set to
2613 if ((rec->flags & FTRACE_FL_DIRECT) && in ftrace_get_addr_new()
2615 addr = ftrace_find_rec_direct(rec->ip); in ftrace_get_addr_new()
2622 if (rec->flags & FTRACE_FL_TRAMP) { in ftrace_get_addr_new()
2624 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { in ftrace_get_addr_new()
2626 (void *)rec->ip, (void *)rec->ip, rec->flags); in ftrace_get_addr_new()
2630 return ops->trampoline; in ftrace_get_addr_new()
2633 if (rec->flags & FTRACE_FL_REGS) in ftrace_get_addr_new()
2640 * ftrace_get_addr_curr - Get the call address that is already there
2655 if (rec->flags & FTRACE_FL_DIRECT_EN) { in ftrace_get_addr_curr()
2656 addr = ftrace_find_rec_direct(rec->ip); in ftrace_get_addr_curr()
2663 if (rec->flags & FTRACE_FL_TRAMP_EN) { in ftrace_get_addr_curr()
2667 (void *)rec->ip, (void *)rec->ip); in ftrace_get_addr_curr()
2671 return ops->trampoline; in ftrace_get_addr_curr()
2674 if (rec->flags & FTRACE_FL_REGS_EN) in ftrace_get_addr_curr()
2713 return -1; /* unknown ftrace bug */ in __ftrace_replace_code()
2749 * ftrace_rec_iter_start - start up iterating over traced functions
2766 iter->pg = ftrace_pages_start; in ftrace_rec_iter_start()
2767 iter->index = 0; in ftrace_rec_iter_start()
2770 while (iter->pg && !iter->pg->index) in ftrace_rec_iter_start()
2771 iter->pg = iter->pg->next; in ftrace_rec_iter_start()
2773 if (!iter->pg) in ftrace_rec_iter_start()
2780 * ftrace_rec_iter_next - get the next record to process.
2787 iter->index++; in ftrace_rec_iter_next()
2789 if (iter->index >= iter->pg->index) { in ftrace_rec_iter_next()
2790 iter->pg = iter->pg->next; in ftrace_rec_iter_next()
2791 iter->index = 0; in ftrace_rec_iter_next()
2794 while (iter->pg && !iter->pg->index) in ftrace_rec_iter_next()
2795 iter->pg = iter->pg->next; in ftrace_rec_iter_next()
2798 if (!iter->pg) in ftrace_rec_iter_next()
2805 * ftrace_rec_iter_record - get the record at the iterator location
2812 return &iter->pg->records[iter->index]; in ftrace_rec_iter_record()
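
Arch code that patches call sites one at a time walks the records with these three helpers; a sketch of the loop they are designed for:

	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for (iter = ftrace_rec_iter_start(); iter; iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		/* inspect or patch the call site at rec->ip */
	}
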
2919 * ftrace_run_stop_machine - go back to the stop machine method
2931 * arch_ftrace_update_code - modify the code to trace or not trace
2960 ops->flags |= FTRACE_OPS_FL_MODIFYING; in ftrace_run_modify_code()
2961 ops->old_hash.filter_hash = old_hash->filter_hash; in ftrace_run_modify_code()
2962 ops->old_hash.notrace_hash = old_hash->notrace_hash; in ftrace_run_modify_code()
2964 ops->old_hash.filter_hash = NULL; in ftrace_run_modify_code()
2965 ops->old_hash.notrace_hash = NULL; in ftrace_run_modify_code()
2966 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; in ftrace_run_modify_code()
2982 list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); in ftrace_add_trampoline_to_kallsyms()
2988 list_del_rcu(&ops->list); in ftrace_remove_trampoline_from_kallsyms()
3002 if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && in ftrace_trampoline_free()
3003 ops->trampoline) { in ftrace_trampoline_free()
3008 perf_event_text_poke((void *)ops->trampoline, in ftrace_trampoline_free()
3009 (void *)ops->trampoline, in ftrace_trampoline_free()
3010 ops->trampoline_size, NULL, 0); in ftrace_trampoline_free()
3012 ops->trampoline, ops->trampoline_size, in ftrace_trampoline_free()
3046 return -ENODEV; in ftrace_startup()
3062 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; in ftrace_startup()
3068 ftrace_start_up--; in ftrace_startup()
3069 ops->flags &= ~FTRACE_OPS_FL_ENABLED; in ftrace_startup()
3070 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) in ftrace_startup()
3087 return -ENODEV; in ftrace_startup()
3090 ops->flags &= ~FTRACE_OPS_FL_ADDING; in ftrace_startup()
3100 return -ENODEV; in ftrace_shutdown()
3106 ftrace_start_up--; in ftrace_shutdown()
3120 ops->flags &= ~FTRACE_OPS_FL_ENABLED; in ftrace_shutdown()
3134 ops->flags |= FTRACE_OPS_FL_REMOVING; in ftrace_shutdown()
3138 ops->old_hash.filter_hash = ops->func_hash->filter_hash; in ftrace_shutdown()
3139 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; in ftrace_shutdown()
3153 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS)) in ftrace_shutdown()
3155 (void *)rec->ip, rec->flags); in ftrace_shutdown()
3159 ops->old_hash.filter_hash = NULL; in ftrace_shutdown()
3160 ops->old_hash.notrace_hash = NULL; in ftrace_shutdown()
3163 ops->flags &= ~FTRACE_OPS_FL_REMOVING; in ftrace_shutdown()
3170 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { in ftrace_shutdown()
3202 return alloc_and_copy_ftrace_hash(src->size_bits, src); in copy_hash()
3234 return -ENOMEM; in append_hash()
3244 size = 1 << new_hash->size_bits; in append_hash()
3246 hlist_for_each_entry(entry, &new_hash->buckets[i], hlist) { in append_hash()
3248 if (!__ftrace_lookup_ip(*hash, entry->ip) && in append_hash()
3249 add_hash_entry(*hash, entry->ip) == NULL) in append_hash()
3250 return -ENOMEM; in append_hash()
3270 size = 1 << hash->size_bits; in remove_hash()
3272 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { in remove_hash()
3273 if (!__ftrace_lookup_ip(notrace_hash, entry->ip)) in remove_hash()
3304 size = 1 << new_hash1->size_bits; in intersect_hash()
3306 hlist_for_each_entry(entry, &new_hash1->buckets[i], hlist) { in intersect_hash()
3308 if (__ftrace_lookup_ip(new_hash2, entry->ip) && in intersect_hash()
3309 add_hash_entry(*hash, entry->ip) == NULL) in intersect_hash()
3310 return -ENOMEM; in intersect_hash()
3333 if (A->count != B->count) in ops_equal()
3336 size = 1 << A->size_bits; in ops_equal()
3338 hlist_for_each_entry(entry, &A->buckets[i], hlist) { in ops_equal()
3339 if (!__ftrace_lookup_ip(B, entry->ip)) in ops_equal()
3360 old_hash_ops.filter_hash = ops->func_hash->filter_hash; in __ftrace_hash_move_and_update_ops()
3361 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; in __ftrace_hash_move_and_update_ops()
3375 if (!ops_equal(filter_hash, ops->func_hash->filter_hash)) { in ftrace_update_ops()
3376 ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->filter_hash, in ftrace_update_ops()
3382 if (!ops_equal(notrace_hash, ops->func_hash->notrace_hash)) { in ftrace_update_ops()
3383 ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->notrace_hash, in ftrace_update_ops()
3396 if (!ftrace_hash_empty(func_hash->filter_hash)) { in add_first_hash()
3397 *filter_hash = copy_hash(func_hash->filter_hash); in add_first_hash()
3399 return -ENOMEM; in add_first_hash()
3400 remove_hash(*filter_hash, func_hash->notrace_hash); in add_first_hash()
3404 *notrace_hash = copy_hash(func_hash->notrace_hash); in add_first_hash()
3406 return -ENOMEM; in add_first_hash()
3419 if (ftrace_hash_empty(ops_hash->filter_hash) || in add_next_hash()
3420 ftrace_hash_empty(subops_hash->filter_hash)) { in add_next_hash()
3428 WARN_ON_ONCE(!ftrace_hash_empty(ops_hash->notrace_hash)); in add_next_hash()
3430 size_bits = max(ops_hash->filter_hash->size_bits, in add_next_hash()
3431 subops_hash->filter_hash->size_bits); in add_next_hash()
3434 *filter_hash = alloc_and_copy_ftrace_hash(size_bits, subops_hash->filter_hash); in add_next_hash()
3436 return -ENOMEM; in add_next_hash()
3438 remove_hash(*filter_hash, subops_hash->notrace_hash); in add_next_hash()
3440 ret = append_hash(filter_hash, ops_hash->filter_hash, in add_next_hash()
3464 size_bits = max(ops_hash->notrace_hash->size_bits, in add_next_hash()
3465 subops_hash->notrace_hash->size_bits); in add_next_hash()
3468 return -ENOMEM; in add_next_hash()
3470 ret = intersect_hash(notrace_hash, ops_hash->notrace_hash, in add_next_hash()
3471 subops_hash->notrace_hash); in add_next_hash()
3482 * ftrace_startup_subops - enable tracing for subops of an ops
3500 return -ENODEV; in ftrace_startup_subops()
3505 if (WARN_ON_ONCE(subops->flags & FTRACE_OPS_FL_ENABLED)) in ftrace_startup_subops()
3506 return -EBUSY; in ftrace_startup_subops()
3509 if (!ops->func_hash->filter_hash) in ftrace_startup_subops()
3510 ops->func_hash->filter_hash = EMPTY_HASH; in ftrace_startup_subops()
3511 if (!ops->func_hash->notrace_hash) in ftrace_startup_subops()
3512 ops->func_hash->notrace_hash = EMPTY_HASH; in ftrace_startup_subops()
3513 if (!subops->func_hash->filter_hash) in ftrace_startup_subops()
3514 subops->func_hash->filter_hash = EMPTY_HASH; in ftrace_startup_subops()
3515 if (!subops->func_hash->notrace_hash) in ftrace_startup_subops()
3516 subops->func_hash->notrace_hash = EMPTY_HASH; in ftrace_startup_subops()
3519 if (list_empty(&ops->subop_list)) { in ftrace_startup_subops()
3522 WARN_ON_ONCE(!ftrace_hash_empty(ops->func_hash->filter_hash)); in ftrace_startup_subops()
3523 WARN_ON_ONCE(!ftrace_hash_empty(ops->func_hash->notrace_hash)); in ftrace_startup_subops()
3525 ret = add_first_hash(&filter_hash, &notrace_hash, subops->func_hash); in ftrace_startup_subops()
3529 save_filter_hash = ops->func_hash->filter_hash; in ftrace_startup_subops()
3530 save_notrace_hash = ops->func_hash->notrace_hash; in ftrace_startup_subops()
3532 ops->func_hash->filter_hash = filter_hash; in ftrace_startup_subops()
3533 ops->func_hash->notrace_hash = notrace_hash; in ftrace_startup_subops()
3534 list_add(&subops->list, &ops->subop_list); in ftrace_startup_subops()
3537 list_del(&subops->list); in ftrace_startup_subops()
3538 ops->func_hash->filter_hash = save_filter_hash; in ftrace_startup_subops()
3539 ops->func_hash->notrace_hash = save_notrace_hash; in ftrace_startup_subops()
3545 subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP; in ftrace_startup_subops()
3546 subops->managed = ops; in ftrace_startup_subops()
3560 ret = add_next_hash(&filter_hash, &notrace_hash, ops->func_hash, subops->func_hash); in ftrace_startup_subops()
3564 list_add(&subops->list, &ops->subop_list); in ftrace_startup_subops()
3570 list_del(&subops->list); in ftrace_startup_subops()
3572 subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP; in ftrace_startup_subops()
3573 subops->managed = ops; in ftrace_startup_subops()
3589 list_for_each_entry(subops, &ops->subop_list, list) { in rebuild_hashes()
3594 ret = add_first_hash(filter_hash, notrace_hash, subops->func_hash); in rebuild_hashes()
3600 &temp_hash, subops->func_hash); in rebuild_hashes()
3618 * ftrace_shutdown_subops - Remove a subops from a manager ops
3636 return -ENODEV; in ftrace_shutdown_subops()
3638 if (WARN_ON_ONCE(!(subops->flags & FTRACE_OPS_FL_ENABLED))) in ftrace_shutdown_subops()
3639 return -EINVAL; in ftrace_shutdown_subops()
3641 list_del(&subops->list); in ftrace_shutdown_subops()
3643 if (list_empty(&ops->subop_list)) { in ftrace_shutdown_subops()
3648 list_add(&subops->list, &ops->subop_list); in ftrace_shutdown_subops()
3652 subops->flags &= ~FTRACE_OPS_FL_ENABLED; in ftrace_shutdown_subops()
3654 free_ftrace_hash(ops->func_hash->filter_hash); in ftrace_shutdown_subops()
3655 free_ftrace_hash(ops->func_hash->notrace_hash); in ftrace_shutdown_subops()
3656 ops->func_hash->filter_hash = EMPTY_HASH; in ftrace_shutdown_subops()
3657 ops->func_hash->notrace_hash = EMPTY_HASH; in ftrace_shutdown_subops()
3658 subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP); in ftrace_shutdown_subops()
3659 subops->managed = NULL; in ftrace_shutdown_subops()
3671 list_add(&subops->list, &ops->subop_list); in ftrace_shutdown_subops()
3673 subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP); in ftrace_shutdown_subops()
3674 subops->managed = NULL; in ftrace_shutdown_subops()
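
A manager ops (for example the one behind the function-graph infrastructure) enables and disables its clients through this pair of calls; a sketch, assuming manager_ops and my_subops are defined and initialized elsewhere:

	int ret;

	ret = ftrace_startup_subops(&manager_ops, &my_subops, 0);
	if (ret)
		return ret;

	/* later, when this client is done tracing */
	ret = ftrace_shutdown_subops(&manager_ops, &my_subops, 0);
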
3685 struct ftrace_ops *ops = subops->managed; in ftrace_hash_move_and_update_subops()
3693 if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP)) in ftrace_hash_move_and_update_subops()
3694 return -EINVAL; in ftrace_hash_move_and_update_subops()
3701 return -ENOMEM; in ftrace_hash_move_and_update_subops()
3735 return ftrace_hash_empty(ops->func_hash->filter_hash) && in ops_traces_mod()
3736 ftrace_hash_empty(ops->func_hash->notrace_hash); in ops_traces_mod()
3756 * read-only, the modification of enabling ftrace can fail if in ftrace_update_code()
3757 * the read-only is done while ftrace is converting the calls. in ftrace_update_code()
3760 * to read-only. in ftrace_update_code()
3765 for (pg = new_pgs; pg; pg = pg->next) { in ftrace_update_code()
3767 for (i = 0; i < pg->index; i++) { in ftrace_update_code()
3771 return -1; in ftrace_update_code()
3773 p = &pg->records[i]; in ftrace_update_code()
3774 p->flags = rec_flags; in ftrace_update_code()
3788 update_time = stop - start; in ftrace_update_code()
3805 return -EINVAL; in ftrace_allocate_records()
3809 order = fls(pages) - 1; in ftrace_allocate_records()
3812 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); in ftrace_allocate_records()
3814 if (!pg->records) { in ftrace_allocate_records()
3817 return -ENOMEM; in ftrace_allocate_records()
3818 order--; in ftrace_allocate_records()
3826 pg->order = order; in ftrace_allocate_records()
3839 if (pg->records) { in ftrace_free_pages()
3840 free_pages((unsigned long)pg->records, pg->order); in ftrace_free_pages()
3841 ftrace_number_of_pages -= 1 << pg->order; in ftrace_free_pages()
3843 pages = pg->next; in ftrace_free_pages()
3846 ftrace_number_of_groups--; in ftrace_free_pages()
3874 num_to_init -= cnt; in ftrace_allocate_pages()
3878 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); in ftrace_allocate_pages()
3879 if (!pg->next) in ftrace_allocate_pages()
3882 pg = pg->next; in ftrace_allocate_pages()
3916 struct ftrace_iterator *iter = m->private; in t_probe_next()
3917 struct trace_array *tr = iter->ops->private; in t_probe_next()
3926 iter->pos = *pos; in t_probe_next()
3931 func_probes = &tr->func_probes; in t_probe_next()
3935 if (!iter->probe) { in t_probe_next()
3936 next = func_probes->next; in t_probe_next()
3937 iter->probe = list_entry(next, struct ftrace_func_probe, list); in t_probe_next()
3940 if (iter->probe_entry) in t_probe_next()
3941 hnd = &iter->probe_entry->hlist; in t_probe_next()
3943 hash = iter->probe->ops.func_hash->filter_hash; in t_probe_next()
3952 size = 1 << hash->size_bits; in t_probe_next()
3955 if (iter->pidx >= size) { in t_probe_next()
3956 if (iter->probe->list.next == func_probes) in t_probe_next()
3958 next = iter->probe->list.next; in t_probe_next()
3959 iter->probe = list_entry(next, struct ftrace_func_probe, list); in t_probe_next()
3960 hash = iter->probe->ops.func_hash->filter_hash; in t_probe_next()
3961 size = 1 << hash->size_bits; in t_probe_next()
3962 iter->pidx = 0; in t_probe_next()
3965 hhd = &hash->buckets[iter->pidx]; in t_probe_next()
3968 iter->pidx++; in t_probe_next()
3974 hnd = hhd->first; in t_probe_next()
3976 hnd = hnd->next; in t_probe_next()
3978 iter->pidx++; in t_probe_next()
3986 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); in t_probe_next()
3993 struct ftrace_iterator *iter = m->private; in t_probe_start()
3997 if (!(iter->flags & FTRACE_ITER_DO_PROBES)) in t_probe_start()
4000 if (iter->mod_pos > *pos) in t_probe_start()
4003 iter->probe = NULL; in t_probe_start()
4004 iter->probe_entry = NULL; in t_probe_start()
4005 iter->pidx = 0; in t_probe_start()
4006 for (l = 0; l <= (*pos - iter->mod_pos); ) { in t_probe_start()
4015 iter->flags |= FTRACE_ITER_PROBE; in t_probe_start()
4027 probe = iter->probe; in t_probe_show()
4028 probe_entry = iter->probe_entry; in t_probe_show()
4031 return -EIO; in t_probe_show()
4033 probe_ops = probe->probe_ops; in t_probe_show()
4035 if (probe_ops->print) in t_probe_show()
4036 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); in t_probe_show()
4038 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, in t_probe_show()
4039 (void *)probe_ops->func); in t_probe_show()
4047 struct ftrace_iterator *iter = m->private; in t_mod_next()
4048 struct trace_array *tr = iter->tr; in t_mod_next()
4051 iter->pos = *pos; in t_mod_next()
4053 iter->mod_list = iter->mod_list->next; in t_mod_next()
4055 if (iter->mod_list == &tr->mod_trace || in t_mod_next()
4056 iter->mod_list == &tr->mod_notrace) { in t_mod_next()
4057 iter->flags &= ~FTRACE_ITER_MOD; in t_mod_next()
4061 iter->mod_pos = *pos; in t_mod_next()
4068 struct ftrace_iterator *iter = m->private; in t_mod_start()
4072 if (iter->func_pos > *pos) in t_mod_start()
4075 iter->mod_pos = iter->func_pos; in t_mod_start()
4078 if (!iter->tr) in t_mod_start()
4081 for (l = 0; l <= (*pos - iter->func_pos); ) { in t_mod_start()
4087 iter->flags &= ~FTRACE_ITER_MOD; in t_mod_start()
4092 iter->flags |= FTRACE_ITER_MOD; in t_mod_start()
4101 struct trace_array *tr = iter->tr; in t_mod_show()
4103 if (WARN_ON_ONCE(!iter->mod_list) || in t_mod_show()
4104 iter->mod_list == &tr->mod_trace || in t_mod_show()
4105 iter->mod_list == &tr->mod_notrace) in t_mod_show()
4106 return -EIO; in t_mod_show()
4108 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); in t_mod_show()
4110 if (ftrace_mod->func) in t_mod_show()
4111 seq_printf(m, "%s", ftrace_mod->func); in t_mod_show()
4115 seq_printf(m, ":mod:%s\n", ftrace_mod->module); in t_mod_show()
4123 struct ftrace_iterator *iter = m->private; in t_func_next()
4129 if (iter->idx >= iter->pg->index) { in t_func_next()
4130 if (iter->pg->next) { in t_func_next()
4131 iter->pg = iter->pg->next; in t_func_next()
4132 iter->idx = 0; in t_func_next()
4136 rec = &iter->pg->records[iter->idx++]; in t_func_next()
4137 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && in t_func_next()
4138 !ftrace_lookup_ip(iter->hash, rec->ip)) || in t_func_next()
4140 ((iter->flags & FTRACE_ITER_ENABLED) && in t_func_next()
4141 !(rec->flags & FTRACE_FL_ENABLED)) || in t_func_next()
4143 ((iter->flags & FTRACE_ITER_TOUCHED) && in t_func_next()
4144 !(rec->flags & FTRACE_FL_TOUCHED))) { in t_func_next()
4154 iter->pos = iter->func_pos = *pos; in t_func_next()
4155 iter->func = rec; in t_func_next()
4163 struct ftrace_iterator *iter = m->private; in t_next()
4170 if (iter->flags & FTRACE_ITER_PROBE) in t_next()
4173 if (iter->flags & FTRACE_ITER_MOD) in t_next()
4176 if (iter->flags & FTRACE_ITER_PRINTALL) { in t_next()
4192 iter->pos = 0; in reset_iter_read()
4193 iter->func_pos = 0; in reset_iter_read()
4194 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); in reset_iter_read()
4199 struct ftrace_iterator *iter = m->private; in t_start()
4211 if (*pos < iter->pos) in t_start()
4219 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && in t_start()
4220 ftrace_hash_empty(iter->hash)) { in t_start()
4221 iter->func_pos = 1; /* Account for the message */ in t_start()
4224 iter->flags |= FTRACE_ITER_PRINTALL; in t_start()
4226 iter->flags &= ~FTRACE_ITER_PROBE; in t_start()
4230 if (iter->flags & FTRACE_ITER_MOD) in t_start()
4238 iter->pg = ftrace_pages_start; in t_start()
4239 iter->idx = 0; in t_start()
4270 seq_printf(m, " ->%pS", ptr); in add_trampoline_func()
4290 ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str); in test_for_valid_rec()
4294 rec->flags |= FTRACE_FL_DISABLED; in test_for_valid_rec()
4357 return ret == NULL ? -1 : 0; in print_rec()
4377 list_for_each_entry(subops, &ops->subop_list, list) { in print_subops()
4378 if (!((subops->flags & FTRACE_OPS_FL_ENABLED) && in print_subops()
4379 hash_contains_ip(rec->ip, subops->func_hash))) in print_subops()
4386 if (subops->flags & FTRACE_OPS_FL_GRAPH) { in print_subops()
4391 (void *)gops->entryfunc, in print_subops()
4392 (void *)gops->retfunc); in print_subops()
4396 if (subops->trampoline) { in print_subops()
4398 (void *)subops->trampoline, in print_subops()
4399 (void *)subops->func); in print_subops()
4403 (void *)subops->func); in print_subops()
4410 struct ftrace_iterator *iter = m->private; in t_show()
4413 if (iter->flags & FTRACE_ITER_PROBE) in t_show()
4416 if (iter->flags & FTRACE_ITER_MOD) in t_show()
4419 if (iter->flags & FTRACE_ITER_PRINTALL) { in t_show()
4420 if (iter->flags & FTRACE_ITER_NOTRACE) in t_show()
4427 rec = iter->func; in t_show()
4432 if (iter->flags & FTRACE_ITER_ADDRS) in t_show()
4433 seq_printf(m, "%lx ", rec->ip); in t_show()
4435 if (print_rec(m, rec->ip)) { in t_show()
4437 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED)); in t_show()
4442 if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) { in t_show()
4447 rec->flags & FTRACE_FL_REGS ? " R" : " ", in t_show()
4448 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ", in t_show()
4449 rec->flags & FTRACE_FL_DIRECT ? " D" : " ", in t_show()
4450 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ", in t_show()
4451 rec->flags & FTRACE_FL_MODIFIED ? " M " : " "); in t_show()
4452 if (rec->flags & FTRACE_FL_TRAMP_EN) { in t_show()
4457 (void *)ops->trampoline, in t_show()
4458 (void *)ops->func); in t_show()
4468 if (rec->flags & FTRACE_FL_CALL_OPS_EN) { in t_show()
4472 ops, ops->func); in t_show()
4478 if (rec->flags & FTRACE_FL_DIRECT) { in t_show()
4481 direct = ftrace_find_rec_direct(rec->ip); in t_show()
4483 seq_printf(m, "\n\tdirect-->%pS", (void *)direct); in t_show()
4510 return -ENODEV; in ftrace_avail_open()
4514 return -ENOMEM; in ftrace_avail_open()
4516 iter->pg = ftrace_pages_start; in ftrace_avail_open()
4517 iter->ops = &global_ops; in ftrace_avail_open()
4538 return -ENOMEM; in ftrace_enabled_open()
4540 iter->pg = ftrace_pages_start; in ftrace_enabled_open()
4541 iter->flags = FTRACE_ITER_ENABLED; in ftrace_enabled_open()
4542 iter->ops = &global_ops; in ftrace_enabled_open()
4563 return -ENOMEM; in ftrace_touched_open()
4565 iter->pg = ftrace_pages_start; in ftrace_touched_open()
4566 iter->flags = FTRACE_ITER_TOUCHED; in ftrace_touched_open()
4567 iter->ops = &global_ops; in ftrace_touched_open()
4583 return -ENODEV; in ftrace_avail_addrs_open()
4587 return -ENOMEM; in ftrace_avail_addrs_open()
4589 iter->pg = ftrace_pages_start; in ftrace_avail_addrs_open()
4590 iter->flags = FTRACE_ITER_ADDRS; in ftrace_avail_addrs_open()
4591 iter->ops = &global_ops; in ftrace_avail_addrs_open()
4597 * ftrace_regex_open - initialize function tracer filter files
4621 struct trace_array *tr = ops->private; in ftrace_regex_open()
4622 int ret = -ENOMEM; in ftrace_regex_open()
4627 return -ENODEV; in ftrace_regex_open()
4630 return -ENODEV; in ftrace_regex_open()
4636 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) in ftrace_regex_open()
4639 iter->ops = ops; in ftrace_regex_open()
4640 iter->flags = flag; in ftrace_regex_open()
4641 iter->tr = tr; in ftrace_regex_open()
4643 mutex_lock(&ops->func_hash->regex_lock); in ftrace_regex_open()
4646 hash = ops->func_hash->notrace_hash; in ftrace_regex_open()
4647 mod_head = tr ? &tr->mod_notrace : NULL; in ftrace_regex_open()
4649 hash = ops->func_hash->filter_hash; in ftrace_regex_open()
4650 mod_head = tr ? &tr->mod_trace : NULL; in ftrace_regex_open()
4653 iter->mod_list = mod_head; in ftrace_regex_open()
4655 if (file->f_mode & FMODE_WRITE) { in ftrace_regex_open()
4658 if (file->f_flags & O_TRUNC) { in ftrace_regex_open()
4659 iter->hash = alloc_ftrace_hash(size_bits); in ftrace_regex_open()
4662 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); in ftrace_regex_open()
4666 iter->hash = alloc_and_copy_ftrace_hash(hash->size_bits, hash); in ftrace_regex_open()
4668 iter->hash = EMPTY_HASH; in ftrace_regex_open()
4671 if (!iter->hash) { in ftrace_regex_open()
4672 trace_parser_put(&iter->parser); in ftrace_regex_open()
4678 if (file->f_mode & FMODE_READ) { in ftrace_regex_open()
4679 iter->pg = ftrace_pages_start; in ftrace_regex_open()
4683 struct seq_file *m = file->private_data; in ftrace_regex_open()
4684 m->private = iter; in ftrace_regex_open()
4687 free_ftrace_hash(iter->hash); in ftrace_regex_open()
4688 trace_parser_put(&iter->parser); in ftrace_regex_open()
4691 file->private_data = iter; in ftrace_regex_open()
4694 mutex_unlock(&ops->func_hash->regex_lock); in ftrace_regex_open()
4709 struct ftrace_ops *ops = inode->i_private; in ftrace_filter_open()
4720 struct ftrace_ops *ops = inode->i_private; in ftrace_notrace_open()
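
These open routines back set_ftrace_filter and set_ftrace_notrace; in-kernel users reach the same per-ops hashes through ftrace_set_filter()/ftrace_set_notrace(). A minimal sketch (module context; my_callback, my_ops and my_init are illustrative names, not kernel symbols):

#include <linux/ftrace.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* runs for every hit on the filtered functions */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static int __init my_init(void)
{
	int ret;

	/* trace only vfs_read(); reset any previous filter */
	ret = ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}
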
4735 * If symbols in an architecture don't correspond exactly to the user-visible
4749 str = arch_ftrace_match_adjust(str, g->search); in ftrace_match()
4751 switch (g->type) { in ftrace_match()
4753 if (strcmp(str, g->search) == 0) in ftrace_match()
4757 if (strncmp(str, g->search, g->len) == 0) in ftrace_match()
4761 if (strstr(str, g->search)) in ftrace_match()
4766 if (slen >= g->len && in ftrace_match()
4767 memcmp(str + slen - g->len, g->search, g->len) == 0) in ftrace_match()
4771 if (glob_match(g->search, str)) in ftrace_match()
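
The four non-glob modes correspond to the user patterns "func", "func*", "*func*" and "*func"; a standalone sketch of the same comparisons (MATCH_GLOB and MATCH_INDEX are left out):

#include <stdio.h>
#include <string.h>

enum { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

static int ftrace_match_sketch(const char *str, const char *search, int type)
{
	int slen = strlen(str);
	int len = strlen(search);

	switch (type) {
	case MATCH_FULL:
		return strcmp(str, search) == 0;
	case MATCH_FRONT_ONLY:
		return strncmp(str, search, len) == 0;
	case MATCH_MIDDLE_ONLY:
		return strstr(str, search) != NULL;
	case MATCH_END_ONLY:
		return slen >= len &&
		       memcmp(str + slen - len, search, len) == 0;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", ftrace_match_sketch("vfs_read", "vfs_", MATCH_FRONT_ONLY));   /* 1 */
	printf("%d\n", ftrace_match_sketch("vfs_read", "_read", MATCH_END_ONLY));    /* 1 */
	printf("%d\n", ftrace_match_sketch("vfs_read", "write", MATCH_MIDDLE_ONLY)); /* 0 */
	return 0;
}
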
4785 entry = ftrace_lookup_ip(hash, rec->ip); in enter_record()
4796 if (add_hash_entry(hash, rec->ip) == NULL) in enter_record()
4797 ret = -ENOMEM; in enter_record()
4811 if (kstrtoul(func_g->search, 0, &index) || --index < 0) in add_rec_by_index()
4815 if (pg->index <= index) { in add_rec_by_index()
4816 index -= pg->index; in add_rec_by_index()
4820 rec = &pg->records[index]; in add_rec_by_index()
4834 return -1; in lookup_ip()
4852 if (lookup_ip(rec->ip, &modname, str)) { in ftrace_match_record()
4855 !(rec->flags & FTRACE_FL_DISABLED)); in ftrace_match_record()
4863 if (!mod_g->len) { in ftrace_match_record()
4881 if (!func_g->len) in ftrace_match_record()
4923 if (rec->flags & FTRACE_FL_DISABLED) in match_records()
4952 if (ops->flags & FTRACE_OPS_FL_ENABLED) { in ftrace_ops_update_code()
4958 * If this is the shared global_ops filter, then we need to in ftrace_ops_update_code()
4962 if (ops->func_hash != &global_ops.local_hash) in ftrace_ops_update_code()
4966 if (op->func_hash == &global_ops.local_hash && in ftrace_ops_update_code()
4967 op->flags & FTRACE_OPS_FL_ENABLED) { in ftrace_ops_update_code()
4980 if (ops->flags & FTRACE_OPS_FL_SUBOP) in ftrace_hash_move_and_update_ops()
4986 * this ops. Shared filters are only allowed to have one ops set in ftrace_hash_move_and_update_ops()
4990 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) { in ftrace_hash_move_and_update_ops()
4997 list_for_each_entry(subops, &op->subop_list, list) { in ftrace_hash_move_and_update_ops()
4998 if ((subops->flags & FTRACE_OPS_FL_ENABLED) && in ftrace_hash_move_and_update_ops()
4999 subops->func_hash == ops->func_hash) { in ftrace_hash_move_and_update_ops()
5013 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; in cache_mod()
5019 int ret = -EINVAL; in cache_mod()
5025 if (strcmp(ftrace_mod->module, module) != 0) in cache_mod()
5030 (ftrace_mod->func && in cache_mod()
5031 strcmp(ftrace_mod->func, func) == 0)) { in cache_mod()
5042 return -EINVAL; in cache_mod()
5057 mutex_lock(&ops->func_hash->regex_lock); in process_mod_list()
5060 orig_hash = &ops->func_hash->filter_hash; in process_mod_list()
5062 orig_hash = &ops->func_hash->notrace_hash; in process_mod_list()
5073 if (strcmp(ftrace_mod->module, mod) != 0) in process_mod_list()
5076 if (ftrace_mod->func) in process_mod_list()
5077 func = kstrdup(ftrace_mod->func, GFP_KERNEL); in process_mod_list()
5084 list_move(&ftrace_mod->list, &process_mods); in process_mod_list()
5087 kfree(ftrace_mod->func); in process_mod_list()
5088 ftrace_mod->func = func; in process_mod_list()
5095 func = ftrace_mod->func; in process_mod_list()
5103 new_hash->flags &= ~FTRACE_HASH_FL_MOD; in process_mod_list()
5112 mutex_unlock(&ops->func_hash->regex_lock); in process_mod_list()
5128 if (!list_empty(&tr->mod_trace)) in process_cached_mods()
5129 process_mod_list(&tr->mod_trace, tr->ops, mod, true); in process_cached_mods()
5130 if (!list_empty(&tr->mod_notrace)) in process_cached_mods()
5131 process_mod_list(&tr->mod_notrace, tr->ops, mod, false); in process_cached_mods()
5152 return -ENODEV; in ftrace_mod_callback()
5157 return -ENOMEM; in ftrace_mod_callback()
5194 probe_ops = probe->probe_ops; in function_trace_probe_call()
5202 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); in function_trace_probe_call()
5212 * Note, ftrace_func_mapper is freed by free_ftrace_hash(&mapper->hash).
5220 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
5238 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
5254 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_find_ip()
5259 return &map->data; in ftrace_func_mapper_find_ip()
5263 * ftrace_func_mapper_add_ip - Map some data to an ip
5276 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_add_ip()
5278 return -EBUSY; in ftrace_func_mapper_add_ip()
5282 return -ENOMEM; in ftrace_func_mapper_add_ip()
5284 map->entry.ip = ip; in ftrace_func_mapper_add_ip()
5285 map->data = data; in ftrace_func_mapper_add_ip()
5287 __add_hash_entry(&mapper->hash, &map->entry); in ftrace_func_mapper_add_ip()
5293 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
5309 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_remove_ip()
5314 data = map->data; in ftrace_func_mapper_remove_ip()
5316 remove_hash_entry(&mapper->hash, entry); in ftrace_func_mapper_remove_ip()
5323 * free_ftrace_func_mapper - free a mapping of ips and data
5341 if (free_func && mapper->hash.count) { in free_ftrace_func_mapper()
5342 size = 1 << mapper->hash.size_bits; in free_ftrace_func_mapper()
5344 hhd = &mapper->hash.buckets[i]; in free_ftrace_func_mapper()
5352 free_ftrace_hash(&mapper->hash); in free_ftrace_func_mapper()
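
A probe typically keeps one mapper and stores a pointer-sized payload per ip (the traceon/traceoff count probes follow this pattern); a sketch, where the my_* helpers are illustrative names:

	static struct ftrace_func_mapper *mapper;

	static int my_map_ip(unsigned long ip, void *data)
	{
		if (!mapper)
			mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;

		return ftrace_func_mapper_add_ip(mapper, ip, data);
	}

	static void *my_lookup_ip(unsigned long ip)
	{
		void **data = ftrace_func_mapper_find_ip(mapper, ip);

		return data ? *data : NULL;
	}
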
5361 WARN_ON(probe->ref <= 0); in release_probe()
5364 probe->ref--; in release_probe()
5366 if (!probe->ref) { in release_probe()
5367 probe_ops = probe->probe_ops; in release_probe()
5370 * the probe->data itself in release_probe()
5372 if (probe_ops->free) in release_probe()
5373 probe_ops->free(probe_ops, probe->tr, 0, probe->data); in release_probe()
5374 list_del(&probe->list); in release_probe()
5385 probe->ref++; in acquire_probe_locked()
5404 return -EINVAL; in register_ftrace_function_probe()
5408 return -EINVAL; in register_ftrace_function_probe()
5413 list_for_each_entry(iter, &tr->func_probes, list) { in register_ftrace_function_probe()
5414 if (iter->probe_ops == probe_ops) { in register_ftrace_function_probe()
5423 return -ENOMEM; in register_ftrace_function_probe()
5425 probe->probe_ops = probe_ops; in register_ftrace_function_probe()
5426 probe->ops.func = function_trace_probe_call; in register_ftrace_function_probe()
5427 probe->tr = tr; in register_ftrace_function_probe()
5428 ftrace_ops_init(&probe->ops); in register_ftrace_function_probe()
5429 list_add(&probe->list, &tr->func_probes); in register_ftrace_function_probe()
5437 * Note, there's a small window here that the func_hash->filter_hash in register_ftrace_function_probe()
5440 mutex_lock(&probe->ops.func_hash->regex_lock); in register_ftrace_function_probe()
5442 orig_hash = &probe->ops.func_hash->filter_hash; in register_ftrace_function_probe()
5447 ret = -ENOMEM; in register_ftrace_function_probe()
5455 ret = -EINVAL; in register_ftrace_function_probe()
5460 size = 1 << hash->size_bits; in register_ftrace_function_probe()
5462 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_function_probe()
5463 if (ftrace_lookup_ip(old_hash, entry->ip)) in register_ftrace_function_probe()
5470 if (probe_ops->init) { in register_ftrace_function_probe()
5471 ret = probe_ops->init(probe_ops, tr, in register_ftrace_function_probe()
5472 entry->ip, data, in register_ftrace_function_probe()
5473 &probe->data); in register_ftrace_function_probe()
5475 if (probe_ops->free && count) in register_ftrace_function_probe()
5476 probe_ops->free(probe_ops, tr, in register_ftrace_function_probe()
5477 0, probe->data); in register_ftrace_function_probe()
5478 probe->data = NULL; in register_ftrace_function_probe()
5490 ret = -EINVAL; in register_ftrace_function_probe()
5494 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, in register_ftrace_function_probe()
5500 probe->ref += count; in register_ftrace_function_probe()
5502 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) in register_ftrace_function_probe()
5503 ret = ftrace_startup(&probe->ops, 0); in register_ftrace_function_probe()
5511 mutex_unlock(&probe->ops.func_hash->regex_lock); in register_ftrace_function_probe()
5519 if (!probe_ops->free || !count) in register_ftrace_function_probe()
5524 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_function_probe()
5525 if (ftrace_lookup_ip(old_hash, entry->ip)) in register_ftrace_function_probe()
5527 probe_ops->free(probe_ops, tr, entry->ip, probe->data); in register_ftrace_function_probe()
5548 int i, ret = -ENODEV; in unregister_ftrace_function_probe_func()
5562 return -EINVAL; in unregister_ftrace_function_probe_func()
5567 list_for_each_entry(iter, &tr->func_probes, list) { in unregister_ftrace_function_probe_func()
5568 if (iter->probe_ops == probe_ops) { in unregister_ftrace_function_probe_func()
5576 ret = -EINVAL; in unregister_ftrace_function_probe_func()
5577 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) in unregister_ftrace_function_probe_func()
5584 mutex_lock(&probe->ops.func_hash->regex_lock); in unregister_ftrace_function_probe_func()
5586 orig_hash = &probe->ops.func_hash->filter_hash; in unregister_ftrace_function_probe_func()
5596 ret = -ENOMEM; in unregister_ftrace_function_probe_func()
5603 size = 1 << hash->size_bits; in unregister_ftrace_function_probe_func()
5605 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { in unregister_ftrace_function_probe_func()
5608 kallsyms_lookup(entry->ip, NULL, NULL, in unregister_ftrace_function_probe_func()
5615 hlist_add_head(&entry->hlist, &hhd); in unregister_ftrace_function_probe_func()
5621 ret = -EINVAL; in unregister_ftrace_function_probe_func()
5627 WARN_ON(probe->ref < count); in unregister_ftrace_function_probe_func()
5629 probe->ref -= count; in unregister_ftrace_function_probe_func()
5632 ftrace_shutdown(&probe->ops, 0); in unregister_ftrace_function_probe_func()
5634 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, in unregister_ftrace_function_probe_func()
5639 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, in unregister_ftrace_function_probe_func()
5644 hlist_del(&entry->hlist); in unregister_ftrace_function_probe_func()
5645 if (probe_ops->free) in unregister_ftrace_function_probe_func()
5646 probe_ops->free(probe_ops, tr, entry->ip, probe->data); in unregister_ftrace_function_probe_func()
5652 mutex_unlock(&probe->ops.func_hash->regex_lock); in unregister_ftrace_function_probe_func()
5668 list_for_each_entry_safe(probe, n, &tr->func_probes, list) in clear_ftrace_function_probes()
5669 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); in clear_ftrace_function_probes()
5685 if (strcmp(cmd->name, p->name) == 0) in register_ftrace_command()
5686 return -EBUSY; in register_ftrace_command()
5688 list_add(&cmd->list, &ftrace_commands); in register_ftrace_command()
5704 if (strcmp(cmd->name, p->name) == 0) { in unregister_ftrace_command()
5705 list_del_init(&p->list); in unregister_ftrace_command()
5710 return -ENODEV; in unregister_ftrace_command()
5716 struct ftrace_hash *hash = iter->hash; in ftrace_process_regex()
5717 struct trace_array *tr = iter->ops->private; in ftrace_process_regex()
5727 ret = -EINVAL; in ftrace_process_regex()
5740 if (strcmp(p->name, command) == 0) in ftrace_process_regex()
5741 return p->func(tr, hash, func, command, next, enable); in ftrace_process_regex()
5744 return -EINVAL; in ftrace_process_regex()
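/*
 * Hedged sketch (not part of this file): the dispatch above,
 * p->func(tr, hash, func, command, next, enable), shows the shape of a
 * "func:command[:param]" handler hooked into writes to set_ftrace_filter.
 * struct ftrace_func_command and register_ftrace_command() are internal to
 * the tracing core (kernel/trace/trace.h), so this pattern applies to
 * built-in tracer code; "mycmd" and my_cmd_func are made-up placeholders.
 */
static int my_cmd_func(struct trace_array *tr, struct ftrace_hash *hash,
		       char *func, char *cmd, char *param, int enable)
{
	/* act on every function matching @func; @param is the optional ":param" */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "mycmd",
	.func	= my_cmd_func,
};

static __init int my_cmd_init(void)
{
	/* returns -EBUSY if another command already claimed this name */
	return register_ftrace_command(&my_cmd);
}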
5758 if (file->f_mode & FMODE_READ) { in ftrace_regex_write()
5759 struct seq_file *m = file->private_data; in ftrace_regex_write()
5760 iter = m->private; in ftrace_regex_write()
5762 iter = file->private_data; in ftrace_regex_write()
5765 return -ENODEV; in ftrace_regex_write()
5767 /* iter->hash is a local copy, so we don't need regex_lock */ in ftrace_regex_write()
5769 parser = &iter->parser; in ftrace_regex_write()
5774 ret = ftrace_process_regex(iter, parser->buffer, in ftrace_regex_write()
5775 parser->idx, enable); in ftrace_regex_write()
5805 return -EINVAL; in __ftrace_match_addr()
5810 return -ENOENT; in __ftrace_match_addr()
5819 return entry ? 0 : -ENOMEM; in __ftrace_match_addr()
5852 return -ENODEV; in ftrace_set_hash()
5854 mutex_lock(&ops->func_hash->regex_lock); in ftrace_set_hash()
5857 orig_hash = &ops->func_hash->filter_hash; in ftrace_set_hash()
5859 orig_hash = &ops->func_hash->notrace_hash; in ftrace_set_hash()
5867 ret = -ENOMEM; in ftrace_set_hash()
5874 (*orig_hash)->flags |= FTRACE_HASH_FL_MOD; in ftrace_set_hash()
5880 ret = -EINVAL; in ftrace_set_hash()
5894 mutex_unlock(&ops->func_hash->regex_lock); in ftrace_set_hash()
5925 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) in check_direct_multi()
5926 return -EINVAL; in check_direct_multi()
5927 if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS) in check_direct_multi()
5928 return -EINVAL; in check_direct_multi()
5937 size = 1 << hash->size_bits; in remove_direct_functions_hash()
5939 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in remove_direct_functions_hash()
5940 del = __ftrace_lookup_ip(direct_functions, entry->ip); in remove_direct_functions_hash()
5941 if (del && del->direct == addr) { in remove_direct_functions_hash()
5957 * register_ftrace_direct - Call a custom trampoline directly
5972 * -EINVAL - The @ops object was already registered with this call or
5974 * -EBUSY - Another direct function is already attached (there can be only one)
5975 * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5976 * -ENOMEM - There was an allocation failure.
5982 int err = -EBUSY, size, i; in register_ftrace_direct()
5984 if (ops->func || ops->trampoline) in register_ftrace_direct()
5985 return -EINVAL; in register_ftrace_direct()
5986 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) in register_ftrace_direct()
5987 return -EINVAL; in register_ftrace_direct()
5988 if (ops->flags & FTRACE_OPS_FL_ENABLED) in register_ftrace_direct()
5989 return -EINVAL; in register_ftrace_direct()
5991 hash = ops->func_hash->filter_hash; in register_ftrace_direct()
5993 return -EINVAL; in register_ftrace_direct()
5998 size = 1 << hash->size_bits; in register_ftrace_direct()
6000 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_direct()
6001 if (ftrace_find_rec_direct(entry->ip)) in register_ftrace_direct()
6006 err = -ENOMEM; in register_ftrace_direct()
6009 size = hash->count + direct_functions->count; in register_ftrace_direct()
6018 size = 1 << direct_functions->size_bits; in register_ftrace_direct()
6020 hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) { in register_ftrace_direct()
6021 new = add_hash_entry(new_hash, entry->ip); in register_ftrace_direct()
6024 new->direct = entry->direct; in register_ftrace_direct()
6029 size = 1 << hash->size_bits; in register_ftrace_direct()
6031 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_direct()
6032 new = add_hash_entry(new_hash, entry->ip); in register_ftrace_direct()
6036 new->direct = addr; in register_ftrace_direct()
6037 entry->direct = addr; in register_ftrace_direct()
6045 ops->func = call_direct_funcs; in register_ftrace_direct()
6046 ops->flags = MULTI_FLAGS; in register_ftrace_direct()
6047 ops->trampoline = FTRACE_REGS_ADDR; in register_ftrace_direct()
6048 ops->direct_call = addr; in register_ftrace_direct()
6056 call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb); in register_ftrace_direct()
6066 * unregister_ftrace_direct - Remove calls to custom trampoline
6078 * -EINVAL - The @ops object was not properly registered.
6083 struct ftrace_hash *hash = ops->func_hash->filter_hash; in unregister_ftrace_direct()
6087 return -EINVAL; in unregister_ftrace_direct()
6088 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in unregister_ftrace_direct()
6089 return -EINVAL; in unregister_ftrace_direct()
6097 ops->func = NULL; in unregister_ftrace_direct()
6098 ops->trampoline = 0; in unregister_ftrace_direct()
6122 tmp_ops.func_hash = ops->func_hash; in __modify_ftrace_direct()
6135 hash = ops->func_hash->filter_hash; in __modify_ftrace_direct()
6136 size = 1 << hash->size_bits; in __modify_ftrace_direct()
6138 hlist_for_each_entry(iter, &hash->buckets[i], hlist) { in __modify_ftrace_direct()
6139 entry = __ftrace_lookup_ip(direct_functions, iter->ip); in __modify_ftrace_direct()
6142 entry->direct = addr; in __modify_ftrace_direct()
6146 WRITE_ONCE(ops->direct_call, addr); in __modify_ftrace_direct()
6157 * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
6172 * -EINVAL - The @ops object was not properly registered.
6177 return -EINVAL; in modify_ftrace_direct_nolock()
6178 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in modify_ftrace_direct_nolock()
6179 return -EINVAL; in modify_ftrace_direct_nolock()
6186 * modify_ftrace_direct - Modify an existing direct 'multi' call
6198 * -EINVAL - The @ops object was not properly registered.
6205 return -EINVAL; in modify_ftrace_direct()
6206 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in modify_ftrace_direct()
6207 return -EINVAL; in modify_ftrace_direct()
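/*
 * Hedged usage sketch for the direct-call API above, mirroring the flow of
 * samples/ftrace/ftrace-direct.c rather than code from this file.  my_tramp
 * is assumed to be an architecture-specific assembly trampoline defined
 * elsewhere; the ops' filter hash must be populated first (here via
 * ftrace_set_filter_ip(), which also initializes the ops) or
 * register_ftrace_direct() returns -EINVAL.
 */
#include <linux/ftrace.h>
#include <linux/sched.h>

extern void my_tramp(void);		/* arch-specific asm stub, assumed elsewhere */

static struct ftrace_ops direct;	/* zeroed: no ->func, no ->trampoline yet */

static int attach_direct(void)
{
	int ret;

	/* attach to one call site; this populates the filter hash */
	ret = ftrace_set_filter_ip(&direct, (unsigned long)wake_up_process, 0, 0);
	if (ret)
		return ret;

	/* every filtered site now jumps straight to my_tramp */
	return register_ftrace_direct(&direct, (unsigned long)my_tramp);
}

static void detach_direct(void)
{
	/* the final argument also frees the filter hash */
	unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
}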
6218 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
6240 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
6263 * ftrace_ops_set_global_filter - setup ops to use global filters
6271 if (ops->flags & FTRACE_OPS_FL_INITIALIZED) in ftrace_ops_set_global_filter()
6275 ops->func_hash = &global_ops.local_hash; in ftrace_ops_set_global_filter()
6285 struct trace_array *tr = ops->private; in ftrace_set_regex()
6293 return -EINVAL; in ftrace_set_regex()
6297 return -EINVAL; in ftrace_set_regex()
6300 len = command - func; in ftrace_set_regex()
6310 return -ENOMEM; in ftrace_set_regex()
6318 * ftrace_set_filter - set a function to filter on in ftrace
6322 * @reset: non-zero to reset all filters before applying this filter.
6340 * ftrace_set_notrace - set a function to not trace in ftrace
6344 * @reset: non-zero to reset all filters before applying this filter.
6362 * ftrace_set_global_filter - set a function to filter on with global tracers
6365 * @reset: non-zero to reset all filters before applying this filter.
6377 * ftrace_set_global_notrace - set a function to not trace with global tracers
6380 * @reset: non-zero to reset all filters before applying this filter.
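/*
 * Hedged sketch of the filter helpers documented above: scope an ftrace_ops
 * to a glob plus one exact address, and exclude a single function.  my_ops
 * and the chosen symbols are placeholders, not taken from this file.
 */
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/string.h>

static struct ftrace_ops my_ops;	/* ->func is filled in before registration */

static int setup_filters(void)
{
	int ret;

	/* trace everything starting with "vfs_", resetting any old filter */
	ret = ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
	if (ret)
		return ret;

	/* add one exact address on top of the glob (remove=0, reset=0) */
	ret = ftrace_set_filter_ip(&my_ops, (unsigned long)schedule, 0, 0);
	if (ret)
		return ret;

	/* never trace vfs_read, even though it matches the glob above */
	return ftrace_set_notrace(&my_ops, "vfs_read", strlen("vfs_read"), 0);
}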
6479 if (!ops->private) { in ftrace_set_early_filter()
6482 ops->private = tr; in ftrace_set_early_filter()
6508 struct seq_file *m = (struct seq_file *)file->private_data; in ftrace_regex_release()
6514 if (file->f_mode & FMODE_READ) { in ftrace_regex_release()
6515 iter = m->private; in ftrace_regex_release()
6518 iter = file->private_data; in ftrace_regex_release()
6520 parser = &iter->parser; in ftrace_regex_release()
6522 int enable = !(iter->flags & FTRACE_ITER_NOTRACE); in ftrace_regex_release()
6524 ftrace_process_regex(iter, parser->buffer, in ftrace_regex_release()
6525 parser->idx, enable); in ftrace_regex_release()
6530 mutex_lock(&iter->ops->func_hash->regex_lock); in ftrace_regex_release()
6532 if (file->f_mode & FMODE_WRITE) { in ftrace_regex_release()
6533 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); in ftrace_regex_release()
6536 orig_hash = &iter->ops->func_hash->filter_hash; in ftrace_regex_release()
6537 if (iter->tr) { in ftrace_regex_release()
6538 if (list_empty(&iter->tr->mod_trace)) in ftrace_regex_release()
6539 iter->hash->flags &= ~FTRACE_HASH_FL_MOD; in ftrace_regex_release()
6541 iter->hash->flags |= FTRACE_HASH_FL_MOD; in ftrace_regex_release()
6544 orig_hash = &iter->ops->func_hash->notrace_hash; in ftrace_regex_release()
6547 ftrace_hash_move_and_update_ops(iter->ops, orig_hash, in ftrace_regex_release()
6548 iter->hash, filter_hash); in ftrace_regex_release()
6552 mutex_unlock(&iter->ops->func_hash->regex_lock); in ftrace_regex_release()
6553 free_ftrace_hash(iter->hash); in ftrace_regex_release()
6554 if (iter->tr) in ftrace_regex_release()
6555 trace_array_put(iter->tr); in ftrace_regex_release()
6632 struct ftrace_graph_data *fgd = m->private; in __g_next()
6633 struct ftrace_func_entry *entry = fgd->entry; in __g_next()
6635 int i, idx = fgd->idx; in __g_next()
6637 if (*pos >= fgd->hash->count) in __g_next()
6642 fgd->entry = entry; in __g_next()
6649 for (i = idx; i < 1 << fgd->hash->size_bits; i++) { in __g_next()
6650 head = &fgd->hash->buckets[i]; in __g_next()
6652 fgd->entry = entry; in __g_next()
6653 fgd->idx = i; in __g_next()
6669 struct ftrace_graph_data *fgd = m->private; in g_start()
6673 if (fgd->type == GRAPH_FILTER_FUNCTION) in g_start()
6674 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, in g_start()
6677 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, in g_start()
6681 if (ftrace_hash_empty(fgd->hash) && !*pos) in g_start()
6684 fgd->idx = 0; in g_start()
6685 fgd->entry = NULL; in g_start()
6702 struct ftrace_graph_data *fgd = m->private; in g_show()
6704 if (fgd->type == GRAPH_FILTER_FUNCTION) in g_show()
6711 seq_printf(m, "%ps\n", (void *)entry->ip); in g_show()
6734 if (file->f_mode & FMODE_WRITE) { in __ftrace_graph_open()
6737 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) in __ftrace_graph_open()
6738 return -ENOMEM; in __ftrace_graph_open()
6740 if (file->f_flags & O_TRUNC) in __ftrace_graph_open()
6744 fgd->hash); in __ftrace_graph_open()
6746 ret = -ENOMEM; in __ftrace_graph_open()
6751 if (file->f_mode & FMODE_READ) { in __ftrace_graph_open()
6754 struct seq_file *m = file->private_data; in __ftrace_graph_open()
6755 m->private = fgd; in __ftrace_graph_open()
6762 file->private_data = fgd; in __ftrace_graph_open()
6765 if (ret < 0 && file->f_mode & FMODE_WRITE) in __ftrace_graph_open()
6766 trace_parser_put(&fgd->parser); in __ftrace_graph_open()
6768 fgd->new_hash = new_hash; in __ftrace_graph_open()
6771 * All uses of fgd->hash must be taken with the graph_lock in __ftrace_graph_open()
6773 * fgd->hash to be reinitialized when it is taken again. in __ftrace_graph_open()
6775 fgd->hash = NULL; in __ftrace_graph_open()
6787 return -ENODEV; in ftrace_graph_open()
6791 return -ENOMEM; in ftrace_graph_open()
6795 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, in ftrace_graph_open()
6797 fgd->type = GRAPH_FILTER_FUNCTION; in ftrace_graph_open()
6798 fgd->seq_ops = &ftrace_graph_seq_ops; in ftrace_graph_open()
6815 return -ENODEV; in ftrace_graph_notrace_open()
6819 return -ENOMEM; in ftrace_graph_notrace_open()
6823 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, in ftrace_graph_notrace_open()
6825 fgd->type = GRAPH_FILTER_NOTRACE; in ftrace_graph_notrace_open()
6826 fgd->seq_ops = &ftrace_graph_seq_ops; in ftrace_graph_notrace_open()
6844 if (file->f_mode & FMODE_READ) { in ftrace_graph_release()
6845 struct seq_file *m = file->private_data; in ftrace_graph_release()
6847 fgd = m->private; in ftrace_graph_release()
6850 fgd = file->private_data; in ftrace_graph_release()
6854 if (file->f_mode & FMODE_WRITE) { in ftrace_graph_release()
6856 parser = &fgd->parser; in ftrace_graph_release()
6859 ret = ftrace_graph_set_hash(fgd->new_hash, in ftrace_graph_release()
6860 parser->buffer); in ftrace_graph_release()
6865 new_hash = __ftrace_hash_move(fgd->new_hash); in ftrace_graph_release()
6867 ret = -ENOMEM; in ftrace_graph_release()
6873 if (fgd->type == GRAPH_FILTER_FUNCTION) { in ftrace_graph_release()
6900 free_ftrace_hash(fgd->new_hash); in ftrace_graph_release()
6925 return -ENODEV; in ftrace_graph_set_hash()
6929 if (rec->flags & FTRACE_FL_DISABLED) in ftrace_graph_set_hash()
6933 entry = ftrace_lookup_ip(hash, rec->ip); in ftrace_graph_set_hash()
6940 if (add_hash_entry(hash, rec->ip) == NULL) in ftrace_graph_set_hash()
6952 return fail ? -EINVAL : 0; in ftrace_graph_set_hash()
6960 struct ftrace_graph_data *fgd = file->private_data; in ftrace_graph_write()
6967 if (file->f_mode & FMODE_READ) { in ftrace_graph_write()
6968 struct seq_file *m = file->private_data; in ftrace_graph_write()
6969 fgd = m->private; in ftrace_graph_write()
6972 parser = &fgd->parser; in ftrace_graph_write()
6979 ret = ftrace_graph_set_hash(fgd->new_hash, in ftrace_graph_write()
6980 parser->buffer); in ftrace_graph_write()
7031 if (ops->flags & FTRACE_OPS_FL_ENABLED) in ftrace_destroy_filter_files()
7033 ops->flags |= FTRACE_OPS_FL_DELETED; in ftrace_destroy_filter_files()
7069 const unsigned long *ipa = a; in ftrace_cmp_ips() local
7072 if (*ipa > *ipb) in ftrace_cmp_ips()
7074 if (*ipa < *ipb) in ftrace_cmp_ips()
7075 return -1; in ftrace_cmp_ips()
7085 if (WARN(start[i - 1] > start[i], in test_is_sorted()
7087 (void *)start[i - 1], start[i - 1], in test_is_sorted()
7114 int ret = -ENOMEM; in ftrace_process_locs()
7116 count = end - start; in ftrace_process_locs()
7137 return -ENOMEM; in ftrace_process_locs()
7154 if (WARN_ON(ftrace_pages->next)) { in ftrace_process_locs()
7156 while (ftrace_pages->next) in ftrace_process_locs()
7157 ftrace_pages = ftrace_pages->next; in ftrace_process_locs()
7160 ftrace_pages->next = start_pg; in ftrace_process_locs()
7194 end_offset = (pg->index+1) * sizeof(pg->records[0]); in ftrace_process_locs()
7195 if (end_offset > PAGE_SIZE << pg->order) { in ftrace_process_locs()
7197 if (WARN_ON(!pg->next)) in ftrace_process_locs()
7199 pg = pg->next; in ftrace_process_locs()
7202 rec = &pg->records[pg->index++]; in ftrace_process_locs()
7203 rec->ip = addr; in ftrace_process_locs()
7206 if (pg->next) { in ftrace_process_locs()
7207 pg_unuse = pg->next; in ftrace_process_locs()
7208 pg->next = NULL; in ftrace_process_locs()
7237 pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index; in ftrace_process_locs()
7241 skip = skipped - pg_remaining; in ftrace_process_locs()
7243 for (pg = pg_unuse; pg; pg = pg->next) in ftrace_process_locs()
7244 remaining += 1 << pg->order; in ftrace_process_locs()
7246 pages -= remaining; in ftrace_process_locs()
7263 count -= skipped; in ftrace_process_locs()
7296 if (!op->trampoline || symnum--) in ftrace_get_trampoline_kallsym()
7298 *value = op->trampoline; in ftrace_get_trampoline_kallsym()
7306 return -ERANGE; in ftrace_get_trampoline_kallsym()
7321 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in ops_references_ip()
7329 if (!ftrace_hash_empty(ops->func_hash->filter_hash) && in ops_references_ip()
7330 !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip)) in ops_references_ip()
7334 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip)) in ops_references_ip()
7352 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { in referenced_filters()
7353 if (ops_references_ip(ops, rec->ip)) { in referenced_filters()
7354 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) in referenced_filters()
7356 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) in referenced_filters()
7359 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) in referenced_filters()
7360 rec->flags |= FTRACE_FL_REGS; in referenced_filters()
7361 if (cnt == 1 && ops->trampoline) in referenced_filters()
7362 rec->flags |= FTRACE_FL_TRAMP; in referenced_filters()
7364 rec->flags &= ~FTRACE_FL_TRAMP; in referenced_filters()
7381 for (i = 0; i < pg->index; i++) { in clear_mod_from_hash()
7382 rec = &pg->records[i]; in clear_mod_from_hash()
7383 entry = __ftrace_lookup_ip(hash, rec->ip); in clear_mod_from_hash()
7390 entry->ip = 0; in clear_mod_from_hash()
7401 if (!tr->ops || !tr->ops->func_hash) in clear_mod_from_hashes()
7403 mutex_lock(&tr->ops->func_hash->regex_lock); in clear_mod_from_hashes()
7404 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); in clear_mod_from_hashes()
7405 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); in clear_mod_from_hashes()
7406 mutex_unlock(&tr->ops->func_hash->regex_lock); in clear_mod_from_hashes()
7418 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { in ftrace_free_mod_map()
7419 kfree(mod_func->name); in ftrace_free_mod_map()
7420 list_del(&mod_func->list); in ftrace_free_mod_map()
7443 if (mod_map->mod == mod) { in ftrace_release_mod()
7444 list_del_rcu(&mod_map->list); in ftrace_release_mod()
7445 call_rcu(&mod_map->rcu, ftrace_free_mod_map); in ftrace_release_mod()
7459 rec = &pg->records[0]; in ftrace_release_mod()
7460 if (within_module(rec->ip, mod)) { in ftrace_release_mod()
7472 ftrace_update_tot_cnt -= pg->index; in ftrace_release_mod()
7473 *last_pg = pg->next; in ftrace_release_mod()
7475 pg->next = tmp_page; in ftrace_release_mod()
7478 last_pg = &pg->next; in ftrace_release_mod()
7491 if (pg->records) { in ftrace_release_mod()
7492 free_pages((unsigned long)pg->records, pg->order); in ftrace_release_mod()
7493 ftrace_number_of_pages -= 1 << pg->order; in ftrace_release_mod()
7495 tmp_page = pg->next; in ftrace_release_mod()
7497 ftrace_number_of_groups--; in ftrace_release_mod()
7521 * text to read-only, as we now need to set it back to read-write in ftrace_module_enable()
7535 if (!within_module(rec->ip, mod)) in ftrace_module_enable()
7543 rec->flags = FTRACE_FL_DISABLED; in ftrace_module_enable()
7558 rec->flags &= ~FTRACE_FL_DISABLED; in ftrace_module_enable()
7559 rec->flags += cnt; in ftrace_module_enable()
7578 process_cached_mods(mod->name); in ftrace_module_enable()
7585 if (ftrace_disabled || !mod->num_ftrace_callsites) in ftrace_module_init()
7588 ret = ftrace_process_locs(mod, mod->ftrace_callsites, in ftrace_module_init()
7589 mod->ftrace_callsites + mod->num_ftrace_callsites); in ftrace_module_init()
7592 mod->name); in ftrace_module_init()
7605 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); in save_ftrace_mod_rec()
7613 mod_func->name = kstrdup(str, GFP_KERNEL); in save_ftrace_mod_rec()
7614 if (!mod_func->name) { in save_ftrace_mod_rec()
7619 mod_func->ip = rec->ip - offset; in save_ftrace_mod_rec()
7620 mod_func->size = symsize; in save_ftrace_mod_rec()
7622 mod_map->num_funcs++; in save_ftrace_mod_rec()
7624 list_add_rcu(&mod_func->list, &mod_map->funcs); in save_ftrace_mod_rec()
7640 mod_map->mod = mod; in allocate_ftrace_mod_map()
7641 mod_map->start_addr = start; in allocate_ftrace_mod_map()
7642 mod_map->end_addr = end; in allocate_ftrace_mod_map()
7643 mod_map->num_funcs = 0; in allocate_ftrace_mod_map()
7645 INIT_LIST_HEAD_RCU(&mod_map->funcs); in allocate_ftrace_mod_map()
7647 list_add_rcu(&mod_map->list, &ftrace_mod_maps); in allocate_ftrace_mod_map()
7660 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { in ftrace_func_address_lookup()
7661 if (addr >= mod_func->ip && in ftrace_func_address_lookup()
7662 addr < mod_func->ip + mod_func->size) { in ftrace_func_address_lookup()
7670 *size = found_func->size; in ftrace_func_address_lookup()
7672 *off = addr - found_func->ip; in ftrace_func_address_lookup()
7673 return strscpy(sym, found_func->name, KSYM_NAME_LEN); in ftrace_func_address_lookup()
7692 *modname = mod_map->mod->name; in ftrace_mod_address_lookup()
7712 if (symnum >= mod_map->num_funcs) { in ftrace_mod_get_kallsym()
7713 symnum -= mod_map->num_funcs; in ftrace_mod_get_kallsym()
7717 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { in ftrace_mod_get_kallsym()
7719 symnum--; in ftrace_mod_get_kallsym()
7723 *value = mod_func->ip; in ftrace_mod_get_kallsym()
7725 strscpy(name, mod_func->name, KSYM_NAME_LEN); in ftrace_mod_get_kallsym()
7726 strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); in ftrace_mod_get_kallsym()
7774 entry = ftrace_lookup_ip(hash, func->ip); in clear_func_from_hash()
7781 entry->ip = 0; in clear_func_from_hash()
7791 if (!tr->ops || !tr->ops->func_hash) in clear_func_from_hashes()
7793 mutex_lock(&tr->ops->func_hash->regex_lock); in clear_func_from_hashes()
7794 clear_func_from_hash(func, tr->ops->func_hash->filter_hash); in clear_func_from_hashes()
7795 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); in clear_func_from_hashes()
7796 mutex_unlock(&tr->ops->func_hash->regex_lock); in clear_func_from_hashes()
7812 func->ip = rec->ip; in add_to_clear_hash_list()
7813 list_add(&func->list, clear_list); in add_to_clear_hash_list()
7842 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { in ftrace_free_mem()
7843 if (end < pg->records[0].ip || in ftrace_free_mem()
7844 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) in ftrace_free_mem()
7847 rec = bsearch(&key, pg->records, pg->index, in ftrace_free_mem()
7859 pg->index--; in ftrace_free_mem()
7860 ftrace_update_tot_cnt--; in ftrace_free_mem()
7861 if (!pg->index) { in ftrace_free_mem()
7862 *last_pg = pg->next; in ftrace_free_mem()
7863 pg->next = tmp_page; in ftrace_free_mem()
7871 (pg->index - (rec - pg->records)) * sizeof(*rec)); in ftrace_free_mem()
7916 count = __stop_mcount_loc - __start_mcount_loc; in ftrace_init()
7949 unsigned long trampoline = ops->trampoline; in ftrace_update_trampoline()
7952 if (ops->trampoline && ops->trampoline != trampoline && in ftrace_update_trampoline()
7953 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { in ftrace_update_trampoline()
7957 ops->trampoline, ops->trampoline_size, false, in ftrace_update_trampoline()
7963 perf_event_text_poke((void *)ops->trampoline, NULL, 0, in ftrace_update_trampoline()
7964 (void *)ops->trampoline, in ftrace_update_trampoline()
7965 ops->trampoline_size); in ftrace_update_trampoline()
7971 if (tr->flags & TRACE_ARRAY_FL_MOD_INIT) in ftrace_init_trace_array()
7974 INIT_LIST_HEAD(&tr->func_probes); in ftrace_init_trace_array()
7975 INIT_LIST_HEAD(&tr->mod_trace); in ftrace_init_trace_array()
7976 INIT_LIST_HEAD(&tr->mod_notrace); in ftrace_init_trace_array()
7978 tr->flags |= TRACE_ARRAY_FL_MOD_INIT; in ftrace_init_trace_array()
8006 tr->ops = &global_ops; in ftrace_init_global_array_ops()
8010 init_array_fgraph_ops(tr, tr->ops); in ftrace_init_global_array_ops()
8016 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { in ftrace_init_array_ops()
8017 if (WARN_ON(tr->ops->func != ftrace_stub)) in ftrace_init_array_ops()
8019 tr->ops->func); in ftrace_init_array_ops()
8021 tr->ops->func = func; in ftrace_init_array_ops()
8022 tr->ops->private = tr; in ftrace_init_array_ops()
8027 tr->ops->func = ftrace_stub; in ftrace_reset_array_ops()
8049 if (op->flags & FTRACE_OPS_FL_STUB) in __ftrace_ops_list_func()
8056 * If any of the above fails then the op->func() is not executed. in __ftrace_ops_list_func()
8058 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && in __ftrace_ops_list_func()
8060 if (FTRACE_WARN_ON(!op->func)) { in __ftrace_ops_list_func()
8064 op->func(ip, parent_ip, op, fregs); in __ftrace_ops_list_func()
8116 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) in ftrace_ops_assist_func()
8117 op->func(ip, parent_ip, op, fregs); in ftrace_ops_assist_func()
8124 * ftrace_ops_get_func - get the function a trampoline should call
8127 * Normally the mcount trampoline will call the ops->func, but there
8140 if (ops->flags & (FTRACE_OPS_FL_RECURSION | in ftrace_ops_get_func()
8144 return ops->func; in ftrace_ops_get_func()
8157 pid_list = rcu_dereference_sched(tr->function_pids); in ftrace_filter_pid_sched_switch_probe()
8158 no_pid_list = rcu_dereference_sched(tr->function_no_pids); in ftrace_filter_pid_sched_switch_probe()
8161 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ftrace_filter_pid_sched_switch_probe()
8164 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ftrace_filter_pid_sched_switch_probe()
8165 next->pid); in ftrace_filter_pid_sched_switch_probe()
8176 pid_list = rcu_dereference_sched(tr->function_pids); in ftrace_pid_follow_sched_process_fork()
8179 pid_list = rcu_dereference_sched(tr->function_no_pids); in ftrace_pid_follow_sched_process_fork()
8189 pid_list = rcu_dereference_sched(tr->function_pids); in ftrace_pid_follow_sched_process_exit()
8192 pid_list = rcu_dereference_sched(tr->function_no_pids); in ftrace_pid_follow_sched_process_exit()
8217 pid_list = rcu_dereference_protected(tr->function_pids, in clear_ftrace_pids()
8219 no_pid_list = rcu_dereference_protected(tr->function_no_pids, in clear_ftrace_pids()
8230 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; in clear_ftrace_pids()
8234 rcu_assign_pointer(tr->function_pids, NULL); in clear_ftrace_pids()
8237 rcu_assign_pointer(tr->function_no_pids, NULL); in clear_ftrace_pids()
8276 struct trace_array *tr = m->private; in fpid_start()
8281 pid_list = rcu_dereference_sched(tr->function_pids); in fpid_start()
8291 struct trace_array *tr = m->private; in fpid_next()
8292 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); in fpid_next()
8329 struct trace_array *tr = m->private; in fnpid_start()
8334 pid_list = rcu_dereference_sched(tr->function_no_pids); in fnpid_start()
8344 struct trace_array *tr = m->private; in fnpid_next()
8345 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids); in fnpid_next()
8364 struct trace_array *tr = inode->i_private; in pid_open()
8372 if ((file->f_mode & FMODE_WRITE) && in pid_open()
8373 (file->f_flags & O_TRUNC)) in pid_open()
8386 return -EINVAL; in pid_open()
8393 m = file->private_data; in pid_open()
8395 m->private = tr; in pid_open()
8423 pid_list = rcu_dereference_protected(tr->function_pids, in ignore_task_cpu()
8425 no_pid_list = rcu_dereference_protected(tr->function_no_pids, in ignore_task_cpu()
8429 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ignore_task_cpu()
8432 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ignore_task_cpu()
8433 current->pid); in ignore_task_cpu()
8440 struct seq_file *m = filp->private_data; in pid_write()
8441 struct trace_array *tr = m->private; in pid_write()
8454 filtered_pids = rcu_dereference_protected(tr->function_pids, in pid_write()
8456 other_pids = rcu_dereference_protected(tr->function_no_pids, in pid_write()
8460 filtered_pids = rcu_dereference_protected(tr->function_no_pids, in pid_write()
8462 other_pids = rcu_dereference_protected(tr->function_pids, in pid_write()
8467 return -EINVAL; in pid_write()
8476 rcu_assign_pointer(tr->function_pids, pid_list); in pid_write()
8479 rcu_assign_pointer(tr->function_no_pids, pid_list); in pid_write()
8524 struct trace_array *tr = inode->i_private; in ftrace_pid_release()
8559 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); in ftrace_init_tracefs_toplevel()
8566 * ftrace_kill - kill ftrace
8570 * from a non-atomic section, use ftrace_kill.
8581 * ftrace_is_dead - Test if ftrace is dead or not.
8612 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) in prepare_direct_functions_for_ipmodify()
8615 hash = ops->func_hash->filter_hash; in prepare_direct_functions_for_ipmodify()
8616 size = 1 << hash->size_bits; in prepare_direct_functions_for_ipmodify()
8618 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in prepare_direct_functions_for_ipmodify()
8619 unsigned long ip = entry->ip; in prepare_direct_functions_for_ipmodify()
8624 if (!(op->flags & FTRACE_OPS_FL_DIRECT)) in prepare_direct_functions_for_ipmodify()
8634 if (!op->ops_func) in prepare_direct_functions_for_ipmodify()
8635 return -EBUSY; in prepare_direct_functions_for_ipmodify()
8637 ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER); in prepare_direct_functions_for_ipmodify()
8659 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) in cleanup_direct_functions_after_ipmodify()
8664 hash = ops->func_hash->filter_hash; in cleanup_direct_functions_after_ipmodify()
8665 size = 1 << hash->size_bits; in cleanup_direct_functions_after_ipmodify()
8667 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in cleanup_direct_functions_after_ipmodify()
8668 unsigned long ip = entry->ip; in cleanup_direct_functions_after_ipmodify()
8673 if (!(op->flags & FTRACE_OPS_FL_DIRECT)) in cleanup_direct_functions_after_ipmodify()
8683 if (found_op && op->ops_func) in cleanup_direct_functions_after_ipmodify()
8684 op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER); in cleanup_direct_functions_after_ipmodify()
8728 * register_ftrace_function - register a function for profiling
8734 * Note: @ops->func and all the functions it calls must be labeled
8756 * unregister_ftrace_function - unregister a function for profiling.
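/*
 * Hedged sketch of the registration API documented above.  Per the note,
 * the callback and everything it calls should be marked notrace; all names
 * here are placeholders.  Filters (see the ftrace_set_filter family above)
 * can be applied to the ops to narrow which functions invoke the callback.
 */
#include <linux/ftrace.h>

static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op,
				  struct ftrace_regs *fregs)
{
	/* runs for every traced function that passes op's filter hashes */
}

static struct ftrace_ops my_trace_ops = {
	.func	= my_trace_func,
	/* e.g. .flags = FTRACE_OPS_FL_SAVE_REGS if full pt_regs are needed */
};

static int my_tracer_start(void)
{
	return register_ftrace_function(&my_trace_ops);
}

static void my_tracer_stop(void)
{
	unregister_ftrace_function(&my_trace_ops);
}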
8799 sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp); in kallsyms_callback()
8803 idx = sym - args->syms; in kallsyms_callback()
8804 if (args->addrs[idx]) in kallsyms_callback()
8810 args->addrs[idx] = addr; in kallsyms_callback()
8811 args->found++; in kallsyms_callback()
8812 return args->found == args->cnt ? 1 : 0; in kallsyms_callback()
8816 * ftrace_lookup_symbols - Lookup addresses for array of symbols
8828 * Returns: 0 if all provided symbols are found, -ESRCH otherwise.
8845 return found_all ? 0 : -ESRCH; in ftrace_lookup_symbols()
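/*
 * Hedged sketch for ftrace_lookup_symbols() documented above: resolve a
 * name-sorted array of symbols to addresses in a single kallsyms walk
 * (kallsyms_callback() bsearches the array, so the sort order matters).
 * The symbol names are illustrative only.
 */
#include <linux/ftrace.h>
#include <linux/kernel.h>

static int resolve_addrs(void)
{
	static const char *syms[] = { "schedule", "wake_up_process" };	/* sorted by name */
	unsigned long addrs[ARRAY_SIZE(syms)] = { 0 };
	int ret;

	ret = ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
	if (ret)	/* -ESRCH: at least one name was not found */
		return ret;

	/* addrs[i] now holds the resolved address of syms[i] */
	return 0;
}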
8894 if (op->flags & FTRACE_OPS_FL_PERMANENT) in is_permanent_ops_registered()
8910 return -ENODEV; in ftrace_enable_sysctl()
8929 return -EBUSY; in ftrace_enable_sysctl()