Lines matching full:hash in kernel/trace/ftrace.c. Each hit shows its source line number, the matching source line, and either the enclosing function ("in func()") or the kind of declaration (member, argument, local).
35 #include <linux/hash.h>
70 /* hash bits for specific function selection */
434 struct hlist_head *hash; member
603 memset(stat->hash, 0, in ftrace_profile_reset()
670 if (stat->hash) { in ftrace_profile_init_cpu()
678 * functions are hit. We'll make a hash of 1024 items. in ftrace_profile_init_cpu()
682 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL); in ftrace_profile_init_cpu()
684 if (!stat->hash) in ftrace_profile_init_cpu()
689 kfree(stat->hash); in ftrace_profile_init_cpu()
690 stat->hash = NULL; in ftrace_profile_init_cpu()
720 hhd = &stat->hash[key]; in ftrace_find_profiled_func()
739 hlist_add_head_rcu(&rec->node, &stat->hash[key]); in ftrace_add_profile()
791 if (!stat->hash || !ftrace_profile_enabled) in function_profile_call()
855 if (!stat->hash || !ftrace_profile_enabled) in profile_graph_return()
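
The profiler hits above (source lines 603-855) set up one fixed-size chained table per CPU: an array of hlist_head buckets, each chaining profile records keyed by function address, sized at 1024 entries per the comment at source line 678. A minimal sketch of that allocation, assuming a simplified stat struct (the real ftrace_profile_stat carries more state):

    /* Sketch of the profiler's fixed-size chained hash; the struct is
     * a simplified stand-in, and the 1024-bucket size comes from the
     * comment at source line 678 above. */
    #define PROFILE_HASH_SIZE 1024

    struct profile_stat {
            struct hlist_head *hash;
    };

    static int profile_init(struct profile_stat *stat)
    {
            if (stat->hash)         /* already allocated */
                    return 0;
            stat->hash = kcalloc(PROFILE_HASH_SIZE,
                                 sizeof(struct hlist_head), GFP_KERNEL);
            return stat->hash ? 0 : -ENOMEM;
    }
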
1056 * but they are used as the default "empty hash", to avoid allocating
1131 ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip) in ftrace_hash_key() argument
1133 if (hash->size_bits > 0) in ftrace_hash_key()
1134 return hash_long(ip, hash->size_bits); in ftrace_hash_key()
1141 __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) in __ftrace_lookup_ip() argument
1147 key = ftrace_hash_key(hash, ip); in __ftrace_lookup_ip()
1148 hhd = &hash->buckets[key]; in __ftrace_lookup_ip()
1159 * @hash: The hash to look at
1162 * Search a given @hash to see if a given instruction pointer (@ip)
1168 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) in ftrace_lookup_ip() argument
1170 if (ftrace_hash_empty(hash)) in ftrace_lookup_ip()
1173 return __ftrace_lookup_ip(hash, ip); in ftrace_lookup_ip()
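
Source lines 1131-1173 above are the core of the data structure: ftrace_hash_key() folds the instruction pointer down to size_bits bits with hash_long() (hence the <linux/hash.h> include at source line 35) to select a bucket, and __ftrace_lookup_ip() walks that bucket's chain comparing exact ip values, with ftrace_lookup_ip() short-circuiting the empty-hash case. A condensed sketch stitching those fragments together; plain hlist_for_each_entry() stands in for the RCU-safe iterator the kernel uses on this path:

    /* Condensed restatement of the lookup fragments above. */
    static struct ftrace_func_entry *
    lookup_ip_sketch(struct ftrace_hash *hash, unsigned long ip)
    {
            struct ftrace_func_entry *entry;
            unsigned long key;

            if (hash->size_bits > 0)
                    key = hash_long(ip, hash->size_bits);
            else
                    key = 0;        /* one bucket: everything hashes to it */

            hlist_for_each_entry(entry, &hash->buckets[key], hlist) {
                    if (entry->ip == ip)
                            return entry;
            }
            return NULL;
    }
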
1176 static void __add_hash_entry(struct ftrace_hash *hash, in __add_hash_entry() argument
1182 key = ftrace_hash_key(hash, entry->ip); in __add_hash_entry()
1183 hhd = &hash->buckets[key]; in __add_hash_entry()
1185 hash->count++; in __add_hash_entry()
1189 add_hash_entry(struct ftrace_hash *hash, unsigned long ip) in add_hash_entry() argument
1198 __add_hash_entry(hash, entry); in add_hash_entry()
1204 free_hash_entry(struct ftrace_hash *hash, in free_hash_entry() argument
1209 hash->count--; in free_hash_entry()
1213 remove_hash_entry(struct ftrace_hash *hash, in remove_hash_entry() argument
1217 hash->count--; in remove_hash_entry()
1220 static void ftrace_hash_clear(struct ftrace_hash *hash) in ftrace_hash_clear() argument
1225 int size = 1 << hash->size_bits; in ftrace_hash_clear()
1228 if (!hash->count) in ftrace_hash_clear()
1232 hhd = &hash->buckets[i]; in ftrace_hash_clear()
1234 free_hash_entry(hash, entry); in ftrace_hash_clear()
1236 FTRACE_WARN_ON(hash->count); in ftrace_hash_clear()
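
Insertion and removal (source lines 1176-1236) are the standard chained-hash operations plus count bookkeeping: the key is recomputed from entry->ip, the entry is chained at the bucket head, and hash->count tracks membership so ftrace_hash_empty() checks and the consistency warning at source line 1236 stay cheap. Completing the __add_hash_entry() fragments into one unit:

    /* Completion of the __add_hash_entry() fragments above. */
    static void add_entry_sketch(struct ftrace_hash *hash,
                                 struct ftrace_func_entry *entry)
    {
            struct hlist_head *hhd;
            unsigned long key;

            key = ftrace_hash_key(hash, entry->ip);
            hhd = &hash->buckets[key];
            hlist_add_head(&entry->hlist, hhd);
            hash->count++;
    }
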
1261 static void free_ftrace_hash(struct ftrace_hash *hash) in free_ftrace_hash() argument
1263 if (!hash || hash == EMPTY_HASH) in free_ftrace_hash()
1265 ftrace_hash_clear(hash); in free_ftrace_hash()
1266 kfree(hash->buckets); in free_ftrace_hash()
1267 kfree(hash); in free_ftrace_hash()
1272 struct ftrace_hash *hash; in __free_ftrace_hash_rcu() local
1274 hash = container_of(rcu, struct ftrace_hash, rcu); in __free_ftrace_hash_rcu()
1275 free_ftrace_hash(hash); in __free_ftrace_hash_rcu()
1278 static void free_ftrace_hash_rcu(struct ftrace_hash *hash) in free_ftrace_hash_rcu() argument
1280 if (!hash || hash == EMPTY_HASH) in free_ftrace_hash_rcu()
1282 call_rcu(&hash->rcu, __free_ftrace_hash_rcu); in free_ftrace_hash_rcu()
1303 struct ftrace_hash *hash; in alloc_ftrace_hash() local
1306 hash = kzalloc(sizeof(*hash), GFP_KERNEL); in alloc_ftrace_hash()
1307 if (!hash) in alloc_ftrace_hash()
1311 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); in alloc_ftrace_hash()
1313 if (!hash->buckets) { in alloc_ftrace_hash()
1314 kfree(hash); in alloc_ftrace_hash()
1318 hash->size_bits = size_bits; in alloc_ftrace_hash()
1320 return hash; in alloc_ftrace_hash()
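
alloc_ftrace_hash() (source lines 1303-1320) kzallocs the struct and kcallocs 1 << size_bits buckets; free_ftrace_hash() (source lines 1261-1267) clears and frees, refusing to touch the shared EMPTY_HASH sentinel mentioned at source line 1056. A hedged lifecycle sketch using only functions visible in this listing (FTRACE_HASH_DEFAULT_BITS is the default size seen later, e.g. in ftrace_set_hash()):

    /* Lifecycle sketch built from the alloc/add/lookup/free hits. */
    static int hash_lifecycle_sketch(unsigned long ip)
    {
            struct ftrace_hash *hash;
            int ret = 0;

            hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
            if (!hash)
                    return -ENOMEM;

            if (!add_hash_entry(hash, ip))  /* returns NULL on failure */
                    ret = -ENOMEM;
            else if (ftrace_lookup_ip(hash, ip))
                    pr_debug("ip %lx is in the hash\n", ip);

            free_ftrace_hash(hash);         /* no-op for NULL or EMPTY_HASH */
            return ret;
    }
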
1354 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) in alloc_and_copy_ftrace_hash() argument
1365 if (hash) in alloc_and_copy_ftrace_hash()
1366 new_hash->flags = hash->flags; in alloc_and_copy_ftrace_hash()
1368 /* Empty hash? */ in alloc_and_copy_ftrace_hash()
1369 if (ftrace_hash_empty(hash)) in alloc_and_copy_ftrace_hash()
1372 size = 1 << hash->size_bits; in alloc_and_copy_ftrace_hash()
1374 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in alloc_and_copy_ftrace_hash()
1380 FTRACE_WARN_ON(new_hash->count != hash->count); in alloc_and_copy_ftrace_hash()
1396 * Allocate a new hash and remove entries from @src and move them to the new hash.
1397 * On success, the @src hash will be empty and should be freed.
1435 /* Move the @src entries to a newly allocated hash */
1451 * ftrace_hash_move - move a new hash to a filter and do updates
1452 * @ops: The ops with the hash that @dst points to
1453 * @enable: True if for the filter hash, false for the notrace hash
1454 * @dst: Points to the @ops hash that should be updated
1455 * @src: The hash to update @dst with
1457 * This is called when an ftrace_ops hash is being updated and the
1460 * @enable above). If the @ops is enabled, its hash determines what
1461 * callbacks get called. This function gets called when the @ops hash
1477 /* Reject setting notrace hash on IPMODIFY ftrace_ops */ in ftrace_hash_move()
1496 * Remove the current set, update the hash and add in ftrace_hash_move()
1509 struct ftrace_ops_hash *hash) in hash_contains_ip() argument
1513 * hash and not in the notrace hash. Note, an empty hash is in hash_contains_ip()
1514 * considered a match for the filter hash, but an empty in hash_contains_ip()
1515 * notrace hash is considered not in the notrace hash. in hash_contains_ip()
1517 return (ftrace_hash_empty(hash->filter_hash) || in hash_contains_ip()
1518 __ftrace_lookup_ip(hash->filter_hash, ip)) && in hash_contains_ip()
1519 (ftrace_hash_empty(hash->notrace_hash) || in hash_contains_ip()
1520 !__ftrace_lookup_ip(hash->notrace_hash, ip)); in hash_contains_ip()
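
The hash_contains_ip() hits (source lines 1509-1520) encode two asymmetric defaults spelled out in the comment: an empty filter_hash means "trace every function", while an empty notrace_hash means "exclude none". The predicate as a truth table (illustration only; the logic is exactly the return expression above):

    /*
     * traced(ip) = (filter empty  || ip in filter) &&
     *              (notrace empty || ip not in notrace)
     *
     *   filter_hash          notrace_hash       result
     *   -------------------  -----------------  ----------
     *   empty or has ip      empty or lacks ip  traced
     *   nonempty, lacks ip   (anything)         not traced
     *   (anything)           has ip             not traced
     */
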
1538 struct ftrace_ops_hash hash; in ftrace_ops_test() local
1551 rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash); in ftrace_ops_test()
1552 rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash); in ftrace_ops_test()
1554 if (hash_contains_ip(ip, &hash)) in ftrace_ops_test()
1737 struct ftrace_hash *hash; in __ftrace_hash_rec_update() local
1751 * Otherwise we just update the items in the hash. in __ftrace_hash_rec_update()
1753 hash = ops->func_hash->filter_hash; in __ftrace_hash_rec_update()
1755 if (ftrace_hash_empty(hash)) in __ftrace_hash_rec_update()
1769 * Update if the record is not in the notrace hash. in __ftrace_hash_rec_update()
1774 in_hash = !!ftrace_lookup_ip(hash, rec->ip); in __ftrace_hash_rec_update()
1778 * We want to match all functions that are in the hash but in __ftrace_hash_rec_update()
1779 * not in the other hash. in __ftrace_hash_rec_update()
1880 if (!all && count == hash->count) in __ftrace_hash_rec_update()
1916 * ops->hash = new_hash
1920 * its old hash. The @ops hash is updated to the new hash, and then
1934 * If the ops shares the global_ops hash, then we need to update in ftrace_hash_rec_update_modify()
1935 * all ops that are enabled and use this hash. in ftrace_hash_rec_update_modify()
1961 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1962 * - If the hash is EMPTY_HASH, it hits nothing
1963 * - Anything else hits the recs which match the hash entries.
1998 * hash. in __ftrace_hash_update_ipmodify()
2074 struct ftrace_hash *hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_enable() local
2076 if (ftrace_hash_empty(hash)) in ftrace_hash_ipmodify_enable()
2077 hash = NULL; in ftrace_hash_ipmodify_enable()
2079 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); in ftrace_hash_ipmodify_enable()
2085 struct ftrace_hash *hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_disable() local
2087 if (ftrace_hash_empty(hash)) in ftrace_hash_ipmodify_disable()
2088 hash = NULL; in ftrace_hash_ipmodify_disable()
2090 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH); in ftrace_hash_ipmodify_disable()
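
The ipmodify hits (source lines 1961-2090) rest on a three-way convention for a hash argument, stated at source lines 1961-1963: NULL hits every record, EMPTY_HASH hits none, and anything else hits exactly its entries. That is why the enable/disable pair first normalizes an empty filter hash to NULL (an ops with an empty filter traces everything), then expresses each transition as an old/new pair. Condensed from the hits:

    /* The NULL / EMPTY_HASH / populated convention, condensed. */
    struct ftrace_hash *hash = ops->func_hash->filter_hash;

    if (ftrace_hash_empty(hash))
            hash = NULL;            /* empty filter == trace everything */

    /* enable:  from hitting nothing (EMPTY_HASH) to hitting @hash */
    ret = __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);

    /* disable: from hitting @hash back to hitting nothing */
    __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
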
2512 * hash, then it is probably being removed from this in ftrace_find_tramp_ops_curr()
2520 * in its normal filter hash, then this must be the one in ftrace_find_tramp_ops_curr()
2572 * Search the direct_functions hash to see if the given instruction pointer
3206 * Append @new_hash entries to @hash:
3208 * If @hash is the EMPTY_HASH then it traces all functions and nothing
3211 * If @new_hash is the EMPTY_HASH, then make *hash the EMPTY_HASH so
3214 * Otherwise, go through all of @new_hash and add anything that @hash
3215 * doesn't already have, to @hash.
3220 static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash, in append_hash() argument
3227 if (*hash) { in append_hash()
3228 /* An empty hash does everything */ in append_hash()
3229 if (ftrace_hash_empty(*hash)) in append_hash()
3232 *hash = alloc_ftrace_hash(size_bits); in append_hash()
3233 if (!*hash) in append_hash()
3237 /* If new_hash has everything make hash have everything */ in append_hash()
3239 free_ftrace_hash(*hash); in append_hash()
3240 *hash = EMPTY_HASH; in append_hash()
3247 /* Only add if not already in hash */ in append_hash()
3248 if (!__ftrace_lookup_ip(*hash, entry->ip) && in append_hash()
3249 add_hash_entry(*hash, entry->ip) == NULL) in append_hash()
3257 * Remove functions from @hash that are in @notrace_hash
3259 static void remove_hash(struct ftrace_hash *hash, struct ftrace_hash *notrace_hash) in remove_hash() argument
3266 /* If the notrace hash is empty, there's nothing to do */ in remove_hash()
3270 size = 1 << hash->size_bits; in remove_hash()
3272 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { in remove_hash()
3275 remove_hash_entry(hash, entry); in remove_hash()
3282 * Add to @hash only those that are in both @new_hash1 and @new_hash2
3287 static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash1, in intersect_hash() argument
3295 * If new_hash1 or new_hash2 is the EMPTY_HASH then make the hash in intersect_hash()
3299 free_ftrace_hash(*hash); in intersect_hash()
3300 *hash = EMPTY_HASH; in intersect_hash()
3309 add_hash_entry(*hash, entry->ip) == NULL) in intersect_hash()
3314 if (ftrace_hash_empty(*hash)) { in intersect_hash()
3315 free_ftrace_hash(*hash); in intersect_hash()
3316 *hash = EMPTY_HASH; in intersect_hash()
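
append_hash(), remove_hash(), and intersect_hash() (source lines 3206-3316) give the subops machinery its three set operations. Note how the EMPTY_HASH sentinel flips meaning by context, consistent with hash_contains_ip() above: on the filter side it is the universal set (trace all), on the notrace side the empty set (exclude none). Summarized under that reading (an interpretation of the hits, not quoted source):

    /* Set semantics, with U = "all functions":
     *
     *   append_hash(h, new)       h = h ∪ new     filter side: EMPTY_HASH
     *                                             acts as U and absorbs
     *   remove_hash(h, ntrace)    h = h \ ntrace  plain difference
     *   intersect_hash(h, a, b)   h = a ∩ b       notrace side: EMPTY_HASH
     *                                             acts as ∅ and absorbs; an
     *                                             empty result collapses back
     *                                             to the EMPTY_HASH sentinel
     */
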
3352 struct ftrace_hash *hash, in __ftrace_hash_move_and_update_ops() argument
3362 ret = ftrace_hash_move(ops, enable, orig_hash, hash); in __ftrace_hash_move_and_update_ops()
3395 /* If the filter hash is not empty, simply remove the nohash from it */ in add_first_hash()
3424 * The main ops filter hash is not empty, so its in add_next_hash()
3425 * notrace_hash had better be, as the notrace hash in add_next_hash()
3433 /* Copy the subops hash */ in add_next_hash()
3450 * Only process notrace hashes if the main filter hash is empty in add_next_hash()
3451 * (tracing all functions), otherwise the filter hash will just in add_next_hash()
3452 * remove the notrace hash functions, and the notrace hash is in add_next_hash()
3554 * o Make a copy of the subops filter hash in ftrace_startup_subops()
3556 * o Add in the main hash filter functions in ftrace_startup_subops()
3557 * o Remove any of these functions from the main notrace hash in ftrace_startup_subops()
3683 struct ftrace_hash *hash) in ftrace_hash_move_and_update_subops() argument
3696 /* Move the new hash over to the subops hash */ in ftrace_hash_move_and_update_subops()
3698 *orig_subhash = __ftrace_hash_move(hash); in ftrace_hash_move_and_update_subops()
3712 /* Put back the original hash */ in ftrace_hash_move_and_update_subops()
3733 * But notrace hash requires a test of individual module functions. in ops_traces_mod()
3904 struct ftrace_hash *hash; member
3919 struct ftrace_hash *hash; in t_probe_next() local
3943 hash = iter->probe->ops.func_hash->filter_hash; in t_probe_next()
3946 * A probe being registered may temporarily have an empty hash in t_probe_next()
3949 if (!hash || hash == EMPTY_HASH) in t_probe_next()
3952 size = 1 << hash->size_bits; in t_probe_next()
3960 hash = iter->probe->ops.func_hash->filter_hash; in t_probe_next()
3961 size = 1 << hash->size_bits; in t_probe_next()
3965 hhd = &hash->buckets[iter->pidx]; in t_probe_next()
4138 !ftrace_lookup_ip(iter->hash, rec->ip)) || in t_func_next()
4220 ftrace_hash_empty(iter->hash)) { in t_start()
4598 * @ops: The ftrace_ops that hold the hash filters
4604 * @ops. Depending on @flag it may process the filter hash or
4605 * the notrace hash of @ops. With this called from the open
4619 struct ftrace_hash *hash; in ftrace_regex_open() local
4646 hash = ops->func_hash->notrace_hash; in ftrace_regex_open()
4649 hash = ops->func_hash->filter_hash; in ftrace_regex_open()
4659 iter->hash = alloc_ftrace_hash(size_bits); in ftrace_regex_open()
4662 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); in ftrace_regex_open()
4665 if (hash) in ftrace_regex_open()
4666 iter->hash = alloc_and_copy_ftrace_hash(hash->size_bits, hash); in ftrace_regex_open()
4668 iter->hash = EMPTY_HASH; in ftrace_regex_open()
4671 if (!iter->hash) { in ftrace_regex_open()
4687 free_ftrace_hash(iter->hash); in ftrace_regex_open()
4780 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) in enter_record() argument
4785 entry = ftrace_lookup_ip(hash, rec->ip); in enter_record()
4791 free_hash_entry(hash, entry); in enter_record()
4796 if (add_hash_entry(hash, rec->ip) == NULL) in enter_record()
4803 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, in add_rec_by_index() argument
4821 enter_record(hash, rec, clear_filter); in add_rec_by_index()
4889 match_records(struct ftrace_hash *hash, char *func, int len, char *mod) in match_records() argument
4919 return add_rec_by_index(hash, &func_g, clear_filter); in match_records()
4927 ret = enter_record(hash, rec, clear_filter); in match_records()
4939 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) in ftrace_match_records() argument
4941 return match_records(hash, buff, len, NULL); in ftrace_match_records()
4977 struct ftrace_hash *hash, in ftrace_hash_move_and_update_ops() argument
4981 return ftrace_hash_move_and_update_subops(ops, orig_hash, hash); in ftrace_hash_move_and_update_ops()
4993 /* Check if any other manager subops maps to this hash */ in ftrace_hash_move_and_update_ops()
5000 return ftrace_hash_move_and_update_subops(subops, orig_hash, hash); in ftrace_hash_move_and_update_ops()
5006 return __ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); in ftrace_hash_move_and_update_ops()
5023 /* Look to remove this hash */ in cache_mod()
5145 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, in ftrace_mod_callback() argument
5166 ret = match_records(hash, func, strlen(func), module); in ftrace_mod_callback()
5198 * period. This syncs the hash iteration and freeing of items in function_trace_probe_call()
5199 * on the hash. rcu_read_lock is too dangerous here. in function_trace_probe_call()
5212 * Note, ftrace_func_mapper is freed by free_ftrace_hash(&mapper->hash).
5213 * The hash field must be the first field.
5216 struct ftrace_hash hash; /* Must be first! */ member
5226 struct ftrace_hash *hash; in allocate_ftrace_func_mapper() local
5230 * in the hash are not ftrace_func_entry type, we define it in allocate_ftrace_func_mapper()
5233 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); in allocate_ftrace_func_mapper()
5234 return (struct ftrace_func_mapper *)hash; in allocate_ftrace_func_mapper()
5254 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_find_ip()
5276 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_add_ip()
5287 __add_hash_entry(&mapper->hash, &map->entry); in ftrace_func_mapper_add_ip()
5309 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_remove_ip()
5316 remove_hash_entry(&mapper->hash, entry); in ftrace_func_mapper_remove_ip()
5341 if (free_func && mapper->hash.count) { in free_ftrace_func_mapper()
5342 size = 1 << mapper->hash.size_bits; in free_ftrace_func_mapper()
5344 hhd = &mapper->hash.buckets[i]; in free_ftrace_func_mapper()
5352 free_ftrace_hash(&mapper->hash); in free_ftrace_func_mapper()
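
The ftrace_func_mapper hits (source lines 5212-5352) reuse the hash through two first-member casts, which the comment at source line 5213 calls out ("The hash field must be the first field"): a mapper is layout-compatible with a bare ftrace_hash, and each stored element embeds an ftrace_func_entry at offset zero so the generic lookup can return it and the caller can cast back to the wrapper. The shape implied by the hits (the payload field of the map struct is an assumption inferred from the add/lookup call pattern, not shown in this listing):

    /* Layout implied by the mapper hits; 'data' is assumed. */
    struct ftrace_func_mapper {
            struct ftrace_hash hash;        /* Must be first! */
    };

    struct ftrace_func_map {
            struct ftrace_func_entry entry; /* must also be first */
            void *data;                     /* assumed payload */
    };

    /* mapper <-> hash are interchangeable: */
    mapper = (struct ftrace_func_mapper *)
                    alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);

    /* store and retrieve through the embedded entry: */
    __add_hash_entry(&mapper->hash, &map->entry);
    map = (struct ftrace_func_map *)ftrace_lookup_ip(&mapper->hash, ip);
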
5397 struct ftrace_hash *hash; in register_ftrace_function_probe() local
5444 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); in register_ftrace_function_probe()
5446 if (!hash) { in register_ftrace_function_probe()
5451 ret = ftrace_match_records(hash, glob, strlen(glob)); in register_ftrace_function_probe()
5460 size = 1 << hash->size_bits; in register_ftrace_function_probe()
5462 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_function_probe()
5495 hash, 1); in register_ftrace_function_probe()
5512 free_ftrace_hash(hash); in register_ftrace_function_probe()
5524 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_function_probe()
5543 struct ftrace_hash *hash = NULL; in unregister_ftrace_function_probe_func() local
5597 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); in unregister_ftrace_function_probe_func()
5598 if (!hash) in unregister_ftrace_function_probe_func()
5603 size = 1 << hash->size_bits; in unregister_ftrace_function_probe_func()
5605 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { in unregister_ftrace_function_probe_func()
5614 remove_hash_entry(hash, entry); in unregister_ftrace_function_probe_func()
5631 if (ftrace_hash_empty(hash)) in unregister_ftrace_function_probe_func()
5635 hash, 1); in unregister_ftrace_function_probe_func()
5638 if (ftrace_enabled && !ftrace_hash_empty(hash)) in unregister_ftrace_function_probe_func()
5653 free_ftrace_hash(hash); in unregister_ftrace_function_probe_func()
5716 struct ftrace_hash *hash = iter->hash; in ftrace_process_regex() local
5725 ret = ftrace_match_records(hash, func, len); in ftrace_process_regex()
5741 return p->func(tr, hash, func, command, next, enable); in ftrace_process_regex()
5767 /* iter->hash is a local copy, so we don't need regex_lock */ in ftrace_regex_write()
5799 __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) in __ftrace_match_addr() argument
5808 entry = ftrace_lookup_ip(hash, ip); in __ftrace_match_addr()
5811 free_hash_entry(hash, entry); in __ftrace_match_addr()
5813 } else if (__ftrace_lookup_ip(hash, ip) != NULL) { in __ftrace_match_addr()
5818 entry = add_hash_entry(hash, ip); in __ftrace_match_addr()
5823 ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips, in ftrace_match_addr() argument
5830 err = __ftrace_match_addr(hash, ips[i], remove); in ftrace_match_addr()
5833 * This expects the @hash is a temporary hash and if this in ftrace_match_addr()
5834 * fails the caller must free the @hash. in ftrace_match_addr()
5848 struct ftrace_hash *hash; in ftrace_set_hash() local
5862 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); in ftrace_set_hash()
5864 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); in ftrace_set_hash()
5866 if (!hash) { in ftrace_set_hash()
5871 if (buf && !match_records(hash, buf, len, mod)) { in ftrace_set_hash()
5884 ret = ftrace_match_addr(hash, ips, cnt, remove); in ftrace_set_hash()
5890 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); in ftrace_set_hash()
5896 free_ftrace_hash(hash); in ftrace_set_hash()
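
ftrace_set_hash() (source lines 5848-5896) shows the update discipline used throughout this file: live hashes are never edited in place. The writer builds a private hash (fresh on reset, otherwise a copy of *orig_hash), applies the filter edits to the copy, publishes it with ftrace_hash_move_and_update_ops(), and frees the temporary; lockless readers stay safe because retired hashes are reclaimed via free_ftrace_hash_rcu() (source lines 1278-1282). Condensed from the hits:

    /* Copy-edit-publish sequence condensed from ftrace_set_hash(). */
    struct ftrace_hash *hash;
    int ret = 0;

    if (reset)
            hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
    else
            hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
                                              *orig_hash);
    if (!hash)
            return -ENOMEM;

    if (buf && !match_records(hash, buf, len, mod))
            ret = -EINVAL;          /* pattern matched nothing */
    else if (ips)
            ret = ftrace_match_addr(hash, ips, cnt, remove);

    if (!ret)
            ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
                                                  hash, enable);

    free_ftrace_hash(hash);         /* the temporary copy */
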
5932 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr) in remove_direct_functions_hash() argument
5937 size = 1 << hash->size_bits; in remove_direct_functions_hash()
5939 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in remove_direct_functions_hash()
5980 struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL; in register_ftrace_direct() local
5991 hash = ops->func_hash->filter_hash; in register_ftrace_direct()
5992 if (ftrace_hash_empty(hash)) in register_ftrace_direct()
5998 size = 1 << hash->size_bits; in register_ftrace_direct()
6000 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_direct()
6008 /* Make a copy hash to place the new and the old entries in */ in register_ftrace_direct()
6009 size = hash->count + direct_functions->count; in register_ftrace_direct()
6029 size = 1 << hash->size_bits; in register_ftrace_direct()
6031 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_direct()
6035 /* Update both the copy and the hash entry */ in register_ftrace_direct()
6083 struct ftrace_hash *hash = ops->func_hash->filter_hash; in unregister_ftrace_direct() local
6093 remove_direct_functions_hash(hash, addr); in unregister_ftrace_direct()
6109 struct ftrace_hash *hash; in __modify_ftrace_direct() local
6135 hash = ops->func_hash->filter_hash; in __modify_ftrace_direct()
6136 size = 1 << hash->size_bits; in __modify_ftrace_direct()
6138 hlist_for_each_entry(iter, &hash->buckets[i], hlist) { in __modify_ftrace_direct()
6421 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
6449 struct ftrace_hash *hash; in set_ftrace_early_graph() local
6451 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); in set_ftrace_early_graph()
6452 if (MEM_FAIL(!hash, "Failed to allocate hash\n")) in set_ftrace_early_graph()
6458 ret = ftrace_graph_set_hash(hash, func); in set_ftrace_early_graph()
6465 ftrace_graph_hash = hash; in set_ftrace_early_graph()
6467 ftrace_graph_notrace_hash = hash; in set_ftrace_early_graph()
6539 iter->hash->flags &= ~FTRACE_HASH_FL_MOD; in ftrace_regex_release()
6541 iter->hash->flags |= FTRACE_HASH_FL_MOD; in ftrace_regex_release()
6548 iter->hash, filter_hash); in ftrace_regex_release()
6553 free_ftrace_hash(iter->hash); in ftrace_regex_release()
6620 struct ftrace_hash *hash; member
6622 int idx; /* for hash table iteration */
6637 if (*pos >= fgd->hash->count) in __g_next()
6649 for (i = idx; i < 1 << fgd->hash->size_bits; i++) { in __g_next()
6650 head = &fgd->hash->buckets[i]; in __g_next()
6674 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, in g_start()
6677 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, in g_start()
6681 if (ftrace_hash_empty(fgd->hash) && !*pos) in g_start()
6744 fgd->hash); in __ftrace_graph_open()
6771 * All uses of fgd->hash must be taken with the graph_lock in __ftrace_graph_open()
6773 * fgd->hash to be reinitialized when it is taken again. in __ftrace_graph_open()
6775 fgd->hash = NULL; in __ftrace_graph_open()
6795 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, in ftrace_graph_open()
6823 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, in ftrace_graph_notrace_open()
6907 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) in ftrace_graph_set_hash() argument
6933 entry = ftrace_lookup_ip(hash, rec->ip); in ftrace_graph_set_hash()
6940 if (add_hash_entry(hash, rec->ip) == NULL) in ftrace_graph_set_hash()
6944 free_hash_entry(hash, entry); in ftrace_graph_set_hash()
7333 /* If in notrace hash, we ignore it too */ in ops_references_ip()
7372 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) in clear_mod_from_hash() argument
7378 if (ftrace_hash_empty(hash)) in clear_mod_from_hash()
7383 entry = __ftrace_lookup_ip(hash, rec->ip); in clear_mod_from_hash()
7387 * if/when the hash is modified again. in clear_mod_from_hash()
7770 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) in clear_func_from_hash() argument
7774 entry = ftrace_lookup_ip(hash, func->ip); in clear_func_from_hash()
7778 * if/when the hash is modified again. in clear_func_from_hash()
8606 struct ftrace_hash *hash; in prepare_direct_functions_for_ipmodify() local
8615 hash = ops->func_hash->filter_hash; in prepare_direct_functions_for_ipmodify()
8616 size = 1 << hash->size_bits; in prepare_direct_functions_for_ipmodify()
8618 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in prepare_direct_functions_for_ipmodify()
8655 struct ftrace_hash *hash; in cleanup_direct_functions_after_ipmodify() local
8664 hash = ops->func_hash->filter_hash; in cleanup_direct_functions_after_ipmodify()
8665 size = 1 << hash->size_bits; in cleanup_direct_functions_after_ipmodify()
8667 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in cleanup_direct_functions_after_ipmodify()
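
One last pattern recurs across the hits from register_ftrace_direct() down through cleanup_direct_functions_after_ipmodify(): the whole-table walk. Because the bucket count is always 1 << size_bits, every full traversal takes the same shape; use the _safe iterator variant, as remove_hash() and the probe unregister path above do, whenever entries may be deleted mid-walk:

    /* The canonical whole-table walk recurring throughout this listing. */
    size = 1 << hash->size_bits;
    for (i = 0; i < size; i++) {
            hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                    /* ... act on entry->ip ... */
            }
    }
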