Lines matching refs: entries

141 static void init_stack_table(unsigned long entries)  in init_stack_table()  argument
145 for (i = 0; i < entries; i++) in init_stack_table()
152 unsigned long entries = 0; in stack_depot_early_init() local
188 entries = 1UL << stack_bucket_number_order; in stack_depot_early_init()
192 entries, in stack_depot_early_init()
204 if (!entries) { in stack_depot_early_init()
209 entries = stack_hash_mask + 1; in stack_depot_early_init()
211 init_stack_table(entries); in stack_depot_early_init()
219 memblock_free(stack_table, entries * sizeof(struct list_head)); in stack_depot_early_init()
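
The early-init path above recovers the bucket count from the hash mask (line 209). That only works because the count is always kept a power of two, which also lets a bucket be picked with a mask instead of a modulo. A standalone illustration with made-up numbers, not kernel code:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long entries = 1UL << 16;           /* power-of-two bucket count */
        unsigned long stack_hash_mask = entries - 1; /* all low bits set */
        unsigned long hash = 0x9e3779b9UL;           /* any hash value */

        /* With a power-of-two count, "hash & mask" equals "hash % entries"... */
        assert((hash & stack_hash_mask) == (hash % entries));
        /* ...and the count can always be recovered as mask + 1 (line 209). */
        printf("entries = %lu, bucket index = %lu\n",
               stack_hash_mask + 1, hash & stack_hash_mask);
        return 0;
}
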
231 unsigned long entries; in stack_depot_init() local
244 entries = 1UL << stack_bucket_number_order; in stack_depot_init()
248 entries = nr_free_buffer_pages(); in stack_depot_init()
249 entries = roundup_pow_of_two(entries); in stack_depot_init()
252 entries >>= (scale - PAGE_SHIFT); in stack_depot_init()
254 entries <<= (PAGE_SHIFT - scale); in stack_depot_init()
257 if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN) in stack_depot_init()
258 entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN; in stack_depot_init()
259 if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX) in stack_depot_init()
260 entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX; in stack_depot_init()
262 pr_info("allocating hash table of %lu entries via kvcalloc\n", entries); in stack_depot_init()
263 stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL); in stack_depot_init()
270 stack_hash_mask = entries - 1; in stack_depot_init()
271 init_stack_table(entries); in stack_depot_init()
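
Lines 244-260 size the hash table at runtime: either a fixed bucket order, or the free-page count rounded up to a power of two, rescaled by the gap between PAGE_SHIFT and a scale factor, then clamped between the minimum and maximum bucket orders. A standalone sketch of that arithmetic; the constants and the page count below are placeholders, not the kernel's values:

#include <stdio.h>

#define PAGE_SHIFT       12
#define SCALE            14     /* placeholder scale factor */
#define BUCKET_ORDER_MIN 12     /* placeholder minimum bucket order */
#define BUCKET_ORDER_MAX 20     /* placeholder maximum bucket order */

/* Stand-in for the kernel helper of the same name. */
static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned long free_pages = 2000000;     /* stand-in for nr_free_buffer_pages() */
        unsigned long entries;

        entries = roundup_pow_of_two(free_pages);
        if (SCALE > PAGE_SHIFT)
                entries >>= (SCALE - PAGE_SHIFT);
        else
                entries <<= (PAGE_SHIFT - SCALE);

        /* Clamp to the allowed range of bucket counts. */
        if (entries < 1UL << BUCKET_ORDER_MIN)
                entries = 1UL << BUCKET_ORDER_MIN;
        if (entries > 1UL << BUCKET_ORDER_MAX)
                entries = 1UL << BUCKET_ORDER_MAX;

        printf("hash table buckets: %lu\n", entries);
        return 0;
}
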
420 const size_t used = flex_array_size(s, entries, nr_entries); in depot_stack_record_size()
421 const size_t unused = sizeof(s->entries) - used; in depot_stack_record_size()
423 WARN_ON_ONCE(sizeof(s->entries) < used); in depot_stack_record_size()
430 depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, v… in depot_alloc_stack() argument
466 memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries)); in depot_alloc_stack()
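
Lines 420-423 compute how much of a record's trace array a given stack actually uses, and line 466 copies the frames in with the same flex_array_size() expression. A rough user-space equivalent, with a made-up maximum frame count:

#include <stdio.h>
#include <string.h>

#define MAX_FRAMES 64   /* placeholder for the depot's per-record frame limit */

struct record {
        unsigned int nr_entries;
        unsigned long entries[MAX_FRAMES];      /* fixed-size trace storage */
};

/* flex_array_size(s, entries, n) boils down to n * sizeof(s->entries[0]). */
static size_t trace_bytes(const struct record *r, unsigned int n)
{
        return n * sizeof(r->entries[0]);
}

int main(void)
{
        static struct record rec;
        unsigned long trace[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
        unsigned int nr = 4;
        size_t used = trace_bytes(&rec, nr);
        size_t unused = sizeof(rec.entries) - used;     /* tail left unwritten */

        rec.nr_entries = nr;
        memcpy(rec.entries, trace, used);               /* mirrors line 466 */
        printf("used %zu bytes, %zu bytes unused\n", used, unused);
        return 0;
}
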
561 static inline u32 hash_stack(unsigned long *entries, unsigned int size) in hash_stack() argument
563 return jhash2((u32 *)entries, in hash_stack()
564 array_size(size, sizeof(*entries)) / sizeof(u32), in hash_stack()
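
Lines 561-564 hash a trace by viewing the array of unsigned long frames as a run of 32-bit words and feeding it to jhash2() with a fixed seed. The sketch below only mirrors the word-count arithmetic; the hash itself is a toy stand-in, not jhash2():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for jhash2(): hashes an array of 32-bit words with a seed. */
static uint32_t hash_words(const uint32_t *words, unsigned int n, uint32_t seed)
{
        uint32_t h = seed;

        while (n--)
                h = h * 31 + *words++;
        return h;
}

int main(void)
{
        unsigned long trace[3] = { 0x1111, 0x2222, 0x3333 };
        /* size * sizeof(unsigned long) / sizeof(u32): two words per frame on 64-bit. */
        unsigned int n_words = 3 * sizeof(trace[0]) / sizeof(uint32_t);

        /* Same reinterpreting cast the kernel code uses on line 563. */
        printf("hash = %08" PRIx32 " over %u 32-bit words\n",
               hash_words((const uint32_t *)trace, n_words, 0x12345678), n_words);
        return 0;
}
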
585 unsigned long *entries, int size, in find_stack() argument
610 if (data_race(stackdepot_memcmp(entries, stack->entries, size))) in find_stack()
633 depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, in stack_depot_save_flags() argument
659 nr_entries = filter_irq_stacks(entries, nr_entries); in stack_depot_save_flags()
664 hash = hash_stack(entries, nr_entries); in stack_depot_save_flags()
668 found = find_stack(bucket, entries, nr_entries, hash, depot_flags); in stack_depot_save_flags()
695 found = find_stack(bucket, entries, nr_entries, hash, depot_flags); in stack_depot_save_flags()
698 depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc); in stack_depot_save_flags()
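
Lines 659-698 outline the save path: cut the trace off at the IRQ entry point (filter_irq_stacks()), hash it, look the bucket up without taking the pool lock (line 668), and only on a miss take the lock, look again in case another CPU inserted the same trace meanwhile (line 695), and finally allocate a new record (line 698). The stripped-down shape of that pattern in user-space terms; every name below is made up, and the kernel walks the bucket under RCU rather than with the plain loads shown here:

#include <pthread.h>
#include <stddef.h>

struct node {
        struct node *next;
        int key;
};

static struct node *bucket_head;
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *lookup(int key)
{
        for (struct node *n = bucket_head; n; n = n->next)
                if (n->key == key)
                        return n;
        return NULL;
}

static struct node *get_or_insert(int key, struct node *prealloc)
{
        struct node *n = lookup(key);           /* fast path: no lock taken */

        if (n)
                return n;

        pthread_mutex_lock(&bucket_lock);
        n = lookup(key);                        /* re-check: we may have raced */
        if (!n && prealloc) {
                prealloc->key = key;
                prealloc->next = bucket_head;
                bucket_head = prealloc;         /* publish only under the lock */
                n = prealloc;
        }
        pthread_mutex_unlock(&bucket_lock);
        return n;
}

int main(void)
{
        static struct node spare;

        return get_or_insert(42, &spare) == &spare ? 0 : 1;
}
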
735 depot_stack_handle_t stack_depot_save(unsigned long *entries, in stack_depot_save() argument
739 return stack_depot_save_flags(entries, nr_entries, alloc_flags, in stack_depot_save()
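
stack_depot_save() at lines 735-739 is the plain wrapper most callers use. A hedged sketch of a typical call site, assuming a context where GFP_KERNEL allocation is fine; the 16-frame buffer is an arbitrary choice:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t save_current_stack(void)
{
        unsigned long entries[16];
        unsigned int nr_entries;

        /* Capture the current call chain, skipping no frames. */
        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

        /* Deduplicate it in the depot; a zero handle means the save failed. */
        return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}
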
753 unsigned long **entries) in stack_depot_fetch() argument
757 *entries = NULL; in stack_depot_fetch()
762 kmsan_unpoison_memory(entries, sizeof(*entries)); in stack_depot_fetch()
775 *entries = stack->entries; in stack_depot_fetch()
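
stack_depot_fetch() at lines 753-775 hands back a pointer into the depot's own storage plus the frame count, and returns 0 for an empty handle, so callers only ever need to keep the handle itself. A small usage sketch:

#include <linux/printk.h>
#include <linux/stackdepot.h>

static void dump_handle(depot_stack_handle_t handle)
{
        unsigned long *entries;
        unsigned int nr_entries, i;

        nr_entries = stack_depot_fetch(handle, &entries);
        for (i = 0; i < nr_entries; i++)
                pr_info("  %pS\n", (void *)entries[i]);
}
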
802 unsigned long *entries; in stack_depot_print() local
805 nr_entries = stack_depot_fetch(stack, &entries); in stack_depot_print()
807 stack_trace_print(entries, nr_entries, 0); in stack_depot_print()
814 unsigned long *entries; in stack_depot_snprint() local
817 nr_entries = stack_depot_fetch(handle, &entries); in stack_depot_snprint()
818 return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries, in stack_depot_snprint()
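
The two helpers above wrap that fetch for the common cases: stack_depot_print() (lines 802-807) sends the trace straight to the log via stack_trace_print(), and stack_depot_snprint() (lines 814-818) formats it into a caller-supplied buffer, with the last argument giving the per-frame indentation. A usage sketch with an arbitrarily sized buffer:

#include <linux/printk.h>
#include <linux/stackdepot.h>

static void report_stack(depot_stack_handle_t handle)
{
        char buf[512];          /* arbitrary size for the formatted trace */

        /* Straight to the kernel log... */
        stack_depot_print(handle);

        /* ...or into a buffer, indenting each frame by two spaces. */
        if (stack_depot_snprint(handle, buf, sizeof(buf), 2))
                pr_info("%s", buf);
}
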