lib/stackdepot.c: lines matching "period" and "scale"

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack depot - a stack trace storage that avoids duplication.
 */

#include <linux/kasan-enabled.h>

#define DEPOT_MAX_POOLS \
	(((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \
	 (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP)
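
The macro caps the number of pools at whichever is smaller: the compile-time cap DEPOT_POOLS_CAP, or the largest index that still fits in the handle's pool-index bit field. A standalone sketch of the same clamp; the two constant values below are assumptions for illustration, not the kernel's configuration:

#include <stdio.h>

/* Assumed values for illustration only. */
#define DEPOT_POOL_INDEX_BITS 17
#define DEPOT_POOLS_CAP 8192

#define DEPOT_MAX_POOLS \
	(((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \
	 (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP)

int main(void)
{
	/* 17 index bits could address 131071 pools, so the cap of 8192 wins. */
	printf("max pools: %lld\n", (long long)DEPOT_MAX_POOLS);
	return 0;
}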

/* in stack_depot_early_init(): */
	return -ENOMEM;

/* in stack_depot_init(): */
	int scale = STACK_HASH_TABLE_SCALE;
	...
	if (scale > PAGE_SHIFT)
		entries >>= (scale - PAGE_SHIFT);
	else
		entries <<= (PAGE_SHIFT - scale);
	...
	ret = -ENOMEM;
	...
	stack_hash_mask = entries - 1;
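
The shifts size the hash table at one bucket per 2^STACK_HASH_TABLE_SCALE bytes of memory: starting from a power-of-two page count, they convert pages into buckets, and the power-of-two result lets "hash & mask" replace a modulo. A standalone sketch of the arithmetic, with PAGE_SHIFT and the scale assumed rather than taken from kernel config:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define STACK_HASH_TABLE_SCALE 14	/* assumed: one bucket per 16 KiB */

int main(void)
{
	unsigned long entries = 1UL << 20;	/* pretend 2^20 pages of memory */
	int scale = STACK_HASH_TABLE_SCALE;

	if (scale > PAGE_SHIFT)
		entries >>= (scale - PAGE_SHIFT);
	else
		entries <<= (PAGE_SHIFT - scale);

	/* entries stays a power of two, so a mask can index the table. */
	unsigned long stack_hash_mask = entries - 1;

	printf("buckets: %lu, mask: 0x%lx\n", entries, stack_hash_mask);
	return 0;
}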

/* in depot_init_pool(): */
	WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */

/* in depot_keep_new_pool(): */
	/* ... do not use the current pre-allocation. */

/* in depot_pop_free_pool(): */
	pool_index = pools_num - 1;
	...
	/* Pre-initialize handle once. */
	stack->handle.pool_index_plus_1 = pool_index + 1;
	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
	stack->handle.extra = 0;
	INIT_LIST_HEAD(&stack->hash_list);
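
The handle packs a record's location into a single 32-bit value: the pool index is stored plus one so that a handle of zero remains an invalid sentinel, and the byte offset is stored in (1 << DEPOT_STACK_ALIGN)-sized units, since records are aligned to that granularity anyway. A standalone sketch of the packing; the field widths in this union are assumptions, not the kernel's exact layout:

#include <stdio.h>
#include <stdint.h>

#define DEPOT_STACK_ALIGN 4	/* assumed: records aligned to 16 bytes */

/* Assumed field widths for illustration only. */
union handle_parts {
	uint32_t handle;
	struct {
		uint32_t pool_index_plus_1 : 17;
		uint32_t offset            : 10;
		uint32_t extra             : 5;
	};
};

int main(void)
{
	union handle_parts h = { .handle = 0 };
	uint32_t pool_index = 3;
	uint32_t pool_offset = 0x120;	/* byte offset, multiple of 16 */

	h.pool_index_plus_1 = pool_index + 1;	/* +1 keeps handle 0 invalid */
	h.offset = pool_offset >> DEPOT_STACK_ALIGN;

	printf("handle: 0x%08x, decoded offset: 0x%x\n",
	       h.handle, h.offset << DEPOT_STACK_ALIGN);
	return 0;
}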

/* in depot_pop_free(): */
	/*
	 * ...
	 * RCU grace period in the past. Consequently it is sufficient to only
	 * check the first entry.
	 */
	if (!poll_state_synchronize_rcu(stack->rcu_state))
		return NULL;

	list_del(&stack->free_list);
	counters[DEPOT_COUNTER_FREELIST_SIZE]--;

/* in depot_stack_record_size(): */
	const size_t used = flex_array_size(s, entries, nr_entries);
	const size_t unused = sizeof(s->entries) - used;

	WARN_ON_ONCE(sizeof(s->entries) < used);

	return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN);
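
A record is sized to the trace it actually holds: the unused tail of the fixed-capacity entries[] array is subtracted from the full structure size, and the result is rounded up to the record alignment. A standalone sketch with assumed capacity and alignment; ALIGN_UP stands in for the kernel's ALIGN macro:

#include <stdio.h>
#include <stddef.h>

#define CAP 64			/* assumed max entries per record */
#define DEPOT_STACK_ALIGN 4	/* assumed: align records to 16 bytes */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct record {
	unsigned int size;
	unsigned long entries[CAP];
};

static size_t record_size(unsigned int nr_entries)
{
	size_t used = nr_entries * sizeof(unsigned long);
	size_t unused = sizeof(((struct record *)0)->entries) - used;

	/* Trim the unused tail of entries[], then round up to the alignment. */
	return ALIGN_UP(sizeof(struct record) - unused, 1 << DEPOT_STACK_ALIGN);
}

int main(void)
{
	printf("4-entry record: %zu bytes (full struct: %zu bytes)\n",
	       record_size(4), sizeof(struct record));
	return 0;
}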

/* in depot_alloc_stack(): */
	/*
	 * ... safely be re-used by differently sized allocations.
	 */
	...
	stack->hash = hash;
	stack->size = nr_entries;
	/* stack->handle is already filled in by depot_pop_free_pool(). */
	memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));
	...
	if (flags & STACK_DEPOT_FLAG_GET) {
		refcount_set(&stack->count, 1);
	} else {
		/* Warn on attempts to switch to refcounting this entry. */
		refcount_set(&stack->count, REFCOUNT_SATURATED);
	}

/* in depot_fetch_stack(): */
	u32 pool_index = parts.pool_index_plus_1 - 1;
	...
	if (WARN_ON(!refcount_read(&stack->count)))
		return NULL;

/* in depot_free_stack(): */
	list_del_rcu(&stack->hash_list);
	...
	/*
	 * ...
	 * moved onto another list until the next grace period, and concurrent
	 * ...
	 */
	stack->rcu_state = get_state_synchronize_rcu();

	/*
	 * Add the entry to the freelist tail, so that older entries are
	 * considered first - their RCU cookie is more likely to no longer be
	 * associated with the current grace period.
	 */
	list_add_tail(&stack->free_list, &free_stacks);
	...
	counters[DEPOT_COUNTER_REFD_INUSE]--;
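
The two halves above form one pattern: freeing records a grace-period cookie instead of sleeping in synchronize_rcu() or recursing into RCU via call_rcu(), and reuse later polls that cookie. Because entries are appended at the tail and popped from the front, the front entry always carries the oldest cookie; if even it has not crossed a grace period, no later entry has. A minimal kernel-style sketch of the same polled-cookie freelist, using hypothetical my_obj/my_free_list names rather than stack depot's own types:

#include <linux/list.h>
#include <linux/rcupdate.h>

/* Hypothetical object type for illustration. */
struct my_obj {
	struct list_head free_list;
	unsigned long rcu_state;	/* cookie from get_state_synchronize_rcu() */
};

static LIST_HEAD(my_free_list);

/* Defer reuse without sleeping: record a cookie instead of synchronize_rcu(). */
static void my_defer_free(struct my_obj *obj)
{
	obj->rcu_state = get_state_synchronize_rcu();
	list_add_tail(&obj->free_list, &my_free_list);	/* oldest stay in front */
}

/* Reuse only if a full grace period has passed since the cookie was taken. */
static struct my_obj *my_pop_free(void)
{
	struct my_obj *obj;

	if (list_empty(&my_free_list))
		return NULL;

	obj = list_first_entry(&my_free_list, struct my_obj, free_list);
	if (!poll_state_synchronize_rcu(obj->rcu_state))
		return NULL;	/* front is oldest; if it is not ready, none are */

	list_del(&obj->free_list);
	return obj;
}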

/* stackdepot_memcmp(): non-instrumented version of memcmp(). */
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}

/* in find_stack(): */
	/*
	 * ...
	 * unused entries, because the stack record free-then-reuse code paths
	 * ...
	 */
	list_for_each_entry_rcu(stack, bucket, hash_list) {
		if (stack->hash != hash || stack->size != size)
			continue;
		...
		if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
			continue;
		...
		if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count))
			continue;
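
The loop above is the classic RCU lookup idiom: traverse the hash list without holding the pool lock, and let refcount_inc_not_zero() reject records whose last reference was dropped concurrently, since such records can still be visible to readers. A minimal kernel-style sketch of the pattern, with a hypothetical my_rec record type:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/types.h>

/* Hypothetical record type for illustration. */
struct my_rec {
	u32 key;
	refcount_t count;
	struct list_head hash_list;
};

/* Look up @key under RCU; return a referenced record or NULL. */
static struct my_rec *my_lookup_get(struct list_head *bucket, u32 key)
{
	struct my_rec *rec, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(rec, bucket, hash_list) {
		if (rec->key != key)
			continue;
		/*
		 * A concurrent free may have dropped the last reference;
		 * such a record is still reachable here, but its refcount
		 * is zero and this increment fails, rejecting it.
		 */
		if (!refcount_inc_not_zero(&rec->count))
			continue;
		ret = rec;
		break;
	}
	rcu_read_unlock();

	return ret;
}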

/* in stack_depot_save_flags(): */
		if (new)
			list_add_rcu(&new->hash_list, bucket);
	...
	if (found)
		handle = found->handle.handle;

/* in stack_depot_fetch(): */
	/*
	 * Should never be NULL, otherwise this is a use-after-put (or just a
	 * corrupt handle).
	 */
	...
	*entries = stack->entries;
	return stack->size;

/* in stack_depot_put(): */
	if (refcount_dec_and_test(&stack->count))
		depot_free_stack(stack);
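
Putting the public entry points together: a trace saved with STACK_DEPOT_FLAG_GET holds a reference that the caller must eventually drop with stack_depot_put(), at which point the record can be evicted and its storage reused. A hedged usage sketch; the two function names below are hypothetical callers, error handling is trimmed, and the depot flags are assumed appropriate for a sleepable context:

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_current_stack(gfp_t gfp)
{
	unsigned long entries[32];
	unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/* GET takes a reference; CAN_ALLOC permits allocating a new pool. */
	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_GET |
				      STACK_DEPOT_FLAG_CAN_ALLOC);
}

static void show_and_release(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr = stack_depot_fetch(handle, &entries);

	stack_trace_print(entries, nr, 0);
	stack_depot_put(handle);	/* drop the reference taken at save time */
}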