Lines Matching +full:re +full:- +full:initialization

1 // SPDX-License-Identifier: GPL-2.0
16 #include <linux/kcsan-checks.h>
77 *((unsigned long *)kp->arg) = num; in param_set_sample_interval()
80 return disabled_by_warn ? -EINVAL : kfence_enable_late(); in param_set_sample_interval()
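(For reference: the sampling interval is exposed as a module parameter, typically set at boot, e.g. kfence.sample_interval=100 on the kernel command line; setting it to 0 disables KFENCE at runtime. As the return value above shows, re-enabling through the parameter is refused with -EINVAL once KFENCE has been disabled by a warning, and otherwise goes through kfence_enable_late().)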
119 * Per-object metadata, with one-to-one mapping of object metadata to
128 * kfence_metadata visible after initialization is successful. This prevents
150 * Assuming a range of 15%-85% unique allocations in the pool at any point in
151 * time, the below parameters provide a probability of 0.02-0.33 for false
154 * P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE))) ^ HNUM
160 #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1)
229 * currently contained (non-zero count) in Counting Bloom filter.
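As a rough illustration of the Counting Bloom filter referenced above, the sketch below keeps one counter array indexed by ALLOC_COVERED_HNUM derived hashes. alloc_covered_add() matches the call visible later in this listing; alloc_covered_contains(), the use of hash_32() for re-hashing and the ilog2() bit count are assumptions for illustration only and may differ from the actual implementation.

#include <linux/atomic.h>
#include <linux/hash.h>
#include <linux/log2.h>

/* Sketch: one counter per slot; a stack-trace hash counts as "covered" while
 * all of its ALLOC_COVERED_HNUM slots hold a non-zero count.
 */
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];

static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = hash_32(alloc_stack_hash, ilog2(ALLOC_COVERED_SIZE));
	}
}

static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = hash_32(alloc_stack_hash, ilog2(ALLOC_COVERED_SIZE));
	}
	return true;
}

A lookup of this kind is what lets the allocation slow path further down skip stack traces that are already well represented in the pool.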
256 unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2; in metadata_to_pageaddr()
259 /* The checks do not affect performance; only called from slow-paths. */ in metadata_to_pageaddr()
270 if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr)) in metadata_to_pageaddr()
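The offset computation above ((index + 1) * PAGE_SIZE * 2) suggests the pool layout: the first two pool pages serve as guards, after which object pages alternate with guard pages. A hedged sketch of the inverse mapping under that assumed layout, using the addr_to_metadata() name that appears further down in this listing:

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * Every object page is preceded by a guard page, hence the "- 1";
	 * addresses inside guard pages resolve to the metadata of the object
	 * page to their left (or to an out-of-range index at the pool edges).
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}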
278 enum kfence_object_state state = READ_ONCE(meta->state); in kfence_obj_allocated()
292 next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track; in metadata_update_state()
294 lockdep_assert_held(&meta->lock); in metadata_update_state()
297 if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING) in metadata_update_state()
301 memcpy(track->stack_entries, stack_entries, in metadata_update_state()
308 num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1); in metadata_update_state()
310 track->num_stack_entries = num_stack_entries; in metadata_update_state()
311 track->pid = task_pid_nr(current); in metadata_update_state()
312 track->cpu = raw_smp_processor_id(); in metadata_update_state()
313 track->ts_nsec = local_clock(); /* Same source as printk timestamps. */ in metadata_update_state()
321 WRITE_ONCE(meta->state, next); in metadata_update_state()
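The assignments above populate a per-transition tracking record; based on the fields visible here it has roughly the following shape (field order and any additional members are assumptions for illustration):

/* Sketch of the record filled in by metadata_update_state(); one instance
 * each for meta->alloc_track and meta->free_track.
 */
struct kfence_track {
	pid_t pid;
	int cpu;
	u64 ts_nsec;		/* From local_clock(), same source as printk timestamps. */
	int num_stack_entries;
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
};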
342 raw_spin_lock_irqsave(&meta->lock, flags); in check_canary_byte()
344 raw_spin_unlock_irqrestore(&meta->lock, flags); in check_canary_byte()
351 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); in set_canary()
358 for (; addr < meta->addr; addr += sizeof(u64)) in set_canary()
361 addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64)); in set_canary()
362 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) in set_canary()
369 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); in check_canary()
373 * We'll iterate over each canary byte per-side until a corrupted byte in check_canary()
382 for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) { in check_canary()
392 for (; addr < meta->addr; addr++) { in check_canary()
398 for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) { in check_canary()
402 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) { in check_canary()
405 for (; addr - pageaddr < PAGE_SIZE; addr++) { in check_canary()
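For orientation, a minimal sketch of the per-byte canary comparison these loops rely on; the 0xaa-based pattern below is an assumption for illustration (the actual pattern macro lives elsewhere in the kfence sources) and the helper name is hypothetical:

/* Assumed pattern: 0xaa XOR'd with the low 3 address bits, so that byte
 * values differ across neighbouring positions and shifted or truncated
 * writes into the redzone remain detectable.
 */
#define CANARY_PATTERN_U8(addr) ((u8)0xaa ^ (u8)((unsigned long)(addr) & 0x7))

static inline bool canary_byte_intact(unsigned long addr)
{
	return *(u8 *)addr == CANARY_PATTERN_U8(addr);
}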
429 list_del_init(&meta->list); in kfence_guarded_alloc()
437 if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { in kfence_guarded_alloc()
439 * This is extremely unlikely -- we are reporting on a in kfence_guarded_alloc()
440 * use-after-free, which locked meta->lock, and the reporting in kfence_guarded_alloc()
442 * kfence_alloc() and tries to grab the same object that we're in kfence_guarded_alloc()
449 list_add_tail(&meta->list, &kfence_freelist); in kfence_guarded_alloc()
455 meta->addr = metadata_to_pageaddr(meta); in kfence_guarded_alloc()
456 /* Unprotect if we're reusing this page. */ in kfence_guarded_alloc()
457 if (meta->state == KFENCE_OBJECT_FREED) in kfence_guarded_alloc()
458 kfence_unprotect(meta->addr); in kfence_guarded_alloc()
461 * Note: for allocations made before RNG initialization, will always in kfence_guarded_alloc()
465 * is that the out-of-bounds accesses detected are deterministic for in kfence_guarded_alloc()
469 /* Allocate on the "right" side, re-calculate address. */ in kfence_guarded_alloc()
470 meta->addr += PAGE_SIZE - size; in kfence_guarded_alloc()
471 meta->addr = ALIGN_DOWN(meta->addr, cache->align); in kfence_guarded_alloc()
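As a worked example (assuming PAGE_SIZE of 4096): for a 72-byte object in a cache with cache->align of 8, right-side placement yields meta->addr = pageaddr + 4096 - 72 = pageaddr + 4024, which ALIGN_DOWN(..., 8) leaves unchanged, so the object ends flush against the following guard page and a one-byte out-of-bounds write faults immediately.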
474 addr = (void *)meta->addr; in kfence_guarded_alloc()
479 WRITE_ONCE(meta->cache, cache); in kfence_guarded_alloc()
480 meta->size = size; in kfence_guarded_alloc()
481 meta->alloc_stack_hash = alloc_stack_hash; in kfence_guarded_alloc()
482 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_guarded_alloc()
487 slab = virt_to_slab((void *)meta->addr); in kfence_guarded_alloc()
488 slab->slab_cache = cache; in kfence_guarded_alloc()
489 slab->objects = 1; in kfence_guarded_alloc()
491 /* Memory initialization. */ in kfence_guarded_alloc()
496 * SL*B do the initialization, as otherwise we might overwrite KFENCE's in kfence_guarded_alloc()
501 if (cache->ctor) in kfence_guarded_alloc()
502 cache->ctor(addr); in kfence_guarded_alloc()
505 kfence_protect(meta->addr); /* Random "faults" by protecting the object. */ in kfence_guarded_alloc()
519 raw_spin_lock_irqsave(&meta->lock, flags); in kfence_guarded_free()
521 if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) { in kfence_guarded_free()
522 /* Invalid or double-free, bail out. */ in kfence_guarded_free()
526 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_guarded_free()
530 /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */ in kfence_guarded_free()
539 if (meta->unprotected_page) { in kfence_guarded_free()
540 memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE); in kfence_guarded_free()
541 kfence_protect(meta->unprotected_page); in kfence_guarded_free()
542 meta->unprotected_page = 0; in kfence_guarded_free()
547 init = slab_want_init_on_free(meta->cache); in kfence_guarded_free()
548 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_guarded_free()
550 alloc_covered_add(meta->alloc_stack_hash, -1); in kfence_guarded_free()
556 * Clear memory if init-on-free is set. While we protect the page, the in kfence_guarded_free()
557 * data is still there, and after a use-after-free is detected, we in kfence_guarded_free()
561 memzero_explicit(addr, meta->size); in kfence_guarded_free()
563 /* Protect to detect use-after-frees. */ in kfence_guarded_free()
570 KFENCE_WARN_ON(!list_empty(&meta->list)); in kfence_guarded_free()
571 list_add_tail(&meta->list, &kfence_freelist); in kfence_guarded_free()
586 kfence_guarded_free((void *)meta->addr, meta, false); in rcu_guarded_free()
590 * Initialization of the KFENCE pool after its allocation.
592 * which partial initialization succeeded.
611 * fast-path in SLUB, and therefore need to ensure kfree() correctly in kfence_init_pool()
612 * enters __slab_free() slow-path. in kfence_init_pool()
622 slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts | in kfence_init_pool()
644 INIT_LIST_HEAD(&meta->list); in kfence_init_pool()
645 raw_spin_lock_init(&meta->lock); in kfence_init_pool()
646 meta->state = KFENCE_OBJECT_UNUSED; in kfence_init_pool()
647 meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ in kfence_init_pool()
648 list_add_tail(&meta->list, &kfence_freelist); in kfence_init_pool()
658 * Make kfence_metadata visible only when initialization is successful. in kfence_init_pool()
659 * Otherwise, if the initialization fails and kfence_metadata is freed, in kfence_init_pool()
672 slab->obj_exts = 0; in kfence_init_pool()
694 * are registered with kmemleak through the slab post-alloc hook. in kfence_init_pool_early()
707 memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); in kfence_init_pool_early()
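In other words, if pool initialization stops early at some address addr, the remaining KFENCE_POOL_SIZE - (addr - __kfence_pool) bytes are returned to the system rather than left reserved.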
756 struct kfence_metadata *meta = &kfence_metadata[(long)v - 1]; in show_object()
759 raw_spin_lock_irqsave(&meta->lock, flags); in show_object()
761 raw_spin_unlock_irqrestore(&meta->lock, flags); in show_object()
762 seq_puts(seq, "---------------------------------\n"); in show_object()
820 /* Wait queue to wake up allocation-gate timer task. */
846 atomic_set(&kfence_allocation_gate, -kfence_burst); in toggle_allocation_gate()
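The negative initial value above is what implements burst mode: each attempted guarded allocation increments the gate and is allowed through only while the result stays at or below 1, so kfence_burst + 1 allocations pass per toggle. A hedged sketch of the corresponding fast-path check (the exact form in __kfence_alloc() may differ):

	/*
	 * Sketch: with the gate pre-set to -kfence_burst, the first
	 * kfence_burst + 1 increments return values <= 1 and proceed; later
	 * callers bail out until toggle_allocation_gate() runs again.
	 */
	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;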
869 * re-allocate the memory pool. in kfence_alloc_pool_and_metadata()
904 pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, in kfence_init_enable()
931 int err = -ENOMEM; in kfence_init_late()
939 return -ENOMEM; in kfence_init_late()
950 return -EINVAL; in kfence_init_late()
955 return -ENOMEM; in kfence_init_late()
972 free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); in kfence_init_late()
973 err = -EBUSY; in kfence_init_late()
999 pr_info("re-enabled\n"); in kfence_enable_late()
1025 if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta)) in kfence_shutdown_cache()
1028 raw_spin_lock_irqsave(&meta->lock, flags); in kfence_shutdown_cache()
1029 in_use = meta->cache == s && kfence_obj_allocated(meta); in kfence_shutdown_cache()
1030 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_shutdown_cache()
1047 kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true); in kfence_shutdown_cache()
1055 if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED) in kfence_shutdown_cache()
1058 raw_spin_lock_irqsave(&meta->lock, flags); in kfence_shutdown_cache()
1059 if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED) in kfence_shutdown_cache()
1060 meta->cache = NULL; in kfence_shutdown_cache()
1061 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_shutdown_cache()
1082 * Skip allocations from non-default zones, including DMA. We cannot in __kfence_alloc()
1087 (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) { in __kfence_alloc()
1096 if (s->flags & SLAB_SKIP_KFENCE) in __kfence_alloc()
1122 * Do expensive check for coverage of allocation in slow-path after in __kfence_alloc()
1123 * allocation_gate has already become non-zero, even though it might in __kfence_alloc()
1127 * full, including avoiding long-lived allocations of the same source in __kfence_alloc()
1145 * Read locklessly -- if there is a race with __kfence_alloc(), this is in kfence_ksize()
1146 * either a use-after-free or invalid access. in kfence_ksize()
1148 return meta ? meta->size : 0; in kfence_ksize()
1156 * Read locklessly -- if there is a race with __kfence_alloc(), this is in kfence_object_start()
1157 * either a use-after-free or invalid access. in kfence_object_start()
1159 return meta ? (void *)meta->addr : NULL; in kfence_object_start()
1167 KFENCE_WARN_ON(meta->obj_exts.objcg); in __kfence_free()
1171 * the object, as the object page may be recycled for other-typed in __kfence_free()
1172 * objects once it has been freed. meta->cache may be NULL if the cache in __kfence_free()
1177 if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) { in __kfence_free()
1180 raw_spin_lock_irqsave(&meta->lock, flags); in __kfence_free()
1182 raw_spin_unlock_irqrestore(&meta->lock, flags); in __kfence_free()
1183 call_rcu(&meta->rcu_head, rcu_guarded_free); in __kfence_free()
1191 const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE; in kfence_handle_page_fault()
1209 meta = addr_to_metadata(addr - PAGE_SIZE); in kfence_handle_page_fault()
1213 distance = addr - data_race(meta->addr + meta->size); in kfence_handle_page_fault()
1219 if (!to_report || distance > data_race(meta->addr) - addr) in kfence_handle_page_fault()
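As a worked example: if the faulting address lies 0x20 bytes past the end of the object in the page to its left and 0xfc0 bytes before the start of the object in the page to its right, the left-hand object is chosen (0x20 < 0xfc0) and the access is reported as out-of-bounds to the right of that object.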
1226 raw_spin_lock_irqsave(&to_report->lock, flags); in kfence_handle_page_fault()
1227 to_report->unprotected_page = addr; in kfence_handle_page_fault()
1232 * report this as an OOB -- the report will simply show the in kfence_handle_page_fault()
1240 raw_spin_lock_irqsave(&to_report->lock, flags); in kfence_handle_page_fault()
1245 * use-after-free, with the stack trace showing the place where in kfence_handle_page_fault()
1246 * the object was re-allocated. in kfence_handle_page_fault()
1253 raw_spin_unlock_irqrestore(&to_report->lock, flags); in kfence_handle_page_fault()