xref: /linux/mm/kfence/core.c (revision f9bff0e31881d03badf191d3b0005839391f5f2b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KFENCE guarded object allocator and fault handling.
4  *
5  * Copyright (C) 2020, Google LLC.
6  */
7 
8 #define pr_fmt(fmt) "kfence: " fmt
9 
10 #include <linux/atomic.h>
11 #include <linux/bug.h>
12 #include <linux/debugfs.h>
13 #include <linux/hash.h>
14 #include <linux/irq_work.h>
15 #include <linux/jhash.h>
16 #include <linux/kcsan-checks.h>
17 #include <linux/kfence.h>
18 #include <linux/kmemleak.h>
19 #include <linux/list.h>
20 #include <linux/lockdep.h>
21 #include <linux/log2.h>
22 #include <linux/memblock.h>
23 #include <linux/moduleparam.h>
24 #include <linux/notifier.h>
25 #include <linux/panic_notifier.h>
26 #include <linux/random.h>
27 #include <linux/rcupdate.h>
28 #include <linux/sched/clock.h>
29 #include <linux/seq_file.h>
30 #include <linux/slab.h>
31 #include <linux/spinlock.h>
32 #include <linux/string.h>
33 
34 #include <asm/kfence.h>
35 
36 #include "kfence.h"
37 
38 /* Disables KFENCE on the first warning assuming an irrecoverable error. */
39 #define KFENCE_WARN_ON(cond)                                                   \
40 	({                                                                     \
41 		const bool __cond = WARN_ON(cond);                             \
42 		if (unlikely(__cond)) {                                        \
43 			WRITE_ONCE(kfence_enabled, false);                     \
44 			disabled_by_warn = true;                               \
45 		}                                                              \
46 		__cond;                                                        \
47 	})
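
/*
 * Usage sketch (this is the pattern used throughout this file): the macro
 * evaluates to the warning condition, so a caller can report the
 * irrecoverable state and bail out in one step, e.g. metadata_to_pageaddr()
 * below does:
 *
 *	if (KFENCE_WARN_ON(meta < kfence_metadata || ...))
 *		return 0;
 */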
48 
49 /* === Data ================================================================= */
50 
51 static bool kfence_enabled __read_mostly;
52 static bool disabled_by_warn __read_mostly;
53 
54 unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
55 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
56 
57 #ifdef MODULE_PARAM_PREFIX
58 #undef MODULE_PARAM_PREFIX
59 #endif
60 #define MODULE_PARAM_PREFIX "kfence."
61 
62 static int kfence_enable_late(void);
63 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
64 {
65 	unsigned long num;
66 	int ret = kstrtoul(val, 0, &num);
67 
68 	if (ret < 0)
69 		return ret;
70 
71 	/* A sample interval of 0 indicates that KFENCE is disabled. */
72 	if (!num && READ_ONCE(kfence_enabled)) {
73 		pr_info("disabled\n");
74 		WRITE_ONCE(kfence_enabled, false);
75 	}
76 
77 	*((unsigned long *)kp->arg) = num;
78 
79 	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
80 		return disabled_by_warn ? -EINVAL : kfence_enable_late();
81 	return 0;
82 }
83 
84 static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
85 {
86 	if (!READ_ONCE(kfence_enabled))
87 		return sprintf(buffer, "0\n");
88 
89 	return param_get_ulong(buffer, kp);
90 }
91 
92 static const struct kernel_param_ops sample_interval_param_ops = {
93 	.set = param_set_sample_interval,
94 	.get = param_get_sample_interval,
95 };
96 module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
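
/*
 * Example (a sketch; the paths follow from MODULE_PARAM_PREFIX "kfence."
 * above): the sample interval can be set at boot with
 * "kfence.sample_interval=100" on the kernel command line, or adjusted at
 * runtime via the parameter file:
 *
 *	echo 500 > /sys/module/kfence/parameters/sample_interval
 *	echo 0   > /sys/module/kfence/parameters/sample_interval   # disable
 *
 * Re-enabling after KFENCE_WARN_ON() disabled KFENCE is rejected with -EINVAL
 * (see disabled_by_warn in param_set_sample_interval() above).
 */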
97 
98 /* Pool usage threshold (in percent) above which currently covered allocations are skipped. */
99 static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
100 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
101 
102 /* If true, use a deferrable timer. */
103 static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
104 module_param_named(deferrable, kfence_deferrable, bool, 0444);
105 
106 /* If true, check all canary bytes on panic. */
107 static bool kfence_check_on_panic __read_mostly;
108 module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
109 
110 /* The pool of pages used for guard pages and objects. */
111 char *__kfence_pool __read_mostly;
112 EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
113 
114 /*
115  * Per-object metadata, with one-to-one mapping of object metadata to
116  * backing pages (in __kfence_pool).
117  */
118 static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
119 struct kfence_metadata *kfence_metadata __read_mostly;
120 
121 /*
122  * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
123  * So introduce kfence_metadata_init to initialize metadata, and then make
124  * kfence_metadata visible after initialization is successful. This prevents
125  * potential UAF or access to uninitialized metadata.
126  */
127 static struct kfence_metadata *kfence_metadata_init __read_mostly;
128 
129 /* Freelist with available objects. */
130 static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
131 static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
132 
133 /*
134  * The static key to set up a KFENCE allocation; if static keys are not used
135  * to gate allocations, it instead avoids a load and compare when KFENCE is disabled.
136  */
137 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
138 
139 /* Gates the allocation, ensuring only one succeeds in a given period. */
140 atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
141 
142 /*
143  * A Counting Bloom filter of allocation coverage: limits currently covered
144  * allocations of the same source from filling up the pool.
145  *
146  * Assuming a range of 15%-85% unique allocations in the pool at any point in
147  * time, the below parameters provide a probability of 0.02-0.33 for false
148  * positive hits respectively:
149  *
150  *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
151  */
152 #define ALLOC_COVERED_HNUM	2
153 #define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
154 #define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
155 #define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
156 #define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
157 static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
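
/*
 * Worked example for the formula above (numbers are illustrative, using the
 * default CONFIG_KFENCE_NUM_OBJECTS=255): ALLOC_COVERED_ORDER = 7 + 2 = 9, so
 * ALLOC_COVERED_SIZE = 512 counters with ALLOC_COVERED_HNUM = 2 hash functions.
 * With 85% of 255 objects (~217) as unique traces in the filter:
 *
 *	P = (1 - e^(-2 * (217 / 512)))^2 ~= 0.33
 *
 * and with 15% (~38) unique traces, P ~= 0.02 -- matching the 0.02-0.33 range
 * quoted above.
 */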
158 
159 /* Stack depth used to determine uniqueness of an allocation. */
160 #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
161 
162 /*
163  * Randomness for stack hashes, making the same collisions across reboots and
164  * different machines less likely.
165  */
166 static u32 stack_hash_seed __ro_after_init;
167 
168 /* Statistics counters for debugfs. */
169 enum kfence_counter_id {
170 	KFENCE_COUNTER_ALLOCATED,
171 	KFENCE_COUNTER_ALLOCS,
172 	KFENCE_COUNTER_FREES,
173 	KFENCE_COUNTER_ZOMBIES,
174 	KFENCE_COUNTER_BUGS,
175 	KFENCE_COUNTER_SKIP_INCOMPAT,
176 	KFENCE_COUNTER_SKIP_CAPACITY,
177 	KFENCE_COUNTER_SKIP_COVERED,
178 	KFENCE_COUNTER_COUNT,
179 };
180 static atomic_long_t counters[KFENCE_COUNTER_COUNT];
181 static const char *const counter_names[] = {
182 	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
183 	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
184 	[KFENCE_COUNTER_FREES]		= "total frees",
185 	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
186 	[KFENCE_COUNTER_BUGS]		= "total bugs",
187 	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
188 	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
189 	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
190 };
191 static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
192 
193 /* === Internals ============================================================ */
194 
195 static inline bool should_skip_covered(void)
196 {
197 	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;
198 
199 	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
200 }
201 
202 static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
203 {
204 	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
205 	num_entries = filter_irq_stacks(stack_entries, num_entries);
206 	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
207 }
208 
209 /*
210  * Adds (or subtracts) count @val for allocation stack trace hash
211  * @alloc_stack_hash from Counting Bloom filter.
212  */
213 static void alloc_covered_add(u32 alloc_stack_hash, int val)
214 {
215 	int i;
216 
217 	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
218 		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
219 		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
220 	}
221 }
222 
223 /*
224  * Returns true if the allocation stack trace hash @alloc_stack_hash is
225  * currently contained (non-zero count) in the Counting Bloom filter.
226  */
227 static bool alloc_covered_contains(u32 alloc_stack_hash)
228 {
229 	int i;
230 
231 	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
232 		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
233 			return false;
234 		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
235 	}
236 
237 	return true;
238 }
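
/*
 * How the filter is used (all within this file): kfence_guarded_alloc() calls
 * alloc_covered_add(hash, 1) and kfence_guarded_free() calls
 * alloc_covered_add(hash, -1), so a non-zero count in all HNUM slots means a
 * stack trace is "currently covered". __kfence_alloc() skips such allocations
 * once should_skip_covered() reports pool usage above skip_covered_thresh.
 * As with any Bloom filter, false positives are possible (an uncovered trace
 * may be skipped), but false negatives are not.
 */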
239 
240 static bool kfence_protect(unsigned long addr)
241 {
242 	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
243 }
244 
245 static bool kfence_unprotect(unsigned long addr)
246 {
247 	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
248 }
249 
250 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
251 {
252 	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
253 	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
254 
255 	/* The checks do not affect performance; only called from slow-paths. */
256 
257 	/* Only call with a pointer into kfence_metadata. */
258 	if (KFENCE_WARN_ON(meta < kfence_metadata ||
259 			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
260 		return 0;
261 
262 	/*
263 	 * This metadata object only ever maps to 1 page; verify that the stored
264 	 * address is in the expected range.
265 	 */
266 	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
267 		return 0;
268 
269 	return pageaddr;
270 }
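
/*
 * Address arithmetic example (follows directly from the offset computation
 * above): object i's data page starts at __kfence_pool + (i + 1) * 2 * PAGE_SIZE,
 * i.e. object 0 at +2 pages, object 1 at +4 pages, and so on, with the pages
 * in between left as protected guard pages (see kfence_init_pool() below).
 */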
271 
272 /*
273  * Update the object's metadata state, including updating the alloc/free stacks
274  * depending on the state transition.
275  */
276 static noinline void
277 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
278 		      unsigned long *stack_entries, size_t num_stack_entries)
279 {
280 	struct kfence_track *track =
281 		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
282 
283 	lockdep_assert_held(&meta->lock);
284 
285 	if (stack_entries) {
286 		memcpy(track->stack_entries, stack_entries,
287 		       num_stack_entries * sizeof(stack_entries[0]));
288 	} else {
289 		/*
290 		 * Skip over 1 (this) function; noinline ensures we do not
291 		 * accidentally skip over the caller by never inlining.
292 		 */
293 		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
294 	}
295 	track->num_stack_entries = num_stack_entries;
296 	track->pid = task_pid_nr(current);
297 	track->cpu = raw_smp_processor_id();
298 	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
299 
300 	/*
301 	 * Pairs with READ_ONCE() in
302 	 *	kfence_shutdown_cache(),
303 	 *	kfence_handle_page_fault().
304 	 */
305 	WRITE_ONCE(meta->state, next);
306 }
307 
308 /* Check canary byte at @addr. */
309 static inline bool check_canary_byte(u8 *addr)
310 {
311 	struct kfence_metadata *meta;
312 	unsigned long flags;
313 
314 	if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
315 		return true;
316 
317 	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
318 
319 	meta = addr_to_metadata((unsigned long)addr);
320 	raw_spin_lock_irqsave(&meta->lock, flags);
321 	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
322 	raw_spin_unlock_irqrestore(&meta->lock, flags);
323 
324 	return false;
325 }
326 
327 static inline void set_canary(const struct kfence_metadata *meta)
328 {
329 	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
330 	unsigned long addr = pageaddr;
331 
332 	/*
333 	 * The canary pattern may be written over part of the object memory; this
334 	 * is harmless, since the user is expected to initialize the object before use.
335 	 */
336 	for (; addr < meta->addr; addr += sizeof(u64))
337 		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
338 
339 	addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
340 	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
341 		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
342 }
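
/*
 * Resulting page layout (a sketch): every byte of the data page outside the
 * object proper is filled with the canary pattern, e.g. for a right-aligned
 * allocation:
 *
 *	| canary ... canary | object (meta->size bytes) | canary tail |
 *	^ pageaddr           ^ meta->addr                             ^ page end
 *
 * The right-hand canary starts at a u64-aligned address, so its first partial
 * word may overlap the object's last bytes -- which is why the comment above
 * notes the user must initialize the object before use.
 */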
343 
344 static inline void check_canary(const struct kfence_metadata *meta)
345 {
346 	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
347 	unsigned long addr = pageaddr;
348 
349 	/*
350 	 * We'll iterate over each canary byte per-side until a corrupted byte
351 	 * is found. However, we'll still iterate over the canary bytes to the
352 	 * right of the object even if there was an error in the canary bytes to
353 	 * the left of the object. Specifically, if check_canary_byte()
354 	 * generates an error, showing both sides might give more clues as to
355 	 * what the error is about when displaying which bytes were corrupted.
356 	 */
357 
358 	/* Apply to left of object. */
359 	for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
360 		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
361 			break;
362 	}
363 
364 	/*
365 	 * If a corrupted byte was found within a u64 word above, or the canary
366 	 * memory left of the object is not a whole number of u64 words, the
367 	 * remaining bytes are checked one by one.
368 	 */
369 	for (; addr < meta->addr; addr++) {
370 		if (unlikely(!check_canary_byte((u8 *)addr)))
371 			break;
372 	}
373 
374 	/* Apply to right of object. */
375 	for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
376 		if (unlikely(!check_canary_byte((u8 *)addr)))
377 			return;
378 	}
379 	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
380 		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {
381 
382 			for (; addr - pageaddr < PAGE_SIZE; addr++) {
383 				if (!check_canary_byte((u8 *)addr))
384 					return;
385 			}
386 		}
387 	}
388 }
389 
390 static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
391 				  unsigned long *stack_entries, size_t num_stack_entries,
392 				  u32 alloc_stack_hash)
393 {
394 	struct kfence_metadata *meta = NULL;
395 	unsigned long flags;
396 	struct slab *slab;
397 	void *addr;
398 	const bool random_right_allocate = get_random_u32_below(2);
399 	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
400 				  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
401 
402 	/* Try to obtain a free object. */
403 	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
404 	if (!list_empty(&kfence_freelist)) {
405 		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
406 		list_del_init(&meta->list);
407 	}
408 	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
409 	if (!meta) {
410 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
411 		return NULL;
412 	}
413 
414 	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
415 		/*
416 		 * This is extremely unlikely -- we are reporting on a
417 		 * use-after-free, which locked meta->lock, and the reporting
418 		 * code via printk calls kmalloc() which ends up in
419 		 * kfence_alloc() and tries to grab the same object that we're
420 		 * reporting on. While it has never been observed, lockdep does
421 		 * report that there is a possibility of deadlock. Fix it by
422 		 * using trylock and bailing out gracefully.
423 		 */
424 		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
425 		/* Put the object back on the freelist. */
426 		list_add_tail(&meta->list, &kfence_freelist);
427 		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
428 
429 		return NULL;
430 	}
431 
432 	meta->addr = metadata_to_pageaddr(meta);
433 	/* Unprotect if we're reusing this page. */
434 	if (meta->state == KFENCE_OBJECT_FREED)
435 		kfence_unprotect(meta->addr);
436 
437 	/*
438 	 * Note: for allocations made before RNG initialization, the RNG will always
439 	 * return zero. We still benefit from enabling KFENCE as early as
440 	 * possible, even when the RNG is not yet available, as this will allow
441 	 * KFENCE to detect bugs due to earlier allocations. The only downside
442 	 * is that the out-of-bounds accesses detected are deterministic for
443 	 * such allocations.
444 	 */
445 	if (random_right_allocate) {
446 		/* Allocate on the "right" side, re-calculate address. */
447 		meta->addr += PAGE_SIZE - size;
448 		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
449 	}
450 
451 	addr = (void *)meta->addr;
452 
453 	/* Update remaining metadata. */
454 	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
455 	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
456 	WRITE_ONCE(meta->cache, cache);
457 	meta->size = size;
458 	meta->alloc_stack_hash = alloc_stack_hash;
459 	raw_spin_unlock_irqrestore(&meta->lock, flags);
460 
461 	alloc_covered_add(alloc_stack_hash, 1);
462 
463 	/* Set required slab fields. */
464 	slab = virt_to_slab((void *)meta->addr);
465 	slab->slab_cache = cache;
466 #if defined(CONFIG_SLUB)
467 	slab->objects = 1;
468 #elif defined(CONFIG_SLAB)
469 	slab->s_mem = addr;
470 #endif
471 
472 	/* Memory initialization. */
473 	set_canary(meta);
474 
475 	/*
476 	 * We check slab_want_init_on_alloc() ourselves, rather than letting
477 	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
478 	 * redzone.
479 	 */
480 	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
481 		memzero_explicit(addr, size);
482 	if (cache->ctor)
483 		cache->ctor(addr);
484 
485 	if (random_fault)
486 		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
487 
488 	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
489 	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);
490 
491 	return addr;
492 }
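
/*
 * Note on the random placement above (a sketch of the consequence, not a new
 * mechanism): left-placed objects start at the page boundary, so out-of-bounds
 * accesses below the object hit the preceding guard page immediately;
 * right-placed objects end at (or near, after ALIGN_DOWN to cache->align) the
 * page boundary, so accesses past the object hit the following guard page.
 * Accesses into the remaining in-page slack are caught later by the canary
 * check in kfence_guarded_free().
 */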
493 
494 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
495 {
496 	struct kcsan_scoped_access assert_page_exclusive;
497 	unsigned long flags;
498 	bool init;
499 
500 	raw_spin_lock_irqsave(&meta->lock, flags);
501 
502 	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
503 		/* Invalid or double-free, bail out. */
504 		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
505 		kfence_report_error((unsigned long)addr, false, NULL, meta,
506 				    KFENCE_ERROR_INVALID_FREE);
507 		raw_spin_unlock_irqrestore(&meta->lock, flags);
508 		return;
509 	}
510 
511 	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
512 	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
513 				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
514 				  &assert_page_exclusive);
515 
516 	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
517 		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */
518 
519 	/* Restore page protection if there was an OOB access. */
520 	if (meta->unprotected_page) {
521 		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
522 		kfence_protect(meta->unprotected_page);
523 		meta->unprotected_page = 0;
524 	}
525 
526 	/* Mark the object as freed. */
527 	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
528 	init = slab_want_init_on_free(meta->cache);
529 	raw_spin_unlock_irqrestore(&meta->lock, flags);
530 
531 	alloc_covered_add(meta->alloc_stack_hash, -1);
532 
533 	/* Check canary bytes for memory corruption. */
534 	check_canary(meta);
535 
536 	/*
537 	 * Clear memory if init-on-free is set. While we protect the page, the
538 	 * data is still there, and after a use-after-free is detected, we
539 	 * unprotect the page, so the data is still accessible.
540 	 */
541 	if (!zombie && unlikely(init))
542 		memzero_explicit(addr, meta->size);
543 
544 	/* Protect to detect use-after-frees. */
545 	kfence_protect((unsigned long)addr);
546 
547 	kcsan_end_scoped_access(&assert_page_exclusive);
548 	if (!zombie) {
549 		/* Add it to the tail of the freelist for reuse. */
550 		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
551 		KFENCE_WARN_ON(!list_empty(&meta->list));
552 		list_add_tail(&meta->list, &kfence_freelist);
553 		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
554 
555 		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
556 		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
557 	} else {
558 		/* See kfence_shutdown_cache(). */
559 		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
560 	}
561 }
562 
563 static void rcu_guarded_free(struct rcu_head *h)
564 {
565 	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);
566 
567 	kfence_guarded_free((void *)meta->addr, meta, false);
568 }
569 
570 /*
571  * Initialization of the KFENCE pool after its allocation.
572  * Returns 0 on success; otherwise returns the address up to
573  * which partial initialization succeeded.
574  */
575 static unsigned long kfence_init_pool(void)
576 {
577 	unsigned long addr = (unsigned long)__kfence_pool;
578 	struct page *pages;
579 	int i;
580 
581 	if (!arch_kfence_init_pool())
582 		return addr;
583 
584 	pages = virt_to_page(__kfence_pool);
585 
586 	/*
587 	 * Set up object pages: they must have PG_slab set, to avoid freeing
588 	 * these as real pages.
589 	 *
590 	 * We also want to avoid inserting kfence_free() in the kfree()
591 	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
592 	 * enters __slab_free() slow-path.
593 	 */
594 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
595 		struct slab *slab = page_slab(nth_page(pages, i));
596 
597 		if (!i || (i % 2))
598 			continue;
599 
600 		__folio_set_slab(slab_folio(slab));
601 #ifdef CONFIG_MEMCG
602 		slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg |
603 				   MEMCG_DATA_OBJCGS;
604 #endif
605 	}
606 
607 	/*
608 	 * Protect the first 2 pages. The first page is mostly unnecessary, and
609 	 * merely serves as an extended guard page. However, adding one
610 	 * additional page in the beginning gives us an even number of pages,
611 	 * which simplifies the mapping of address to metadata index.
612 	 */
613 	for (i = 0; i < 2; i++) {
614 		if (unlikely(!kfence_protect(addr)))
615 			return addr;
616 
617 		addr += PAGE_SIZE;
618 	}
619 
620 	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
621 		struct kfence_metadata *meta = &kfence_metadata_init[i];
622 
623 		/* Initialize metadata. */
624 		INIT_LIST_HEAD(&meta->list);
625 		raw_spin_lock_init(&meta->lock);
626 		meta->state = KFENCE_OBJECT_UNUSED;
627 		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
628 		list_add_tail(&meta->list, &kfence_freelist);
629 
630 		/* Protect the right redzone. */
631 		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
632 			goto reset_slab;
633 
634 		addr += 2 * PAGE_SIZE;
635 	}
636 
637 	/*
638 	 * Make kfence_metadata visible only when initialization is successful.
639 	 * Otherwise, if the initialization fails and kfence_metadata is freed,
640 	 * it may cause UAF in kfence_shutdown_cache().
641 	 */
642 	smp_store_release(&kfence_metadata, kfence_metadata_init);
643 	return 0;
644 
645 reset_slab:
646 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
647 		struct slab *slab = page_slab(nth_page(pages, i));
648 
649 		if (!i || (i % 2))
650 			continue;
651 #ifdef CONFIG_MEMCG
652 		slab->memcg_data = 0;
653 #endif
654 		__folio_clear_slab(slab_folio(slab));
655 	}
656 
657 	return addr;
658 }
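
/*
 * Pool layout produced above (a sketch): two leading guard pages, then
 * alternating data and guard pages, one data page per metadata object:
 *
 *	__kfence_pool: [ guard | guard | obj 0 | guard | obj 1 | guard | ... ]
 *
 * Only the even-numbered pages starting at index 2 get PG_slab set (see the
 * "!i || (i % 2)" skip in the loop above); page 0 and all odd-numbered pages
 * remain protected guard pages.
 */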
659 
660 static bool __init kfence_init_pool_early(void)
661 {
662 	unsigned long addr;
663 
664 	if (!__kfence_pool)
665 		return false;
666 
667 	addr = kfence_init_pool();
668 
669 	if (!addr) {
670 		/*
671 		 * The pool is live and will never be deallocated from this point on.
672 		 * Ignore the pool object from the kmemleak phys object tree, as it would
673 		 * otherwise overlap with allocations returned by kfence_alloc(), which
674 		 * are registered with kmemleak through the slab post-alloc hook.
675 		 */
676 		kmemleak_ignore_phys(__pa(__kfence_pool));
677 		return true;
678 	}
679 
680 	/*
681 	 * Only release unprotected pages, and do not try to go back and change
682 	 * page attributes due to risk of failing to do so as well. If changing
683 	 * page attributes for some pages fails, it is very likely that it also
684 	 * fails for the first page, and therefore expect addr==__kfence_pool in
685 	 * most failure cases.
686 	 */
687 	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
688 	__kfence_pool = NULL;
689 
690 	memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
691 	kfence_metadata_init = NULL;
692 
693 	return false;
694 }
695 
696 /* === DebugFS Interface ==================================================== */
697 
698 static int stats_show(struct seq_file *seq, void *v)
699 {
700 	int i;
701 
702 	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
703 	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
704 		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));
705 
706 	return 0;
707 }
708 DEFINE_SHOW_ATTRIBUTE(stats);
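
/*
 * Example of the resulting /sys/kernel/debug/kfence/stats output (values are
 * illustrative only; the format follows stats_show() and counter_names[]):
 *
 *	enabled: 1
 *	currently allocated: 42
 *	total allocations: 3042
 *	total frees: 3000
 *	zombie allocations: 0
 *	total bugs: 1
 *	skipped allocations (incompatible): 7
 *	skipped allocations (capacity): 0
 *	skipped allocations (covered): 15
 */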
709 
710 /*
711  * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
712  * start_object() and next_object() return the object index + 1, because NULL is used
713  * to stop iteration.
714  */
715 static void *start_object(struct seq_file *seq, loff_t *pos)
716 {
717 	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
718 		return (void *)((long)*pos + 1);
719 	return NULL;
720 }
721 
722 static void stop_object(struct seq_file *seq, void *v)
723 {
724 }
725 
726 static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
727 {
728 	++*pos;
729 	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
730 		return (void *)((long)*pos + 1);
731 	return NULL;
732 }
733 
734 static int show_object(struct seq_file *seq, void *v)
735 {
736 	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
737 	unsigned long flags;
738 
739 	raw_spin_lock_irqsave(&meta->lock, flags);
740 	kfence_print_object(seq, meta);
741 	raw_spin_unlock_irqrestore(&meta->lock, flags);
742 	seq_puts(seq, "---------------------------------\n");
743 
744 	return 0;
745 }
746 
747 static const struct seq_operations objects_sops = {
748 	.start = start_object,
749 	.next = next_object,
750 	.stop = stop_object,
751 	.show = show_object,
752 };
753 DEFINE_SEQ_ATTRIBUTE(objects);
754 
755 static int kfence_debugfs_init(void)
756 {
757 	struct dentry *kfence_dir;
758 
759 	if (!READ_ONCE(kfence_enabled))
760 		return 0;
761 
762 	kfence_dir = debugfs_create_dir("kfence", NULL);
763 	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
764 	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
765 	return 0;
766 }
767 
768 late_initcall(kfence_debugfs_init);
769 
770 /* === Panic Notifier ====================================================== */
771 
772 static void kfence_check_all_canary(void)
773 {
774 	int i;
775 
776 	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
777 		struct kfence_metadata *meta = &kfence_metadata[i];
778 
779 		if (meta->state == KFENCE_OBJECT_ALLOCATED)
780 			check_canary(meta);
781 	}
782 }
783 
784 static int kfence_check_canary_callback(struct notifier_block *nb,
785 					unsigned long reason, void *arg)
786 {
787 	kfence_check_all_canary();
788 	return NOTIFY_OK;
789 }
790 
791 static struct notifier_block kfence_check_canary_notifier = {
792 	.notifier_call = kfence_check_canary_callback,
793 };
794 
795 /* === Allocation Gate Timer ================================================ */
796 
797 static struct delayed_work kfence_timer;
798 
799 #ifdef CONFIG_KFENCE_STATIC_KEYS
800 /* Wait queue to wake up allocation-gate timer task. */
801 static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
802 
803 static void wake_up_kfence_timer(struct irq_work *work)
804 {
805 	wake_up(&allocation_wait);
806 }
807 static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
808 #endif
809 
810 /*
811  * Set up delayed work, which will enable and disable the static key. We need to
812  * use a work queue (rather than a simple timer), since enabling and disabling a
813  * static key cannot be done from an interrupt.
814  *
815  * Note: Toggling a static branch currently causes IPIs, and here we'll end up
816  * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
817  * more aggressive sampling intervals), we could get away with a variant that
818  * avoids IPIs, at the cost of not immediately capturing allocations if the
819  * instructions remain cached.
820  */
821 static void toggle_allocation_gate(struct work_struct *work)
822 {
823 	if (!READ_ONCE(kfence_enabled))
824 		return;
825 
826 	atomic_set(&kfence_allocation_gate, 0);
827 #ifdef CONFIG_KFENCE_STATIC_KEYS
828 	/* Enable static key, and await allocation to happen. */
829 	static_branch_enable(&kfence_allocation_key);
830 
831 	wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
832 
833 	/* Disable static key and reset timer. */
834 	static_branch_disable(&kfence_allocation_key);
835 #endif
836 	queue_delayed_work(system_unbound_wq, &kfence_timer,
837 			   msecs_to_jiffies(kfence_sample_interval));
838 }
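
/*
 * Sampling cycle implemented above (a sketch, for the CONFIG_KFENCE_STATIC_KEYS
 * case): every kfence_sample_interval milliseconds the work item resets
 * kfence_allocation_gate to 0 and enables kfence_allocation_key; the first
 * allocation that wins atomic_inc_return() on the gate in __kfence_alloc()
 * becomes the sampled, guarded allocation and wakes this work via
 * wake_up_kfence_timer_work; the key is then disabled again and the work
 * re-queues itself. Without static keys, the work only resets the gate and
 * re-queues.
 */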
839 
840 /* === Public interface ===================================================== */
841 
842 void __init kfence_alloc_pool_and_metadata(void)
843 {
844 	if (!kfence_sample_interval)
845 		return;
846 
847 	/*
848 	 * If the pool has already been initialized by arch, there is no need to
849 	 * re-allocate the memory pool.
850 	 */
851 	if (!__kfence_pool)
852 		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
853 
854 	if (!__kfence_pool) {
855 		pr_err("failed to allocate pool\n");
856 		return;
857 	}
858 
859 	/* The memory allocated by memblock has been zeroed out. */
860 	kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
861 	if (!kfence_metadata_init) {
862 		pr_err("failed to allocate metadata\n");
863 		memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
864 		__kfence_pool = NULL;
865 	}
866 }
867 
868 static void kfence_init_enable(void)
869 {
870 	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
871 		static_branch_enable(&kfence_allocation_key);
872 
873 	if (kfence_deferrable)
874 		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
875 	else
876 		INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);
877 
878 	if (kfence_check_on_panic)
879 		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
880 
881 	WRITE_ONCE(kfence_enabled, true);
882 	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
883 
884 	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
885 		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
886 		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
887 }
888 
889 void __init kfence_init(void)
890 {
891 	stack_hash_seed = get_random_u32();
892 
893 	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
894 	if (!kfence_sample_interval)
895 		return;
896 
897 	if (!kfence_init_pool_early()) {
898 		pr_err("%s failed\n", __func__);
899 		return;
900 	}
901 
902 	kfence_init_enable();
903 }
904 
905 static int kfence_init_late(void)
906 {
907 	const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
908 	const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
909 	unsigned long addr = (unsigned long)__kfence_pool;
910 	unsigned long free_size = KFENCE_POOL_SIZE;
911 	int err = -ENOMEM;
912 
913 #ifdef CONFIG_CONTIG_ALLOC
914 	struct page *pages;
915 
916 	pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
917 				   NULL);
918 	if (!pages)
919 		return -ENOMEM;
920 
921 	__kfence_pool = page_to_virt(pages);
922 	pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
923 				   NULL);
924 	if (pages)
925 		kfence_metadata_init = page_to_virt(pages);
926 #else
927 	if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
928 	    nr_pages_meta > MAX_ORDER_NR_PAGES) {
929 		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
930 		return -EINVAL;
931 	}
932 
933 	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
934 	if (!__kfence_pool)
935 		return -ENOMEM;
936 
937 	kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
938 #endif
939 
940 	if (!kfence_metadata_init)
941 		goto free_pool;
942 
943 	memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
944 	addr = kfence_init_pool();
945 	if (!addr) {
946 		kfence_init_enable();
947 		kfence_debugfs_init();
948 		return 0;
949 	}
950 
951 	pr_err("%s failed\n", __func__);
952 	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
953 	err = -EBUSY;
954 
955 #ifdef CONFIG_CONTIG_ALLOC
956 	free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
957 			  nr_pages_meta);
958 free_pool:
959 	free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
960 			  free_size / PAGE_SIZE);
961 #else
962 	free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
963 free_pool:
964 	free_pages_exact((void *)addr, free_size);
965 #endif
966 
967 	kfence_metadata_init = NULL;
968 	__kfence_pool = NULL;
969 	return err;
970 }
971 
972 static int kfence_enable_late(void)
973 {
974 	if (!__kfence_pool)
975 		return kfence_init_late();
976 
977 	WRITE_ONCE(kfence_enabled, true);
978 	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
979 	pr_info("re-enabled\n");
980 	return 0;
981 }
982 
983 void kfence_shutdown_cache(struct kmem_cache *s)
984 {
985 	unsigned long flags;
986 	struct kfence_metadata *meta;
987 	int i;
988 
989 	/* Pairs with release in kfence_init_pool(). */
990 	if (!smp_load_acquire(&kfence_metadata))
991 		return;
992 
993 	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
994 		bool in_use;
995 
996 		meta = &kfence_metadata[i];
997 
998 		/*
999 		 * If we observe some inconsistent cache and state pair where we
1000 		 * should have returned false here, cache destruction is racing
1001 		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
1002 		 * the lock will not help, as different critical section
1003 		 * serialization will have the same outcome.
1004 		 */
1005 		if (READ_ONCE(meta->cache) != s ||
1006 		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
1007 			continue;
1008 
1009 		raw_spin_lock_irqsave(&meta->lock, flags);
1010 		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
1011 		raw_spin_unlock_irqrestore(&meta->lock, flags);
1012 
1013 		if (in_use) {
1014 			/*
1015 			 * This cache still has allocations, and we should not
1016 			 * release them back into the freelist so they can still
1017 			 * safely be used and retain the kernel's default
1018 			 * behaviour of keeping the allocations alive (leak the
1019 			 * cache); however, they effectively become "zombie
1020 			 * allocations" as the KFENCE objects are the only ones
1021 			 * still in use and the owning cache is being destroyed.
1022 			 *
1023 			 * We mark them freed, so that any subsequent use shows
1024 			 * more useful error messages that will include stack
1025 			 * traces of the user of the object, the original
1026 			 * allocation, and the caller of kfence_shutdown_cache().
1027 			 */
1028 			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
1029 		}
1030 	}
1031 
1032 	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
1033 		meta = &kfence_metadata[i];
1034 
1035 		/* See above. */
1036 		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
1037 			continue;
1038 
1039 		raw_spin_lock_irqsave(&meta->lock, flags);
1040 		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
1041 			meta->cache = NULL;
1042 		raw_spin_unlock_irqrestore(&meta->lock, flags);
1043 	}
1044 }
1045 
1046 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
1047 {
1048 	unsigned long stack_entries[KFENCE_STACK_DEPTH];
1049 	size_t num_stack_entries;
1050 	u32 alloc_stack_hash;
1051 
1052 	/*
1053 	 * Perform size check before switching kfence_allocation_gate, so that
1054 	 * we don't disable KFENCE without making an allocation.
1055 	 */
1056 	if (size > PAGE_SIZE) {
1057 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1058 		return NULL;
1059 	}
1060 
1061 	/*
1062 	 * Skip allocations from non-default zones, including DMA. We cannot
1063 	 * guarantee that pages in the KFENCE pool will have the requested
1064 	 * properties (e.g. reside in DMAable memory).
1065 	 */
1066 	if ((flags & GFP_ZONEMASK) ||
1067 	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
1068 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1069 		return NULL;
1070 	}
1071 
1072 	/*
1073 	 * Skip allocations for this slab, if KFENCE has been disabled for
1074 	 * this slab.
1075 	 */
1076 	if (s->flags & SLAB_SKIP_KFENCE)
1077 		return NULL;
1078 
1079 	if (atomic_inc_return(&kfence_allocation_gate) > 1)
1080 		return NULL;
1081 #ifdef CONFIG_KFENCE_STATIC_KEYS
1082 	/*
1083 	 * waitqueue_active() is fully ordered after the update of
1084 	 * kfence_allocation_gate per atomic_inc_return().
1085 	 */
1086 	if (waitqueue_active(&allocation_wait)) {
1087 		/*
1088 		 * Calling wake_up() here may deadlock when allocations happen
1089 		 * from within timer code. Use an irq_work to defer it.
1090 		 */
1091 		irq_work_queue(&wake_up_kfence_timer_work);
1092 	}
1093 #endif
1094 
1095 	if (!READ_ONCE(kfence_enabled))
1096 		return NULL;
1097 
1098 	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
1099 
1100 	/*
1101 	 * Do expensive check for coverage of allocation in slow-path after
1102 	 * allocation_gate has already become non-zero, even though it might
1103 	 * mean not making any allocation within a given sample interval.
1104 	 *
1105 	 * This ensures reasonable allocation coverage when the pool is almost
1106 	 * full, including avoiding long-lived allocations of the same source
1107 	 * filling up the pool (e.g. pagecache allocations).
1108 	 */
1109 	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
1110 	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
1111 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
1112 		return NULL;
1113 	}
1114 
1115 	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
1116 				    alloc_stack_hash);
1117 }
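
/*
 * For context (a sketch; the exact wrapper lives in <linux/kfence.h>, not in
 * this file): the slab allocators do not call __kfence_alloc() directly but go
 * through the inline kfence_alloc(), which tests kfence_allocation_key first.
 * A NULL return from either simply means "serve this allocation from the
 * regular slab path", conceptually:
 *
 *	void *p = kfence_alloc(s, size, flags);		// usually NULL (not sampled)
 *	if (!p)
 *		p = allocate_from_slab(s, size, flags);	// illustrative name only
 */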
1118 
1119 size_t kfence_ksize(const void *addr)
1120 {
1121 	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1122 
1123 	/*
1124 	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1125 	 * either a use-after-free or invalid access.
1126 	 */
1127 	return meta ? meta->size : 0;
1128 }
1129 
1130 void *kfence_object_start(const void *addr)
1131 {
1132 	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1133 
1134 	/*
1135 	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1136 	 * either a use-after-free or invalid access.
1137 	 */
1138 	return meta ? (void *)meta->addr : NULL;
1139 }
1140 
1141 void __kfence_free(void *addr)
1142 {
1143 	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1144 
1145 #ifdef CONFIG_MEMCG
1146 	KFENCE_WARN_ON(meta->objcg);
1147 #endif
1148 	/*
1149 	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
1150 	 * the object, as the object page may be recycled for other-typed
1151 	 * objects once it has been freed. meta->cache may be NULL if the cache
1152 	 * was destroyed.
1153 	 */
1154 	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
1155 		call_rcu(&meta->rcu_head, rcu_guarded_free);
1156 	else
1157 		kfence_guarded_free(addr, meta, false);
1158 }
1159 
1160 bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
1161 {
1162 	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
1163 	struct kfence_metadata *to_report = NULL;
1164 	enum kfence_error_type error_type;
1165 	unsigned long flags;
1166 
1167 	if (!is_kfence_address((void *)addr))
1168 		return false;
1169 
1170 	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
1171 		return kfence_unprotect(addr); /* ... unprotect and proceed. */
1172 
1173 	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
1174 
1175 	if (page_index % 2) {
1176 		/* This is a redzone, report a buffer overflow. */
1177 		struct kfence_metadata *meta;
1178 		int distance = 0;
1179 
1180 		meta = addr_to_metadata(addr - PAGE_SIZE);
1181 		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1182 			to_report = meta;
1183 			/* Data race ok; distance calculation approximate. */
1184 			distance = addr - data_race(meta->addr + meta->size);
1185 		}
1186 
1187 		meta = addr_to_metadata(addr + PAGE_SIZE);
1188 		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1189 			/* Data race ok; distance calculation approximate. */
1190 			if (!to_report || distance > data_race(meta->addr) - addr)
1191 				to_report = meta;
1192 		}
1193 
1194 		if (!to_report)
1195 			goto out;
1196 
1197 		raw_spin_lock_irqsave(&to_report->lock, flags);
1198 		to_report->unprotected_page = addr;
1199 		error_type = KFENCE_ERROR_OOB;
1200 
1201 		/*
1202 		 * If the object was freed before we took the lock we can still
1203 		 * report this as an OOB -- the report will simply show the
1204 		 * stacktrace of the free as well.
1205 		 */
1206 	} else {
1207 		to_report = addr_to_metadata(addr);
1208 		if (!to_report)
1209 			goto out;
1210 
1211 		raw_spin_lock_irqsave(&to_report->lock, flags);
1212 		error_type = KFENCE_ERROR_UAF;
1213 		/*
1214 		 * We may race with __kfence_alloc(), and it is possible that a
1215 		 * freed object may be reallocated. We simply report this as a
1216 		 * use-after-free, with the stack trace showing the place where
1217 		 * the object was re-allocated.
1218 		 */
1219 	}
1220 
1221 out:
1222 	if (to_report) {
1223 		kfence_report_error(addr, is_write, regs, to_report, error_type);
1224 		raw_spin_unlock_irqrestore(&to_report->lock, flags);
1225 	} else {
1226 		/* This may be a UAF or OOB access, but we can't be sure. */
1227 		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
1228 	}
1229 
1230 	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
1231 }
1232