xref: /linux/mm/kfence/core.c (revision e80a48bade619ec5a92230b3d4ae84bfc2746822)
// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond)) {                                        \
			WRITE_ONCE(kfence_enabled, false);                     \
			disabled_by_warn = true;                               \
		}                                                              \
		__cond;                                                        \
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;
static bool disabled_by_warn __read_mostly;

unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int kfence_enable_late(void);
static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	/* Using 0 to indicate KFENCE is disabled. */
	if (!num && READ_ONCE(kfence_enabled)) {
		pr_info("disabled\n");
		WRITE_ONCE(kfence_enabled, false);
	}

	*((unsigned long *)kp->arg) = num;

	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return disabled_by_warn ? -EINVAL : kfence_enable_late();
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
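
/*
 * Usage sketch: with MODULE_PARAM_PREFIX set to "kfence.", the interval can
 * be given at boot as kfence.sample_interval=N (in milliseconds), or changed
 * at runtime through the 0600 sysfs file
 * /sys/module/kfence/parameters/sample_interval. Per the setter above,
 * writing 0 disables KFENCE, and writing a non-zero value after boot
 * re-enables it via kfence_enable_late() -- unless KFENCE_WARN_ON() has
 * permanently disabled it (disabled_by_warn).
 */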

/* Pool usage% threshold when currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);

/* If true, check all canary bytes on panic. */
static bool kfence_check_on_panic __read_mostly;
module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM	2
#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
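
/*
 * Sizing example (a sketch, assuming the default CONFIG_KFENCE_NUM_OBJECTS of
 * 255): const_ilog2(255) == 7, giving ALLOC_COVERED_ORDER == 9 and a table of
 * ALLOC_COVERED_SIZE == 512 counters. Each allocation touches
 * ALLOC_COVERED_HNUM == 2 of them:
 *
 *	first  = &alloc_covered[hash & ALLOC_COVERED_MASK];
 *	second = &alloc_covered[ALLOC_COVERED_HNEXT(hash) & ALLOC_COVERED_MASK];
 */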

/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_SKIP_COVERED,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
	num_entries = filter_irq_stacks(stack_entries, num_entries);
	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash in the Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in the Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}
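
/*
 * How the filter is used (a sketch of the logic found later in this file):
 * allocations call alloc_covered_add(hash, 1), frees call
 * alloc_covered_add(hash, -1), and once pool usage crosses
 * skip_covered_thresh, an allocation whose stack hash is still covered is
 * skipped in __kfence_alloc():
 *
 *	if (should_skip_covered() && alloc_covered_contains(hash))
 *		return NULL;	// counted as "skipped allocations (covered)"
 */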

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}
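
/*
 * Worked example for the mapping above: the pool starts with 2 guard pages,
 * followed by one object page and one guard page per object (see
 * kfence_init_pool()), so object i lives on pool page 2 * (i + 1):
 *
 *	meta == &kfence_metadata[0]: offset == 2 * PAGE_SIZE
 *	meta == &kfence_metadata[1]: offset == 4 * PAGE_SIZE
 */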

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}
	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	struct kfence_metadata *meta;
	unsigned long flags;

	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	meta = addr_to_metadata((unsigned long)addr);
	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}
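
/*
 * Layout of one object page as covered by the helpers above (a sketch; the
 * canary pattern is KFENCE_CANARY_PATTERN() from kfence.h, derived from each
 * byte's address):
 *
 *	page start                                            page end
 *	| canaries ... | object (meta->addr, meta->size) | ... canaries |
 *
 * Depending on whether the object was placed left or right (see
 * kfence_guarded_alloc() below), one of the two canary regions may be empty.
 */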

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries,
				  u32 alloc_stack_hash)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct slab *slab;
	void *addr;
	const bool random_right_allocate = get_random_u32_below(2);
	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
				  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization,
	 * get_random_u32_below() will always return zero. We still benefit
	 * from enabling KFENCE as early as possible, even when the RNG is not
	 * yet available, as this will allow KFENCE to detect bugs due to
	 * earlier allocations. The only downside is that the out-of-bounds
	 * accesses detected are deterministic for such allocations.
	 */
	if (random_right_allocate) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	meta->alloc_stack_hash = alloc_stack_hash;
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(alloc_stack_hash, 1);

	/* Set required slab fields. */
	slab = virt_to_slab((void *)meta->addr);
	slab->slab_cache = cache;
#if defined(CONFIG_SLUB)
	slab->objects = 1;
#elif defined(CONFIG_SLAB)
	slab->s_mem = addr;
#endif

	/* Memory initialization. */
	for_each_canary(meta, set_canary_byte);

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (random_fault)
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}
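
/*
 * Why the random_right_allocate coin flip above matters (a sketch): an object
 * placed at the page start puts the left guard page directly below it, so an
 * access at addr[-1] faults immediately; an object placed at the page end
 * (modulo cache->align padding) puts the right guard page directly above it,
 * so an access at addr[size] faults immediately:
 *
 *	left:  [ guard | object ....... canaries | guard ]
 *	right: [ guard | canaries ....... object | guard ]
 *
 * Accesses landing in the canary bytes instead are caught later, on free.
 */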

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;
	bool init;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
	init = slab_want_init_on_free(meta->cache);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(init))
		memzero_explicit(addr, meta->size);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */
static unsigned long kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!arch_kfence_init_pool())
		return addr;

	pages = virt_to_page(__kfence_pool);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(&pages[i]);

		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			return addr;

		__folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG
		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
				   MEMCG_DATA_OBJCGS;
#endif
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			return addr;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			return addr;

		addr += 2 * PAGE_SIZE;
	}

	return 0;
}
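
/*
 * Resulting pool layout (a sketch; G = protected guard page, O<i> = object
 * page backing kfence_metadata[i]); KFENCE_POOL_SIZE spans
 * 2 + 2 * CONFIG_KFENCE_NUM_OBJECTS pages:
 *
 *	+---+---+----+---+----+---+----+--
 *	| G | G | O0 | G | O1 | G | O2 | ...
 *	+---+---+----+---+----+---+----+--
 */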

static bool __init kfence_init_pool_early(void)
{
	unsigned long addr;

	if (!__kfence_pool)
		return false;

	addr = kfence_init_pool();

	if (!addr) {
		/*
		 * The pool is live and will never be deallocated from this point on.
		 * Ignore the pool object from the kmemleak phys object tree, as it would
		 * otherwise overlap with allocations returned by kfence_alloc(), which
		 * are registered with kmemleak through the slab post-alloc hook.
		 */
		kmemleak_ignore_phys(__pa(__kfence_pool));
		return true;
	}

	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
		struct slab *slab = virt_to_slab(p);

		if (!slab)
			continue;
#ifdef CONFIG_MEMCG
		slab->memcg_data = 0;
#endif
		__folio_clear_slab(slab_folio(slab));
	}
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

static bool kfence_init_pool_late(void)
{
	unsigned long addr, free_size;

	addr = kfence_init_pool();

	if (!addr)
		return true;

	/* Same as above. */
	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
#ifdef CONFIG_CONTIG_ALLOC
	free_contig_range(page_to_pfn(virt_to_page((void *)addr)), free_size / PAGE_SIZE);
#else
	free_pages_exact((void *)addr, free_size);
#endif
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations objects_sops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};
DEFINE_SEQ_ATTRIBUTE(objects);

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);
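
/*
 * Example output of /sys/kernel/debug/kfence/stats, per the seq_printf()
 * format above (the values shown are illustrative only):
 *
 *	enabled: 1
 *	currently allocated: 42
 *	total allocations: 3719
 *	total frees: 3677
 *	zombie allocations: 0
 *	total bugs: 0
 *	skipped allocations (incompatible): 12
 *	skipped allocations (capacity): 0
 *	skipped allocations (covered): 120
 */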

/* === Panic Notifier ====================================================== */

static void kfence_check_all_canary(void)
{
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		if (meta->state == KFENCE_OBJECT_ALLOCATED)
			for_each_canary(meta, check_canary_byte);
	}
}

static int kfence_check_canary_callback(struct notifier_block *nb,
					unsigned long reason, void *arg)
{
	kfence_check_all_canary();
	return NOTIFY_OK;
}

static struct notifier_block kfence_check_canary_notifier = {
	.notifier_call = kfence_check_canary_callback,
};

/* === Allocation Gate Timer ================================================ */

static struct delayed_work kfence_timer;

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
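
/*
 * How the gate is consumed (a sketch pieced together from this file): the
 * timer resets kfence_allocation_gate to 0, arming the sample; the first
 * allocation to observe atomic_inc_return(&kfence_allocation_gate) == 1 in
 * __kfence_alloc() below wins the race and wakes this timer task, which then
 * disarms the static key until the next sample interval:
 *
 *	timer:  atomic_set(&kfence_allocation_gate, 0);	// arm
 *	alloc:  if (atomic_inc_return(&kfence_allocation_gate) > 1)
 *			return NULL;			// already consumed
 */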

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

static void kfence_init_enable(void)
{
	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);

	if (kfence_deferrable)
		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
	else
		INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);

	if (kfence_check_on_panic)
		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);

	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void __init kfence_init(void)
{
	stack_hash_seed = get_random_u32();

	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool_early()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	kfence_init_enable();
}

static int kfence_init_late(void)
{
	const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
#ifdef CONFIG_CONTIG_ALLOC
	struct page *pages;

	pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
	if (!pages)
		return -ENOMEM;
	__kfence_pool = page_to_virt(pages);
#else
	if (nr_pages > MAX_ORDER_NR_PAGES) {
		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
		return -EINVAL;
	}
	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
	if (!__kfence_pool)
		return -ENOMEM;
#endif

	if (!kfence_init_pool_late()) {
		pr_err("%s failed\n", __func__);
		return -EBUSY;
	}

	kfence_init_enable();
	return 0;
}

static int kfence_enable_late(void)
{
	if (!__kfence_pool)
		return kfence_init_late();

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("re-enabled\n");
	return 0;
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;
	u32 alloc_stack_hash;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations for this slab, if KFENCE has been disabled for
	 * this slab.
	 */
	if (s->flags & SLAB_SKIP_KFENCE)
		return NULL;

	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
				    alloc_stack_hash);
}

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

#ifdef CONFIG_MEMCG
	KFENCE_WARN_ON(meta->objcg);
#endif
	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stack trace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}
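
/*
 * Summary of the fault classification above (a sketch): odd page indices in
 * the pool are guard pages, so a fault there is reported as an OOB access
 * attributed to the closer of the two neighbouring objects; even page indices
 * (past the two leading guard pages) are object pages, which are only
 * protected while free, so a fault there is reported as a UAF. If no
 * plausible metadata is found, the report falls back to KFENCE_ERROR_INVALID.
 */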