1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * KFENCE guarded object allocator and fault handling.
4 *
5 * Copyright (C) 2020, Google LLC.
6 */
7
8 #define pr_fmt(fmt) "kfence: " fmt
9
10 #include <linux/atomic.h>
11 #include <linux/bug.h>
12 #include <linux/debugfs.h>
13 #include <linux/hash.h>
14 #include <linux/irq_work.h>
15 #include <linux/jhash.h>
16 #include <linux/kcsan-checks.h>
17 #include <linux/kfence.h>
18 #include <linux/kmemleak.h>
19 #include <linux/list.h>
20 #include <linux/lockdep.h>
21 #include <linux/log2.h>
22 #include <linux/memblock.h>
23 #include <linux/moduleparam.h>
24 #include <linux/notifier.h>
25 #include <linux/panic_notifier.h>
26 #include <linux/random.h>
27 #include <linux/rcupdate.h>
28 #include <linux/sched/clock.h>
29 #include <linux/seq_file.h>
30 #include <linux/slab.h>
31 #include <linux/spinlock.h>
32 #include <linux/string.h>
33
34 #include <asm/kfence.h>
35
36 #include "kfence.h"
37
38 /* Disables KFENCE on the first warning assuming an irrecoverable error. */
39 #define KFENCE_WARN_ON(cond) \
40 ({ \
41 const bool __cond = WARN_ON(cond); \
42 if (unlikely(__cond)) { \
43 WRITE_ONCE(kfence_enabled, false); \
44 disabled_by_warn = true; \
45 } \
46 __cond; \
47 })
48
49 /* === Data ================================================================= */
50
51 static bool kfence_enabled __read_mostly;
52 static bool disabled_by_warn __read_mostly;
53
54 unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
55 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
56
57 #ifdef MODULE_PARAM_PREFIX
58 #undef MODULE_PARAM_PREFIX
59 #endif
60 #define MODULE_PARAM_PREFIX "kfence."
61
62 static int kfence_enable_late(void);
63 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
64 {
65 unsigned long num;
66 int ret = kstrtoul(val, 0, &num);
67
68 if (ret < 0)
69 return ret;
70
71 /* Using 0 to indicate KFENCE is disabled. */
72 if (!num && READ_ONCE(kfence_enabled)) {
73 pr_info("disabled\n");
74 WRITE_ONCE(kfence_enabled, false);
75 }
76
77 *((unsigned long *)kp->arg) = num;
78
79 if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
80 return disabled_by_warn ? -EINVAL : kfence_enable_late();
81 return 0;
82 }
83
84 static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
85 {
86 if (!READ_ONCE(kfence_enabled))
87 return sprintf(buffer, "0\n");
88
89 return param_get_ulong(buffer, kp);
90 }
91
92 static const struct kernel_param_ops sample_interval_param_ops = {
93 .set = param_set_sample_interval,
94 .get = param_get_sample_interval,
95 };
96 module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
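/*
 * Illustrative usage note (not part of the code): with MODULE_PARAM_PREFIX set
 * to "kfence.", the sample interval can be given on the kernel command line,
 * e.g. "kfence.sample_interval=100", or changed at runtime (0600 permissions)
 * via /sys/module/kfence/parameters/sample_interval; writing 0 disables KFENCE.
 */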
97
98 /* Pool usage threshold (in percent) above which currently covered allocations are skipped. */
99 static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
100 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
101
102 /* Allocation burst count: number of excess KFENCE allocations per sample. */
103 static unsigned int kfence_burst __read_mostly;
104 module_param_named(burst, kfence_burst, uint, 0644);
105
106 /* If true, use a deferrable timer. */
107 static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
108 module_param_named(deferrable, kfence_deferrable, bool, 0444);
109
110 /* If true, check all canary bytes on panic. */
111 static bool kfence_check_on_panic __read_mostly;
112 module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
113
114 /* The pool of pages used for guard pages and objects. */
115 char *__kfence_pool __read_mostly;
116 EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
117
118 /*
119 * Per-object metadata, with one-to-one mapping of object metadata to
120 * backing pages (in __kfence_pool).
121 */
122 static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
123 struct kfence_metadata *kfence_metadata __read_mostly;
124
125 /*
126 * kfence_metadata may be accessed by kfence_shutdown_cache() as soon as it is
127 * non-NULL. Therefore, metadata is first set up via kfence_metadata_init, and
128 * kfence_metadata is only made visible once initialization has succeeded. This
129 * prevents potential UAF or access to uninitialized metadata.
130 */
131 static struct kfence_metadata *kfence_metadata_init __read_mostly;
132
133 /* Freelist with available objects. */
134 static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
135 static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
136
137 /*
138 * The static key to set up a KFENCE allocation; or if static keys are not used
139 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
140 */
141 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
142
143 /* Gates the allocation, ensuring only one succeeds in a given period. */
144 atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
145
146 /*
147 * A Counting Bloom filter of allocation coverage: limits currently covered
148 * allocations of the same source from filling up the pool.
149 *
150 * Assuming a range of 15%-85% unique allocations in the pool at any point in
151 * time, the below parameters provide a probability of 0.02-0.33 for false
152 * positive hits respectively:
153 *
154 * P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
155 */
156 #define ALLOC_COVERED_HNUM 2
157 #define ALLOC_COVERED_ORDER (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
158 #define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER)
159 #define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER)
160 #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1)
161 static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
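/*
 * Sizing example (illustrative, assuming the default CONFIG_KFENCE_NUM_OBJECTS
 * of 255): ALLOC_COVERED_ORDER == const_ilog2(255) + 2 == 9, so the filter has
 * ALLOC_COVERED_SIZE == 512 counters, and each allocation stack hash is mapped
 * to ALLOC_COVERED_HNUM == 2 of them.
 */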
162
163 /* Stack depth used to determine uniqueness of an allocation. */
164 #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
165
166 /*
167 * Randomness for stack hashes, making the same collisions across reboots and
168 * different machines less likely.
169 */
170 static u32 stack_hash_seed __ro_after_init;
171
172 /* Statistics counters for debugfs. */
173 enum kfence_counter_id {
174 KFENCE_COUNTER_ALLOCATED,
175 KFENCE_COUNTER_ALLOCS,
176 KFENCE_COUNTER_FREES,
177 KFENCE_COUNTER_ZOMBIES,
178 KFENCE_COUNTER_BUGS,
179 KFENCE_COUNTER_SKIP_INCOMPAT,
180 KFENCE_COUNTER_SKIP_CAPACITY,
181 KFENCE_COUNTER_SKIP_COVERED,
182 KFENCE_COUNTER_COUNT,
183 };
184 static atomic_long_t counters[KFENCE_COUNTER_COUNT];
185 static const char *const counter_names[] = {
186 [KFENCE_COUNTER_ALLOCATED] = "currently allocated",
187 [KFENCE_COUNTER_ALLOCS] = "total allocations",
188 [KFENCE_COUNTER_FREES] = "total frees",
189 [KFENCE_COUNTER_ZOMBIES] = "zombie allocations",
190 [KFENCE_COUNTER_BUGS] = "total bugs",
191 [KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)",
192 [KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)",
193 [KFENCE_COUNTER_SKIP_COVERED] = "skipped allocations (covered)",
194 };
195 static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
196
197 /* === Internals ============================================================ */
198
199 static inline bool should_skip_covered(void)
200 {
201 unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;
202
203 return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
204 }
205
206 static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
207 {
208 num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
209 num_entries = filter_irq_stacks(stack_entries, num_entries);
210 return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
211 }
212
213 /*
214 * Adds (or subtracts) count @val for allocation stack trace hash
215 * @alloc_stack_hash to/from the Counting Bloom filter.
216 */
217 static void alloc_covered_add(u32 alloc_stack_hash, int val)
218 {
219 int i;
220
221 for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
222 atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
223 alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
224 }
225 }
226
227 /*
228 * Returns true if the allocation stack trace hash @alloc_stack_hash is
229 * currently contained (i.e. has a non-zero count) in the Counting Bloom filter.
230 */
231 static bool alloc_covered_contains(u32 alloc_stack_hash)
232 {
233 int i;
234
235 for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
236 if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
237 return false;
238 alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
239 }
240
241 return true;
242 }
243
244 static bool kfence_protect(unsigned long addr)
245 {
246 return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
247 }
248
249 static bool kfence_unprotect(unsigned long addr)
250 {
251 return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
252 }
253
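/*
 * Pool layout sketch (for illustration): the pool starts with 2 guard pages,
 * followed by alternating object and guard pages, i.e.
 *
 *   [guard][guard][obj 0][guard][obj 1][guard]...
 *
 * so object i's data page starts at __kfence_pool + (i + 1) * 2 * PAGE_SIZE,
 * which is the offset computed below.
 */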
254 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
255 {
256 unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
257 unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
258
259 /* The checks do not affect performance; only called from slow-paths. */
260
261 /* Only call with a pointer into kfence_metadata. */
262 if (KFENCE_WARN_ON(meta < kfence_metadata ||
263 meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
264 return 0;
265
266 /*
267 * This metadata object only ever maps to 1 page; verify that the stored
268 * address is in the expected range.
269 */
270 if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
271 return 0;
272
273 return pageaddr;
274 }
275
276 static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
277 {
278 enum kfence_object_state state = READ_ONCE(meta->state);
279
280 return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING;
281 }
282
283 /*
284 * Update the object's metadata state, including updating the alloc/free stacks
285 * depending on the state transition.
286 */
287 static noinline void
288 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
289 unsigned long *stack_entries, size_t num_stack_entries)
290 {
291 struct kfence_track *track =
292 next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;
293
294 lockdep_assert_held(&meta->lock);
295
296 /* The stack was already saved when initiating the RCU-delayed free; skip. */
297 if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING)
298 goto out;
299
300 if (stack_entries) {
301 memcpy(track->stack_entries, stack_entries,
302 num_stack_entries * sizeof(stack_entries[0]));
303 } else {
304 /*
305 * Skip over 1 (this) function; noinline ensures we do not
306 * accidentally skip over the caller by never inlining.
307 */
308 num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
309 }
310 track->num_stack_entries = num_stack_entries;
311 track->pid = task_pid_nr(current);
312 track->cpu = raw_smp_processor_id();
313 track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
314
315 out:
316 /*
317 * Pairs with READ_ONCE() in
318 * kfence_shutdown_cache(),
319 * kfence_handle_page_fault().
320 */
321 WRITE_ONCE(meta->state, next);
322 }
323
324 #ifdef CONFIG_KMSAN
325 #define check_canary_attributes noinline __no_kmsan_checks
326 #else
327 #define check_canary_attributes inline
328 #endif
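/*
 * Note (assumption): under KMSAN, noinline + __no_kmsan_checks is presumably
 * used so that the canary checks below, which deliberately read redzone bytes
 * whose shadow state KMSAN may consider uninitialized, do not trigger
 * false-positive reports.
 */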
329
330 /* Check canary byte at @addr. */
331 static check_canary_attributes bool check_canary_byte(u8 *addr)
332 {
333 struct kfence_metadata *meta;
334 unsigned long flags;
335
336 if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
337 return true;
338
339 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
340
341 meta = addr_to_metadata((unsigned long)addr);
342 raw_spin_lock_irqsave(&meta->lock, flags);
343 kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
344 raw_spin_unlock_irqrestore(&meta->lock, flags);
345
346 return false;
347 }
348
349 static inline void set_canary(const struct kfence_metadata *meta)
350 {
351 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
352 unsigned long addr = pageaddr;
353
354 /*
355 * The canary pattern may be written over part of the object memory itself;
356 * this is harmless, as the user is expected to initialize the object before using it.
357 */
358 for (; addr < meta->addr; addr += sizeof(u64))
359 *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
360
361 addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
362 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
363 *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
364 }
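/*
 * Canary layout sketch (illustrative): within the object's data page,
 *
 *   [ canary ... | object | ... canary ]
 *
 * i.e. KFENCE_CANARY_PATTERN_U64 fills the page from its start up to
 * meta->addr, and again from ALIGN_DOWN(meta->addr + meta->size, sizeof(u64))
 * up to the end of the page.
 */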
365
366 static check_canary_attributes void
367 check_canary(const struct kfence_metadata *meta)
368 {
369 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
370 unsigned long addr = pageaddr;
371
372 /*
373 * We'll iterate over each canary byte per-side until a corrupted byte
374 * is found. However, we'll still iterate over the canary bytes to the
375 * right of the object even if there was an error in the canary bytes to
376 * the left of the object. Specifically, if check_canary_byte()
377 * generates an error, showing both sides might give more clues as to
378 * what the error is about when displaying which bytes were corrupted.
379 */
380
381 /* Apply to left of object. */
382 for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
383 if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
384 break;
385 }
386
387 /*
388 * If a corruption was detected within one of the u64 words above, or the
389 * remaining canary area is not a whole multiple of u64 words, the
390 * remaining bytes need to be checked one by one.
391 */
392 for (; addr < meta->addr; addr++) {
393 if (unlikely(!check_canary_byte((u8 *)addr)))
394 break;
395 }
396
397 /* Apply to right of object. */
398 for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
399 if (unlikely(!check_canary_byte((u8 *)addr)))
400 return;
401 }
402 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
403 if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {
404
405 for (; addr - pageaddr < PAGE_SIZE; addr++) {
406 if (!check_canary_byte((u8 *)addr))
407 return;
408 }
409 }
410 }
411 }
412
413 static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
414 unsigned long *stack_entries, size_t num_stack_entries,
415 u32 alloc_stack_hash)
416 {
417 struct kfence_metadata *meta = NULL;
418 unsigned long flags;
419 struct slab *slab;
420 void *addr;
421 const bool random_right_allocate = get_random_u32_below(2);
422 const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
423 !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
424
425 /* Try to obtain a free object. */
426 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
427 if (!list_empty(&kfence_freelist)) {
428 meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
429 list_del_init(&meta->list);
430 }
431 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
432 if (!meta) {
433 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
434 return NULL;
435 }
436
437 if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
438 /*
439 * This is extremely unlikely -- we are reporting on a
440 * use-after-free, which locked meta->lock, and the reporting
441 * code via printk calls kmalloc() which ends up in
442 * kfence_alloc() and tries to grab the same object that we're
443 * reporting on. While it has never been observed, lockdep does
444 * report that there is a possibility of deadlock. Fix it by
445 * using trylock and bailing out gracefully.
446 */
447 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
448 /* Put the object back on the freelist. */
449 list_add_tail(&meta->list, &kfence_freelist);
450 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
451
452 return NULL;
453 }
454
455 meta->addr = metadata_to_pageaddr(meta);
456 /* Unprotect if we're reusing this page. */
457 if (meta->state == KFENCE_OBJECT_FREED)
458 kfence_unprotect(meta->addr);
459
460 /*
461 * Note: for allocations made before RNG initialization, the random values
462 * above will always be zero. We still benefit from enabling KFENCE as early as
463 * possible, even when the RNG is not yet available, as this will allow
464 * KFENCE to detect bugs due to earlier allocations. The only downside
465 * is that the out-of-bounds accesses detected are deterministic for
466 * such allocations.
467 */
468 if (random_right_allocate) {
469 /* Allocate on the "right" side, re-calculate address. */
470 meta->addr += PAGE_SIZE - size;
471 meta->addr = ALIGN_DOWN(meta->addr, cache->align);
472 }
473
474 addr = (void *)meta->addr;
475
476 /* Update remaining metadata. */
477 metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
478 /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
479 WRITE_ONCE(meta->cache, cache);
480 meta->size = size;
481 meta->alloc_stack_hash = alloc_stack_hash;
482 raw_spin_unlock_irqrestore(&meta->lock, flags);
483
484 alloc_covered_add(alloc_stack_hash, 1);
485
486 /* Set required slab fields. */
487 slab = virt_to_slab((void *)meta->addr);
488 slab->slab_cache = cache;
489 slab->objects = 1;
490
491 /* Memory initialization. */
492 set_canary(meta);
493
494 /*
495 * We check slab_want_init_on_alloc() ourselves, rather than letting
496 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
497 * redzone.
498 */
499 if (unlikely(slab_want_init_on_alloc(gfp, cache)))
500 memzero_explicit(addr, size);
501 if (cache->ctor)
502 cache->ctor(addr);
503
504 if (random_fault)
505 kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
506
507 atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
508 atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);
509
510 return addr;
511 }
512
513 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
514 {
515 struct kcsan_scoped_access assert_page_exclusive;
516 unsigned long flags;
517 bool init;
518
519 raw_spin_lock_irqsave(&meta->lock, flags);
520
521 if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) {
522 /* Invalid or double-free, bail out. */
523 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
524 kfence_report_error((unsigned long)addr, false, NULL, meta,
525 KFENCE_ERROR_INVALID_FREE);
526 raw_spin_unlock_irqrestore(&meta->lock, flags);
527 return;
528 }
529
530 /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
531 kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
532 KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
533 &assert_page_exclusive);
534
535 if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
536 kfence_unprotect((unsigned long)addr); /* To check canary bytes. */
537
538 /* Restore page protection if there was an OOB access. */
539 if (meta->unprotected_page) {
540 memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
541 kfence_protect(meta->unprotected_page);
542 meta->unprotected_page = 0;
543 }
544
545 /* Mark the object as freed. */
546 metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
547 init = slab_want_init_on_free(meta->cache);
548 raw_spin_unlock_irqrestore(&meta->lock, flags);
549
550 alloc_covered_add(meta->alloc_stack_hash, -1);
551
552 /* Check canary bytes for memory corruption. */
553 check_canary(meta);
554
555 /*
556 * Clear memory if init-on-free is set. While we protect the page, the
557 * data is still there, and after a use-after-free is detected, we
558 * unprotect the page, so the data is still accessible.
559 */
560 if (!zombie && unlikely(init))
561 memzero_explicit(addr, meta->size);
562
563 /* Protect to detect use-after-frees. */
564 kfence_protect((unsigned long)addr);
565
566 kcsan_end_scoped_access(&assert_page_exclusive);
567 if (!zombie) {
568 /* Add it to the tail of the freelist for reuse. */
569 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
570 KFENCE_WARN_ON(!list_empty(&meta->list));
571 list_add_tail(&meta->list, &kfence_freelist);
572 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
573
574 atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
575 atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
576 } else {
577 /* See kfence_shutdown_cache(). */
578 atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
579 }
580 }
581
582 static void rcu_guarded_free(struct rcu_head *h)
583 {
584 struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);
585
586 kfence_guarded_free((void *)meta->addr, meta, false);
587 }
588
589 /*
590 * Initialization of the KFENCE pool after its allocation.
591 * Returns 0 on success; otherwise returns the address up to
592 * which partial initialization succeeded.
593 */
594 static unsigned long kfence_init_pool(void)
595 {
596 unsigned long addr;
597 struct page *pages;
598 int i;
599
600 if (!arch_kfence_init_pool())
601 return (unsigned long)__kfence_pool;
602
603 addr = (unsigned long)__kfence_pool;
604 pages = virt_to_page(__kfence_pool);
605
606 /*
607 * Set up object pages: they must have PG_slab set, to avoid freeing
608 * these as real pages.
609 *
610 * We also want to avoid inserting kfence_free() in the kfree()
611 * fast-path in SLUB, and therefore need to ensure kfree() correctly
612 * enters __slab_free() slow-path.
613 */
614 for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
615 struct slab *slab = page_slab(nth_page(pages, i));
616
617 if (!i || (i % 2))
618 continue;
619
620 __folio_set_slab(slab_folio(slab));
621 #ifdef CONFIG_MEMCG
622 slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
623 MEMCG_DATA_OBJEXTS;
624 #endif
625 }
626
627 /*
628 * Protect the first 2 pages. The first page is mostly unnecessary, and
629 * merely serves as an extended guard page. However, adding one
630 * additional page in the beginning gives us an even number of pages,
631 * which simplifies the mapping of address to metadata index.
632 */
633 for (i = 0; i < 2; i++) {
634 if (unlikely(!kfence_protect(addr)))
635 return addr;
636
637 addr += PAGE_SIZE;
638 }
639
640 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
641 struct kfence_metadata *meta = &kfence_metadata_init[i];
642
643 /* Initialize metadata. */
644 INIT_LIST_HEAD(&meta->list);
645 raw_spin_lock_init(&meta->lock);
646 meta->state = KFENCE_OBJECT_UNUSED;
647 meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
648 list_add_tail(&meta->list, &kfence_freelist);
649
650 /* Protect the right redzone. */
651 if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
652 goto reset_slab;
653
654 addr += 2 * PAGE_SIZE;
655 }
656
657 /*
658 * Make kfence_metadata visible only when initialization is successful.
659 * Otherwise, if the initialization fails and kfence_metadata is freed,
660 * it may cause UAF in kfence_shutdown_cache().
661 */
662 smp_store_release(&kfence_metadata, kfence_metadata_init);
663 return 0;
664
665 reset_slab:
666 for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
667 struct slab *slab = page_slab(nth_page(pages, i));
668
669 if (!i || (i % 2))
670 continue;
671 #ifdef CONFIG_MEMCG
672 slab->obj_exts = 0;
673 #endif
674 __folio_clear_slab(slab_folio(slab));
675 }
676
677 return addr;
678 }
679
680 static bool __init kfence_init_pool_early(void)
681 {
682 unsigned long addr;
683
684 if (!__kfence_pool)
685 return false;
686
687 addr = kfence_init_pool();
688
689 if (!addr) {
690 /*
691 * The pool is live and will never be deallocated from this point on.
692 * Ignore the pool object from the kmemleak phys object tree, as it would
693 * otherwise overlap with allocations returned by kfence_alloc(), which
694 * are registered with kmemleak through the slab post-alloc hook.
695 */
696 kmemleak_ignore_phys(__pa(__kfence_pool));
697 return true;
698 }
699
700 /*
701 * Only release unprotected pages, and do not try to go back and change
702 * page attributes due to risk of failing to do so as well. If changing
703 * page attributes for some pages fails, it is very likely that it also
704 * fails for the first page, and therefore expect addr==__kfence_pool in
705 * most failure cases.
706 */
707 memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
708 __kfence_pool = NULL;
709
710 memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
711 kfence_metadata_init = NULL;
712
713 return false;
714 }
715
716 /* === DebugFS Interface ==================================================== */
717
718 static int stats_show(struct seq_file *seq, void *v)
719 {
720 int i;
721
722 seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
723 for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
724 seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));
725
726 return 0;
727 }
728 DEFINE_SHOW_ATTRIBUTE(stats);
729
730 /*
731 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
732 * start_object() and next_object() return the object index + 1, because NULL is used
733 * to stop iteration.
734 */
735 static void *start_object(struct seq_file *seq, loff_t *pos)
736 {
737 if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
738 return (void *)((long)*pos + 1);
739 return NULL;
740 }
741
742 static void stop_object(struct seq_file *seq, void *v)
743 {
744 }
745
746 static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
747 {
748 ++*pos;
749 if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
750 return (void *)((long)*pos + 1);
751 return NULL;
752 }
753
754 static int show_object(struct seq_file *seq, void *v)
755 {
756 struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
757 unsigned long flags;
758
759 raw_spin_lock_irqsave(&meta->lock, flags);
760 kfence_print_object(seq, meta);
761 raw_spin_unlock_irqrestore(&meta->lock, flags);
762 seq_puts(seq, "---------------------------------\n");
763
764 return 0;
765 }
766
767 static const struct seq_operations objects_sops = {
768 .start = start_object,
769 .next = next_object,
770 .stop = stop_object,
771 .show = show_object,
772 };
773 DEFINE_SEQ_ATTRIBUTE(objects);
774
775 static int kfence_debugfs_init(void)
776 {
777 struct dentry *kfence_dir;
778
779 if (!READ_ONCE(kfence_enabled))
780 return 0;
781
782 kfence_dir = debugfs_create_dir("kfence", NULL);
783 debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
784 debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
785 return 0;
786 }
787
788 late_initcall(kfence_debugfs_init);
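/*
 * Example (illustrative): with debugfs mounted, the counters above can be read
 * via "cat /sys/kernel/debug/kfence/stats", and per-object state via
 * /sys/kernel/debug/kfence/objects.
 */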
789
790 /* === Panic Notifier ====================================================== */
791
792 static void kfence_check_all_canary(void)
793 {
794 int i;
795
796 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
797 struct kfence_metadata *meta = &kfence_metadata[i];
798
799 if (kfence_obj_allocated(meta))
800 check_canary(meta);
801 }
802 }
803
804 static int kfence_check_canary_callback(struct notifier_block *nb,
805 unsigned long reason, void *arg)
806 {
807 kfence_check_all_canary();
808 return NOTIFY_OK;
809 }
810
811 static struct notifier_block kfence_check_canary_notifier = {
812 .notifier_call = kfence_check_canary_callback,
813 };
814
815 /* === Allocation Gate Timer ================================================ */
816
817 static struct delayed_work kfence_timer;
818
819 #ifdef CONFIG_KFENCE_STATIC_KEYS
820 /* Wait queue to wake up allocation-gate timer task. */
821 static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
822
823 static void wake_up_kfence_timer(struct irq_work *work)
824 {
825 wake_up(&allocation_wait);
826 }
827 static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
828 #endif
829
830 /*
831 * Set up delayed work, which will enable and disable the static key. We need to
832 * use a work queue (rather than a simple timer), since enabling and disabling a
833 * static key cannot be done from an interrupt.
834 *
835 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
836 * with a total of 2 IPIs to all CPUs. If this turns out to be a problem in the future (with
837 * more aggressive sampling intervals), we could get away with a variant that
838 * avoids IPIs, at the cost of not immediately capturing allocations if the
839 * instructions remain cached.
840 */
841 static void toggle_allocation_gate(struct work_struct *work)
842 {
843 if (!READ_ONCE(kfence_enabled))
844 return;
845
846 atomic_set(&kfence_allocation_gate, -kfence_burst);
847 #ifdef CONFIG_KFENCE_STATIC_KEYS
848 /* Enable static key, and await allocation to happen. */
849 static_branch_enable(&kfence_allocation_key);
850
851 wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate) > 0);
852
853 /* Disable static key and reset timer. */
854 static_branch_disable(&kfence_allocation_key);
855 #endif
856 queue_delayed_work(system_unbound_wq, &kfence_timer,
857 msecs_to_jiffies(kfence_sample_interval));
858 }
859
860 /* === Public interface ===================================================== */
861
862 void __init kfence_alloc_pool_and_metadata(void)
863 {
864 if (!kfence_sample_interval)
865 return;
866
867 /*
868 * If the pool has already been initialized by arch, there is no need to
869 * re-allocate the memory pool.
870 */
871 if (!__kfence_pool)
872 __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
873
874 if (!__kfence_pool) {
875 pr_err("failed to allocate pool\n");
876 return;
877 }
878
879 /* The memory allocated by memblock has been zeroed out. */
880 kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
881 if (!kfence_metadata_init) {
882 pr_err("failed to allocate metadata\n");
883 memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
884 __kfence_pool = NULL;
885 }
886 }
887
888 static void kfence_init_enable(void)
889 {
890 if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
891 static_branch_enable(&kfence_allocation_key);
892
893 if (kfence_deferrable)
894 INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
895 else
896 INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);
897
898 if (kfence_check_on_panic)
899 atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
900
901 WRITE_ONCE(kfence_enabled, true);
902 queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
903
904 pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
905 CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
906 (void *)(__kfence_pool + KFENCE_POOL_SIZE));
907 }
908
909 void __init kfence_init(void)
910 {
911 stack_hash_seed = get_random_u32();
912
913 /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
914 if (!kfence_sample_interval)
915 return;
916
917 if (!kfence_init_pool_early()) {
918 pr_err("%s failed\n", __func__);
919 return;
920 }
921
922 kfence_init_enable();
923 }
924
925 static int kfence_init_late(void)
926 {
927 const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
928 const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
929 unsigned long addr = (unsigned long)__kfence_pool;
930 unsigned long free_size = KFENCE_POOL_SIZE;
931 int err = -ENOMEM;
932
933 #ifdef CONFIG_CONTIG_ALLOC
934 struct page *pages;
935
936 pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
937 NULL);
938 if (!pages)
939 return -ENOMEM;
940
941 __kfence_pool = page_to_virt(pages);
942 pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
943 NULL);
944 if (pages)
945 kfence_metadata_init = page_to_virt(pages);
946 #else
947 if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
948 nr_pages_meta > MAX_ORDER_NR_PAGES) {
949 pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
950 return -EINVAL;
951 }
952
953 __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
954 if (!__kfence_pool)
955 return -ENOMEM;
956
957 kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
958 #endif
959
960 if (!kfence_metadata_init)
961 goto free_pool;
962
963 memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
964 addr = kfence_init_pool();
965 if (!addr) {
966 kfence_init_enable();
967 kfence_debugfs_init();
968 return 0;
969 }
970
971 pr_err("%s failed\n", __func__);
972 free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
973 err = -EBUSY;
974
975 #ifdef CONFIG_CONTIG_ALLOC
976 free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
977 nr_pages_meta);
978 free_pool:
979 free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
980 free_size / PAGE_SIZE);
981 #else
982 free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
983 free_pool:
984 free_pages_exact((void *)addr, free_size);
985 #endif
986
987 kfence_metadata_init = NULL;
988 __kfence_pool = NULL;
989 return err;
990 }
991
992 static int kfence_enable_late(void)
993 {
994 if (!__kfence_pool)
995 return kfence_init_late();
996
997 WRITE_ONCE(kfence_enabled, true);
998 queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
999 pr_info("re-enabled\n");
1000 return 0;
1001 }
1002
1003 void kfence_shutdown_cache(struct kmem_cache *s)
1004 {
1005 unsigned long flags;
1006 struct kfence_metadata *meta;
1007 int i;
1008
1009 /* Pairs with release in kfence_init_pool(). */
1010 if (!smp_load_acquire(&kfence_metadata))
1011 return;
1012
1013 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
1014 bool in_use;
1015
1016 meta = &kfence_metadata[i];
1017
1018 /*
1019 * If we observe some inconsistent cache and state pair where we
1020 * should have returned false here, cache destruction is racing
1021 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
1022 * the lock will not help, as different critical section
1023 * serialization will have the same outcome.
1024 */
1025 if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta))
1026 continue;
1027
1028 raw_spin_lock_irqsave(&meta->lock, flags);
1029 in_use = meta->cache == s && kfence_obj_allocated(meta);
1030 raw_spin_unlock_irqrestore(&meta->lock, flags);
1031
1032 if (in_use) {
1033 /*
1034 * This cache still has allocations, and we should not
1035 * release them back into the freelist so they can still
1036 * safely be used and retain the kernel's default
1037 * behaviour of keeping the allocations alive (leak the
1038 * cache); however, they effectively become "zombie
1039 * allocations" as the KFENCE objects are the only ones
1040 * still in use and the owning cache is being destroyed.
1041 *
1042 * We mark them freed, so that any subsequent use shows
1043 * more useful error messages that will include stack
1044 * traces of the user of the object, the original
1045 * allocation, and caller to shutdown_cache().
1046 */
1047 kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
1048 }
1049 }
1050
1051 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
1052 meta = &kfence_metadata[i];
1053
1054 /* See above. */
1055 if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
1056 continue;
1057
1058 raw_spin_lock_irqsave(&meta->lock, flags);
1059 if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
1060 meta->cache = NULL;
1061 raw_spin_unlock_irqrestore(&meta->lock, flags);
1062 }
1063 }
1064
1065 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
1066 {
1067 unsigned long stack_entries[KFENCE_STACK_DEPTH];
1068 size_t num_stack_entries;
1069 u32 alloc_stack_hash;
1070 int allocation_gate;
1071
1072 /*
1073 * Perform size check before switching kfence_allocation_gate, so that
1074 * we don't disable KFENCE without making an allocation.
1075 */
1076 if (size > PAGE_SIZE) {
1077 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1078 return NULL;
1079 }
1080
1081 /*
1082 * Skip allocations from non-default zones, including DMA. We cannot
1083 * guarantee that pages in the KFENCE pool will have the requested
1084 * properties (e.g. reside in DMAable memory).
1085 */
1086 if ((flags & GFP_ZONEMASK) ||
1087 (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
1088 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1089 return NULL;
1090 }
1091
1092 /*
1093 * Skip allocations for this slab, if KFENCE has been disabled for
1094 * this slab.
1095 */
1096 if (s->flags & SLAB_SKIP_KFENCE)
1097 return NULL;
1098
1099 allocation_gate = atomic_inc_return(&kfence_allocation_gate);
1100 if (allocation_gate > 1)
1101 return NULL;
1102 #ifdef CONFIG_KFENCE_STATIC_KEYS
1103 /*
1104 * waitqueue_active() is fully ordered after the update of
1105 * kfence_allocation_gate per atomic_inc_return().
1106 */
1107 if (allocation_gate == 1 && waitqueue_active(&allocation_wait)) {
1108 /*
1109 * Calling wake_up() here may deadlock when allocations happen
1110 * from within timer code. Use an irq_work to defer it.
1111 */
1112 irq_work_queue(&wake_up_kfence_timer_work);
1113 }
1114 #endif
1115
1116 if (!READ_ONCE(kfence_enabled))
1117 return NULL;
1118
1119 num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
1120
1121 /*
1122 * Do expensive check for coverage of allocation in slow-path after
1123 * allocation_gate has already become non-zero, even though it might
1124 * mean not making any allocation within a given sample interval.
1125 *
1126 * This ensures reasonable allocation coverage when the pool is almost
1127 * full, including avoiding long-lived allocations of the same source
1128 * filling up the pool (e.g. pagecache allocations).
1129 */
1130 alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
1131 if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
1132 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
1133 return NULL;
1134 }
1135
1136 return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
1137 alloc_stack_hash);
1138 }
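/*
 * Note (for context): allocator code does not call __kfence_alloc() directly;
 * the kfence_alloc() inline wrapper in <linux/kfence.h> first checks
 * kfence_allocation_key and kfence_allocation_gate, so this slow-path only
 * runs for sampled allocations.
 */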
1139
1140 size_t kfence_ksize(const void *addr)
1141 {
1142 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1143
1144 /*
1145 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1146 * either a use-after-free or invalid access.
1147 */
1148 return meta ? meta->size : 0;
1149 }
1150
1151 void *kfence_object_start(const void *addr)
1152 {
1153 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1154
1155 /*
1156 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1157 * either a use-after-free or invalid access.
1158 */
1159 return meta ? (void *)meta->addr : NULL;
1160 }
1161
1162 void __kfence_free(void *addr)
1163 {
1164 struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1165
1166 #ifdef CONFIG_MEMCG
1167 KFENCE_WARN_ON(meta->obj_exts.objcg);
1168 #endif
1169 /*
1170 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
1171 * the object, as the object page may be recycled for other-typed
1172 * objects once it has been freed. meta->cache may be NULL if the cache
1173 * was destroyed.
1174 * Save the stack trace here so that reports show where the user freed
1175 * the object.
1176 */
1177 if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) {
1178 unsigned long flags;
1179
1180 raw_spin_lock_irqsave(&meta->lock, flags);
1181 metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0);
1182 raw_spin_unlock_irqrestore(&meta->lock, flags);
1183 call_rcu(&meta->rcu_head, rcu_guarded_free);
1184 } else {
1185 kfence_guarded_free(addr, meta, false);
1186 }
1187 }
1188
1189 bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
1190 {
1191 const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
1192 struct kfence_metadata *to_report = NULL;
1193 enum kfence_error_type error_type;
1194 unsigned long flags;
1195
1196 if (!is_kfence_address((void *)addr))
1197 return false;
1198
1199 if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
1200 return kfence_unprotect(addr); /* ... unprotect and proceed. */
1201
1202 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
1203
1204 if (page_index % 2) {
1205 /* This is a redzone, report a buffer overflow. */
1206 struct kfence_metadata *meta;
1207 int distance = 0;
1208
1209 meta = addr_to_metadata(addr - PAGE_SIZE);
1210 if (meta && kfence_obj_allocated(meta)) {
1211 to_report = meta;
1212 /* Data race ok; distance calculation approximate. */
1213 distance = addr - data_race(meta->addr + meta->size);
1214 }
1215
1216 meta = addr_to_metadata(addr + PAGE_SIZE);
1217 if (meta && kfence_obj_allocated(meta)) {
1218 /* Data race ok; distance calculation approximate. */
1219 if (!to_report || distance > data_race(meta->addr) - addr)
1220 to_report = meta;
1221 }
1222
1223 if (!to_report)
1224 goto out;
1225
1226 raw_spin_lock_irqsave(&to_report->lock, flags);
1227 to_report->unprotected_page = addr;
1228 error_type = KFENCE_ERROR_OOB;
1229
1230 /*
1231 * If the object was freed before we took the lock, we can still
1232 * report this as an OOB -- the report will simply show the
1233 * stacktrace of the free as well.
1234 */
1235 } else {
1236 to_report = addr_to_metadata(addr);
1237 if (!to_report)
1238 goto out;
1239
1240 raw_spin_lock_irqsave(&to_report->lock, flags);
1241 error_type = KFENCE_ERROR_UAF;
1242 /*
1243 * We may race with __kfence_alloc(), and it is possible that a
1244 * freed object may be reallocated. We simply report this as a
1245 * use-after-free, with the stack trace showing the place where
1246 * the object was re-allocated.
1247 */
1248 }
1249
1250 out:
1251 if (to_report) {
1252 kfence_report_error(addr, is_write, regs, to_report, error_type);
1253 raw_spin_unlock_irqrestore(&to_report->lock, flags);
1254 } else {
1255 /* This may be a UAF or OOB access, but we can't be sure. */
1256 kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
1257 }
1258
1259 return kfence_unprotect(addr); /* Unprotect and let access proceed. */
1260 }
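/*
 * Note (for context): this handler is expected to be called from the
 * architecture's page fault code for faults on addresses within the KFENCE
 * pool, before the fault is treated as a fatal kernel bug.
 */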
1261