1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * KFENCE guarded object allocator and fault handling.
4 *
5 * Copyright (C) 2020, Google LLC.
6 */
7
8 #define pr_fmt(fmt) "kfence: " fmt
9
10 #include <linux/atomic.h>
11 #include <linux/bug.h>
12 #include <linux/debugfs.h>
13 #include <linux/hash.h>
14 #include <linux/irq_work.h>
15 #include <linux/jhash.h>
16 #include <linux/kasan-enabled.h>
17 #include <linux/kcsan-checks.h>
18 #include <linux/kfence.h>
19 #include <linux/kmemleak.h>
20 #include <linux/list.h>
21 #include <linux/lockdep.h>
22 #include <linux/log2.h>
23 #include <linux/memblock.h>
24 #include <linux/moduleparam.h>
25 #include <linux/nodemask.h>
26 #include <linux/notifier.h>
27 #include <linux/panic_notifier.h>
28 #include <linux/random.h>
29 #include <linux/rcupdate.h>
30 #include <linux/reboot.h>
31 #include <linux/sched/clock.h>
32 #include <linux/seq_file.h>
33 #include <linux/slab.h>
34 #include <linux/spinlock.h>
35 #include <linux/string.h>
36
37 #include <asm/kfence.h>
38
39 #include "kfence.h"
40
41 /* Disables KFENCE on the first warning assuming an irrecoverable error. */
42 #define KFENCE_WARN_ON(cond) \
43 ({ \
44 const bool __cond = WARN_ON(cond); \
45 if (unlikely(__cond)) { \
46 WRITE_ONCE(kfence_enabled, false); \
47 disabled_by_warn = true; \
48 } \
49 __cond; \
50 })
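/*
 * Usage note (illustrative): e.g. KFENCE_WARN_ON(!list_empty(&meta->list))
 * emits a WARN splat and, if the condition is true, permanently disables
 * KFENCE (disabled_by_warn prevents later re-enabling via the sample_interval
 * parameter); the condition's value is returned so callers can still bail out.
 */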
51
52 /* === Data ================================================================= */
53
54 static bool kfence_enabled __read_mostly;
55 static bool disabled_by_warn __read_mostly;
56
57 unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
58 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
59
60 #ifdef MODULE_PARAM_PREFIX
61 #undef MODULE_PARAM_PREFIX
62 #endif
63 #define MODULE_PARAM_PREFIX "kfence."
64
65 static int kfence_enable_late(void);
66 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
67 {
68 unsigned long num;
69 int ret = kstrtoul(val, 0, &num);
70
71 if (ret < 0)
72 return ret;
73
74 /* Using 0 to indicate KFENCE is disabled. */
75 if (!num && READ_ONCE(kfence_enabled)) {
76 pr_info("disabled\n");
77 WRITE_ONCE(kfence_enabled, false);
78 }
79
80 *((unsigned long *)kp->arg) = num;
81
82 if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
83 return disabled_by_warn ? -EINVAL : kfence_enable_late();
84 return 0;
85 }
86
87 static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
88 {
89 if (!READ_ONCE(kfence_enabled))
90 return sprintf(buffer, "0\n");
91
92 return param_get_ulong(buffer, kp);
93 }
94
95 static const struct kernel_param_ops sample_interval_param_ops = {
96 .set = param_set_sample_interval,
97 .get = param_get_sample_interval,
98 };
99 module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
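/*
 * With MODULE_PARAM_PREFIX "kfence.", the interval can be set on the kernel
 * command line via "kfence.sample_interval=<ms>" or at runtime (mode 0600,
 * root only) via /sys/module/kfence/parameters/sample_interval; writing 0
 * disables KFENCE, and a non-zero write re-enables it unless KFENCE was
 * disabled by a warning.
 */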
100
101 /* Pool usage% threshold above which currently covered allocations are skipped. */
102 static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
103 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
104
105 /* Allocation burst count: number of excess KFENCE allocations per sample. */
106 static unsigned int kfence_burst __read_mostly;
107 module_param_named(burst, kfence_burst, uint, 0644);
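/*
 * For example (illustrative), burst=2 allows up to 1 + 2 = 3 guarded
 * allocations per sample interval: see toggle_allocation_gate(), which resets
 * kfence_allocation_gate to -kfence_burst.
 */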
108
109 /* If true, use a deferrable timer. */
110 static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
111 module_param_named(deferrable, kfence_deferrable, bool, 0444);
112
113 /* If true, check all canary bytes on panic. */
114 static bool kfence_check_on_panic __read_mostly;
115 module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
116
117 /* The pool of pages used for guard pages and objects. */
118 char *__kfence_pool __read_mostly;
119 EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
120
121 /*
122 * Per-object metadata, with one-to-one mapping of object metadata to
123 * backing pages (in __kfence_pool).
124 */
125 static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
126 struct kfence_metadata *kfence_metadata __read_mostly;
127
128 /*
129 * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
130 * So introduce kfence_metadata_init to initialize metadata, and then make
131 * kfence_metadata visible after initialization is successful. This prevents
132 * potential UAF or access to uninitialized metadata.
133 */
134 static struct kfence_metadata *kfence_metadata_init __read_mostly;
135
136 /* Freelist with available objects. */
137 DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
138 static struct list_head kfence_freelist __guarded_by(&kfence_freelist_lock) = LIST_HEAD_INIT(kfence_freelist);
139
140 /*
141 * The static key to set up a KFENCE allocation; or if static keys are not used
142 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
143 */
144 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
145
146 /* Gates allocations: at most 1 + kfence_burst guarded allocations succeed per sample interval. */
147 atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
148
149 /*
150 * A Counting Bloom filter of allocation coverage: limits currently covered
151 * allocations of the same source filling up the pool.
152 *
153 * Assuming a range of 15%-85% unique allocations in the pool at any point in
154 * time, the below parameters provide a probability of 0.02-0.33 for false
155 * positive hits respectively:
156 *
157 * P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
158 */
159 #define ALLOC_COVERED_HNUM 2
160 #define ALLOC_COVERED_ORDER (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
161 #define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER)
162 #define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER)
163 #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1)
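/*
 * Illustrative sizing: with CONFIG_KFENCE_NUM_OBJECTS=255,
 * ALLOC_COVERED_ORDER = const_ilog2(255) + 2 = 9, so the filter has
 * ALLOC_COVERED_SIZE = 512 counters, and each allocation stack hash is folded
 * into ALLOC_COVERED_HNUM = 2 counter indices via hash_32().
 */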
164 static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
165
166 /* Stack depth used to determine uniqueness of an allocation. */
167 #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
168
169 /*
170 * Randomness for stack hashes, making the same collisions across reboots and
171 * different machines less likely.
172 */
173 static u32 stack_hash_seed __ro_after_init;
174
175 /* Statistics counters for debugfs. */
176 enum kfence_counter_id {
177 KFENCE_COUNTER_ALLOCATED,
178 KFENCE_COUNTER_ALLOCS,
179 KFENCE_COUNTER_FREES,
180 KFENCE_COUNTER_ZOMBIES,
181 KFENCE_COUNTER_BUGS,
182 KFENCE_COUNTER_SKIP_INCOMPAT,
183 KFENCE_COUNTER_SKIP_CAPACITY,
184 KFENCE_COUNTER_SKIP_COVERED,
185 KFENCE_COUNTER_COUNT,
186 };
187 static atomic_long_t counters[KFENCE_COUNTER_COUNT];
188 static const char *const counter_names[] = {
189 [KFENCE_COUNTER_ALLOCATED] = "currently allocated",
190 [KFENCE_COUNTER_ALLOCS] = "total allocations",
191 [KFENCE_COUNTER_FREES] = "total frees",
192 [KFENCE_COUNTER_ZOMBIES] = "zombie allocations",
193 [KFENCE_COUNTER_BUGS] = "total bugs",
194 [KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)",
195 [KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)",
196 [KFENCE_COUNTER_SKIP_COVERED] = "skipped allocations (covered)",
197 };
198 static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
199
200 /* === Internals ============================================================ */
201
202 static inline bool should_skip_covered(void)
203 {
204 unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;
205
206 return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
207 }
208
209 static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
210 {
211 num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
212 num_entries = filter_irq_stacks(stack_entries, num_entries);
213 return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
214 }
215
216 /*
217 * Adds (or subtracts, if @val is negative) count @val for allocation stack
218 * trace hash @alloc_stack_hash in the Counting Bloom filter.
219 */
220 static void alloc_covered_add(u32 alloc_stack_hash, int val)
221 {
222 int i;
223
224 for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
225 atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
226 alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
227 }
228 }
229
230 /*
231 * Returns true if the allocation stack trace hash @alloc_stack_hash is
232 * currently contained (non-zero count) in Counting Bloom filter.
233 */
234 static bool alloc_covered_contains(u32 alloc_stack_hash)
235 {
236 int i;
237
238 for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
239 if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
240 return false;
241 alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
242 }
243
244 return true;
245 }
246
247 static bool kfence_protect(unsigned long addr)
248 {
249 return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
250 }
251
252 static bool kfence_unprotect(unsigned long addr)
253 {
254 return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
255 }
256
257 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
258 __must_hold(&meta->lock)
259 {
260 unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
261 unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
262
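/*
 * Layout reminder: pages 0 and 1 of the pool and every odd-indexed page are
 * guard pages, so object i's data page starts at offset (i + 1) * 2 * PAGE_SIZE
 * from __kfence_pool (see kfence_init_pool()).
 */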
263 /* The checks do not affect performance; only called from slow-paths. */
264
265 /* Only call with a pointer into kfence_metadata. */
266 if (KFENCE_WARN_ON(meta < kfence_metadata ||
267 meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
268 return 0;
269
270 /*
271 * This metadata object only ever maps to 1 page; verify that the stored
272 * address is in the expected range.
273 */
274 if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
275 return 0;
276
277 return pageaddr;
278 }
279
280 static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
281 {
282 enum kfence_object_state state = READ_ONCE(meta->state);
283
284 return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING;
285 }
286
287 /*
288 * Update the object's metadata state, including updating the alloc/free stacks
289 * depending on the state transition.
290 */
291 static noinline void
292 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
293 unsigned long *stack_entries, size_t num_stack_entries)
294 __must_hold(&meta->lock)
295 {
296 struct kfence_track *track =
297 next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;
298
299 lockdep_assert_held(&meta->lock);
300
301 /* The free stack was already saved when queueing the RCU callback; skip. */
302 if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING)
303 goto out;
304
305 if (stack_entries) {
306 memcpy(track->stack_entries, stack_entries,
307 num_stack_entries * sizeof(stack_entries[0]));
308 } else {
309 /*
310 * Skip over 1 (this) function; noinline ensures we do not
311 * accidentally skip over the caller by never inlining.
312 */
313 num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
314 }
315 track->num_stack_entries = num_stack_entries;
316 track->pid = task_pid_nr(current);
317 track->cpu = raw_smp_processor_id();
318 track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
319
320 out:
321 /*
322 * Pairs with READ_ONCE() in
323 * kfence_shutdown_cache(),
324 * kfence_handle_page_fault().
325 */
326 WRITE_ONCE(meta->state, next);
327 }
328
329 #ifdef CONFIG_KMSAN
330 #define check_canary_attributes noinline __no_kmsan_checks
331 #else
332 #define check_canary_attributes inline
333 #endif
334
335 /* Check canary byte at @addr. */
336 static check_canary_attributes bool check_canary_byte(u8 *addr)
337 {
338 struct kfence_metadata *meta;
339 unsigned long flags;
340
341 if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
342 return true;
343
344 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
345
346 meta = addr_to_metadata((unsigned long)addr);
347 raw_spin_lock_irqsave(&meta->lock, flags);
348 kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
349 raw_spin_unlock_irqrestore(&meta->lock, flags);
350
351 return false;
352 }
353
354 static inline void set_canary(const struct kfence_metadata *meta)
355 {
356 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
357 unsigned long addr = pageaddr;
358
359 /*
360 * The canary pattern may also overwrite part of the object memory itself;
361 * this is harmless, as the user is expected to initialize the object before use.
362 */
363 for (; addr < meta->addr; addr += sizeof(u64))
364 *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
365
366 addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
367 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
368 *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
369 }
370
371 static check_canary_attributes void
372 check_canary(const struct kfence_metadata *meta)
373 {
374 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
375 unsigned long addr = pageaddr;
376
377 /*
378 * We'll iterate over each canary byte per-side until a corrupted byte
379 * is found. However, we'll still iterate over the canary bytes to the
380 * right of the object even if there was an error in the canary bytes to
381 * the left of the object. Specifically, if check_canary_byte()
382 * generates an error, showing both sides might give more clues as to
383 * what the error is about when displaying which bytes were corrupted.
384 */
385
386 /* Apply to left of object. */
387 for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
388 if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
389 break;
390 }
391
392 /*
393 * If corruption was detected within an 8-byte (u64) chunk above, or the
394 * remaining left-hand canary is smaller than a full u64 chunk, check the
395 * remaining bytes one by one to pinpoint the corruption.
396 */
397 for (; addr < meta->addr; addr++) {
398 if (unlikely(!check_canary_byte((u8 *)addr)))
399 break;
400 }
401
402 /* Apply to right of object. */
403 for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
404 if (unlikely(!check_canary_byte((u8 *)addr)))
405 return;
406 }
407 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
408 if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {
409
410 for (; addr - pageaddr < PAGE_SIZE; addr++) {
411 if (!check_canary_byte((u8 *)addr))
412 return;
413 }
414 }
415 }
416 }
417
418 static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
419 unsigned long *stack_entries, size_t num_stack_entries,
420 u32 alloc_stack_hash)
421 {
422 struct kfence_metadata *meta = NULL;
423 unsigned long flags;
424 struct slab *slab;
425 void *addr;
426 const bool random_right_allocate = get_random_u32_below(2);
427 const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
428 !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
429
430 /* Try to obtain a free object. */
431 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
432 if (!list_empty(&kfence_freelist)) {
433 meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
434 list_del_init(&meta->list);
435 }
436 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
437 if (!meta) {
438 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
439 return NULL;
440 }
441
442 if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
443 /*
444 * This is extremely unlikely -- we are reporting on a
445 * use-after-free, which locked meta->lock, and the reporting
446 * code via printk calls kmalloc() which ends up in
447 * kfence_alloc() and tries to grab the same object that we're
448 * reporting on. While it has never been observed, lockdep does
449 * report that there is a possibility of deadlock. Fix it by
450 * using trylock and bailing out gracefully.
451 */
452 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
453 /* Put the object back on the freelist. */
454 list_add_tail(&meta->list, &kfence_freelist);
455 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
456
457 return NULL;
458 }
459
460 meta->addr = metadata_to_pageaddr(meta);
461 /* Unprotect if we're reusing this page. */
462 if (meta->state == KFENCE_OBJECT_FREED)
463 kfence_unprotect(meta->addr);
464
465 /*
466 * Note: for allocations made before RNG initialization, get_random_u32_below()
467 * always returns zero. We still benefit from enabling KFENCE as early as
468 * possible, even when the RNG is not yet available, as this will allow
469 * KFENCE to detect bugs due to earlier allocations. The only downside
470 * is that the out-of-bounds accesses detected are deterministic for
471 * such allocations.
472 */
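/*
 * Allocating on the "right" side places the object end (subject to cache
 * alignment) against the following guard page, so out-of-bounds accesses past
 * the object fault immediately; "left" allocations conversely catch accesses
 * before the object.
 */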
473 if (random_right_allocate) {
474 /* Allocate on the "right" side, re-calculate address. */
475 meta->addr += PAGE_SIZE - size;
476 meta->addr = ALIGN_DOWN(meta->addr, cache->align);
477 }
478
479 addr = (void *)meta->addr;
480
481 /* Update remaining metadata. */
482 metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
483 /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
484 WRITE_ONCE(meta->cache, cache);
485 meta->size = size;
486 meta->alloc_stack_hash = alloc_stack_hash;
487 raw_spin_unlock_irqrestore(&meta->lock, flags);
488
489 alloc_covered_add(alloc_stack_hash, 1);
490
491 /* Set required slab fields. */
492 slab = virt_to_slab(addr);
493 slab->slab_cache = cache;
494 slab->objects = 1;
495
496 /* Memory initialization. */
497 set_canary(meta);
498
499 /*
500 * We check slab_want_init_on_alloc() ourselves, rather than letting
501 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
502 * redzone.
503 */
504 if (unlikely(slab_want_init_on_alloc(gfp, cache)))
505 memzero_explicit(addr, size);
506 if (cache->ctor)
507 cache->ctor(addr);
508
509 if (random_fault)
510 kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
511
512 atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
513 atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);
514
515 return addr;
516 }
517
518 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
519 {
520 struct kcsan_scoped_access assert_page_exclusive;
521 u32 alloc_stack_hash;
522 unsigned long flags;
523 bool init;
524
525 raw_spin_lock_irqsave(&meta->lock, flags);
526
527 if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) {
528 /* Invalid or double-free, bail out. */
529 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
530 kfence_report_error((unsigned long)addr, false, NULL, meta,
531 KFENCE_ERROR_INVALID_FREE);
532 raw_spin_unlock_irqrestore(&meta->lock, flags);
533 return;
534 }
535
536 /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
537 kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
538 KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
539 &assert_page_exclusive);
540
541 if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
542 kfence_unprotect((unsigned long)addr); /* To check canary bytes. */
543
544 /* Restore page protection if there was an OOB access. */
545 if (meta->unprotected_page) {
546 memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
547 kfence_protect(meta->unprotected_page);
548 meta->unprotected_page = 0;
549 }
550
551 /* Mark the object as freed. */
552 metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
553 init = slab_want_init_on_free(meta->cache);
554 alloc_stack_hash = meta->alloc_stack_hash;
555 raw_spin_unlock_irqrestore(&meta->lock, flags);
556
557 alloc_covered_add(alloc_stack_hash, -1);
558
559 /* Check canary bytes for memory corruption. */
560 check_canary(meta);
561
562 /*
563 * Clear memory if init-on-free is set. While we protect the page, the
564 * data is still there, and after a use-after-free is detected, we
565 * unprotect the page, so the data is still accessible.
566 */
567 if (!zombie && unlikely(init))
568 memzero_explicit(addr, meta->size);
569
570 /* Protect to detect use-after-frees. */
571 kfence_protect((unsigned long)addr);
572
573 kcsan_end_scoped_access(&assert_page_exclusive);
574 if (!zombie) {
575 /* Add it to the tail of the freelist for reuse. */
576 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
577 KFENCE_WARN_ON(!list_empty(&meta->list));
578 list_add_tail(&meta->list, &kfence_freelist);
579 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
580
581 atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
582 atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
583 } else {
584 /* See kfence_shutdown_cache(). */
585 atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
586 }
587 }
588
589 static void rcu_guarded_free(struct rcu_head *h)
590 {
591 struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);
592
593 kfence_guarded_free((void *)meta->addr, meta, false);
594 }
595
596 /*
597 * Initialization of the KFENCE pool after its allocation.
598 * Returns 0 on success; otherwise returns the address up to
599 * which partial initialization succeeded.
600 */
601 static unsigned long kfence_init_pool(void)
602 __context_unsafe(/* constructor */)
603 {
604 unsigned long addr, start_pfn;
605 int i, rand;
606
607 if (!arch_kfence_init_pool())
608 return (unsigned long)__kfence_pool;
609
610 addr = (unsigned long)__kfence_pool;
611 start_pfn = PHYS_PFN(virt_to_phys(__kfence_pool));
612
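/*
 * Resulting pool layout: pages 0 and 1 are guard pages, then object pages and
 * guard pages alternate, i.e. page 2*i + 2 backs object i and page 2*i + 3 is
 * its right-hand redzone.
 */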
613 /*
614 * Set up object pages: they must have PGTY_slab set to avoid freeing
615 * them as real pages.
616 *
617 * We also want to avoid inserting kfence_free() in the kfree()
618 * fast-path in SLUB, and therefore need to ensure kfree() correctly
619 * enters __slab_free() slow-path.
620 */
621 for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
622 struct page *page;
623
624 if (!i || (i % 2))
625 continue;
626
627 page = pfn_to_page(start_pfn + i);
628 __SetPageSlab(page);
629 #ifdef CONFIG_MEMCG
630 struct slab *slab = page_slab(page);
631 slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
632 MEMCG_DATA_OBJEXTS;
633 #endif
634 }
635
636 /*
637 * Protect the first 2 pages. The first page is mostly unnecessary, and
638 * merely serves as an extended guard page. However, adding one
639 * additional page in the beginning gives us an even number of pages,
640 * which simplifies the mapping of address to metadata index.
641 */
642 for (i = 0; i < 2; i++) {
643 if (unlikely(!kfence_protect(addr)))
644 return addr;
645
646 addr += PAGE_SIZE;
647 }
648
649 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
650 struct kfence_metadata *meta = &kfence_metadata_init[i];
651
652 /* Initialize metadata. */
653 INIT_LIST_HEAD(&meta->list);
654 raw_spin_lock_init(&meta->lock);
655 meta->state = KFENCE_OBJECT_UNUSED;
656 /* Use addr to randomize the freelist. */
657 meta->addr = i;
658
659 /* Protect the right redzone. */
660 if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE)))
661 goto reset_slab;
662 }
663
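/* Fisher-Yates shuffle of the temporary indices stored in ->addr above. */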
664 for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) {
665 rand = get_random_u32_below(i);
666 swap(kfence_metadata_init[i - 1].addr, kfence_metadata_init[rand].addr);
667 }
668
669 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
670 struct kfence_metadata *meta_1 = &kfence_metadata_init[i];
671 struct kfence_metadata *meta_2 = &kfence_metadata_init[meta_1->addr];
672
673 list_add_tail(&meta_2->list, &kfence_freelist);
674 }
675 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
676 kfence_metadata_init[i].addr = addr;
677 addr += 2 * PAGE_SIZE;
678 }
679
680 /*
681 * Make kfence_metadata visible only when initialization is successful.
682 * Otherwise, if the initialization fails and kfence_metadata is freed,
683 * it may cause UAF in kfence_shutdown_cache().
684 */
685 smp_store_release(&kfence_metadata, kfence_metadata_init);
686 return 0;
687
688 reset_slab:
689 addr += 2 * i * PAGE_SIZE;
690 for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
691 struct page *page;
692
693 if (!i || (i % 2))
694 continue;
695
696 page = pfn_to_page(start_pfn + i);
697 #ifdef CONFIG_MEMCG
698 struct slab *slab = page_slab(page);
699 slab->obj_exts = 0;
700 #endif
701 __ClearPageSlab(page);
702 }
703
704 return addr;
705 }
706
707 static bool __init kfence_init_pool_early(void)
708 {
709 unsigned long addr;
710
711 if (!__kfence_pool)
712 return false;
713
714 addr = kfence_init_pool();
715
716 if (!addr) {
717 /*
718 * The pool is live and will never be deallocated from this point on.
719 * Ignore the pool object from the kmemleak phys object tree, as it would
720 * otherwise overlap with allocations returned by kfence_alloc(), which
721 * are registered with kmemleak through the slab post-alloc hook.
722 */
723 kmemleak_ignore_phys(__pa(__kfence_pool));
724 return true;
725 }
726
727 /*
728 * Only release unprotected pages, and do not try to go back and change
729 * page attributes due to risk of failing to do so as well. If changing
730 * page attributes for some pages fails, it is very likely that it also
731 * fails for the first page, and therefore expect addr==__kfence_pool in
732 * most failure cases.
733 */
734 memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
735 __kfence_pool = NULL;
736
737 memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
738 kfence_metadata_init = NULL;
739
740 return false;
741 }
742
743 /* === DebugFS Interface ==================================================== */
744
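/*
 * Example /sys/kernel/debug/kfence/stats output (values purely illustrative):
 *
 *   enabled: 1
 *   currently allocated: 42
 *   total allocations: 3120
 *   total frees: 3078
 *   ...
 */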
745 static int stats_show(struct seq_file *seq, void *v)
746 {
747 int i;
748
749 seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
750 for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
751 seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));
752
753 return 0;
754 }
755 DEFINE_SHOW_ATTRIBUTE(stats);
756
757 /*
758 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
759 * start_object() and next_object() return the object index + 1, because NULL is used
760 * to stop iteration.
761 */
762 static void *start_object(struct seq_file *seq, loff_t *pos)
763 {
764 if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
765 return (void *)((long)*pos + 1);
766 return NULL;
767 }
768
769 static void stop_object(struct seq_file *seq, void *v)
770 {
771 }
772
773 static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
774 {
775 ++*pos;
776 if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
777 return (void *)((long)*pos + 1);
778 return NULL;
779 }
780
781 static int show_object(struct seq_file *seq, void *v)
782 {
783 struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
784 unsigned long flags;
785
786 raw_spin_lock_irqsave(&meta->lock, flags);
787 kfence_print_object(seq, meta);
788 raw_spin_unlock_irqrestore(&meta->lock, flags);
789 seq_puts(seq, "---------------------------------\n");
790
791 return 0;
792 }
793
794 static const struct seq_operations objects_sops = {
795 .start = start_object,
796 .next = next_object,
797 .stop = stop_object,
798 .show = show_object,
799 };
800 DEFINE_SEQ_ATTRIBUTE(objects);
801
802 static int kfence_debugfs_init(void)
803 {
804 struct dentry *kfence_dir;
805
806 if (!READ_ONCE(kfence_enabled))
807 return 0;
808
809 kfence_dir = debugfs_create_dir("kfence", NULL);
810 debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
811 debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
812 return 0;
813 }
814
815 late_initcall(kfence_debugfs_init);
816
817 /* === Panic Notifier ====================================================== */
818
819 static void kfence_check_all_canary(void)
820 {
821 int i;
822
823 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
824 struct kfence_metadata *meta = &kfence_metadata[i];
825
826 if (kfence_obj_allocated(meta))
827 check_canary(meta);
828 }
829 }
830
831 static int kfence_check_canary_callback(struct notifier_block *nb,
832 unsigned long reason, void *arg)
833 {
834 kfence_check_all_canary();
835 return NOTIFY_OK;
836 }
837
838 static struct notifier_block kfence_check_canary_notifier = {
839 .notifier_call = kfence_check_canary_callback,
840 };
841
842 /* === Allocation Gate Timer ================================================ */
843
844 static struct delayed_work kfence_timer;
845
846 #ifdef CONFIG_KFENCE_STATIC_KEYS
847 /* Wait queue to wake up allocation-gate timer task. */
848 static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
849
850 static int kfence_reboot_callback(struct notifier_block *nb,
851 unsigned long action, void *data)
852 {
853 /*
854 * Disable KFENCE to avoid static key IPI synchronization during
855 * late shutdown/kexec.
856 */
857 WRITE_ONCE(kfence_enabled, false);
858 /* Cancel any pending timer work */
859 cancel_delayed_work(&kfence_timer);
860 /*
861 * Wake up any blocked toggle_allocation_gate() so it can complete
862 * early while the system is still able to handle IPIs.
863 */
864 wake_up(&allocation_wait);
865
866 return NOTIFY_OK;
867 }
868
869 static struct notifier_block kfence_reboot_notifier = {
870 .notifier_call = kfence_reboot_callback,
871 .priority = INT_MAX, /* Run early to stop timers ASAP */
872 };
873
874 static void wake_up_kfence_timer(struct irq_work *work)
875 {
876 wake_up(&allocation_wait);
877 }
878 static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
879 #endif
880
881 /*
882 * Set up delayed work, which will enable and disable the static key. We need to
883 * use a work queue (rather than a simple timer), since enabling and disabling a
884 * static key cannot be done from an interrupt.
885 *
886 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
887 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
888 * more aggressive sampling intervals), we could get away with a variant that
889 * avoids IPIs, at the cost of not immediately capturing allocations if the
890 * instructions remain cached.
891 */
892 static void toggle_allocation_gate(struct work_struct *work)
893 {
894 if (!READ_ONCE(kfence_enabled))
895 return;
896
897 atomic_set(&kfence_allocation_gate, -kfence_burst);
898 #ifdef CONFIG_KFENCE_STATIC_KEYS
899 /* Enable static key, and await allocation to happen. */
900 static_branch_enable(&kfence_allocation_key);
901
902 wait_event_idle(allocation_wait,
903 atomic_read(&kfence_allocation_gate) > 0 ||
904 !READ_ONCE(kfence_enabled));
905
906 /* Disable static key and reset timer. */
907 static_branch_disable(&kfence_allocation_key);
908 #endif
909 queue_delayed_work(system_dfl_wq, &kfence_timer,
910 msecs_to_jiffies(kfence_sample_interval));
911 }
912
913 /* === Public interface ===================================================== */
914
915 void __init kfence_alloc_pool_and_metadata(void)
916 {
917 if (!kfence_sample_interval)
918 return;
919
920 /*
921 * If KASAN hardware tags are enabled, disable KFENCE, because it
922 * does not support MTE yet.
923 */
924 if (kasan_hw_tags_enabled()) {
925 pr_info("disabled as KASAN HW tags are enabled\n");
926 if (__kfence_pool) {
927 memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
928 __kfence_pool = NULL;
929 }
930 kfence_sample_interval = 0;
931 return;
932 }
933
934 /*
935 * If the pool has already been initialized by arch, there is no need to
936 * re-allocate the memory pool.
937 */
938 if (!__kfence_pool)
939 __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
940
941 if (!__kfence_pool) {
942 pr_err("failed to allocate pool\n");
943 return;
944 }
945
946 /* The memory allocated by memblock has been zeroed out. */
947 kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
948 if (!kfence_metadata_init) {
949 pr_err("failed to allocate metadata\n");
950 memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
951 __kfence_pool = NULL;
952 }
953 }
954
955 static void kfence_init_enable(void)
956 {
957 if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
958 static_branch_enable(&kfence_allocation_key);
959
960 if (kfence_deferrable)
961 INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
962 else
963 INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);
964
965 if (kfence_check_on_panic)
966 atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
967
968 #ifdef CONFIG_KFENCE_STATIC_KEYS
969 register_reboot_notifier(&kfence_reboot_notifier);
970 #endif
971
972 WRITE_ONCE(kfence_enabled, true);
973 queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
974
975 pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
976 CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
977 (void *)(__kfence_pool + KFENCE_POOL_SIZE));
978 }
979
980 void __init kfence_init(void)
981 {
982 stack_hash_seed = get_random_u32();
983
984 /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
985 if (!kfence_sample_interval)
986 return;
987
988 if (!kfence_init_pool_early()) {
989 pr_err("%s failed\n", __func__);
990 return;
991 }
992
993 kfence_init_enable();
994 }
995
996 static int kfence_init_late(void)
997 {
998 const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
999 const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
1000 unsigned long addr = (unsigned long)__kfence_pool;
1001 unsigned long free_size = KFENCE_POOL_SIZE;
1002 int err = -ENOMEM;
1003
1004 #ifdef CONFIG_CONTIG_ALLOC
1005 struct page *pages;
1006
1007 pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL | __GFP_SKIP_KASAN,
1008 first_online_node, NULL);
1009 if (!pages)
1010 return -ENOMEM;
1011
1012 __kfence_pool = page_to_virt(pages);
1013 pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL | __GFP_SKIP_KASAN,
1014 first_online_node, NULL);
1015 if (pages)
1016 kfence_metadata_init = page_to_virt(pages);
1017 #else
1018 if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
1019 nr_pages_meta > MAX_ORDER_NR_PAGES) {
1020 pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
1021 return -EINVAL;
1022 }
1023
1024 __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE,
1025 GFP_KERNEL | __GFP_SKIP_KASAN);
1026 if (!__kfence_pool)
1027 return -ENOMEM;
1028
1029 kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE,
1030 GFP_KERNEL | __GFP_SKIP_KASAN);
1031 #endif
1032
1033 if (!kfence_metadata_init)
1034 goto free_pool;
1035
1036 memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
1037 addr = kfence_init_pool();
1038 if (!addr) {
1039 kfence_init_enable();
1040 kfence_debugfs_init();
1041 return 0;
1042 }
1043
1044 pr_err("%s failed\n", __func__);
1045 free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
1046 err = -EBUSY;
1047
1048 #ifdef CONFIG_CONTIG_ALLOC
1049 free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
1050 nr_pages_meta);
1051 free_pool:
1052 free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
1053 free_size / PAGE_SIZE);
1054 #else
1055 free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
1056 free_pool:
1057 free_pages_exact((void *)addr, free_size);
1058 #endif
1059
1060 kfence_metadata_init = NULL;
1061 __kfence_pool = NULL;
1062 return err;
1063 }
1064
1065 static int kfence_enable_late(void)
1066 {
1067 if (!__kfence_pool)
1068 return kfence_init_late();
1069
1070 WRITE_ONCE(kfence_enabled, true);
1071 queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
1072 pr_info("re-enabled\n");
1073 return 0;
1074 }
1075
1076 void kfence_shutdown_cache(struct kmem_cache *s)
1077 {
1078 unsigned long flags;
1079 struct kfence_metadata *meta;
1080 int i;
1081
1082 /* Pairs with release in kfence_init_pool(). */
1083 if (!smp_load_acquire(&kfence_metadata))
1084 return;
1085
1086 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
1087 bool in_use;
1088
1089 meta = &kfence_metadata[i];
1090
1091 /*
1092 * If we observe some inconsistent cache and state pair where we
1093 * should have returned false here, cache destruction is racing
1094 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
1095 * the lock will not help, as different critical section
1096 * serialization will have the same outcome.
1097 */
1098 if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta))
1099 continue;
1100
1101 raw_spin_lock_irqsave(&meta->lock, flags);
1102 in_use = meta->cache == s && kfence_obj_allocated(meta);
1103 raw_spin_unlock_irqrestore(&meta->lock, flags);
1104
1105 if (in_use) {
1106 /*
1107 * This cache still has allocations, and we should not
1108 * release them back into the freelist so they can still
1109 * safely be used and retain the kernel's default
1110 * behaviour of keeping the allocations alive (leak the
1111 * cache); however, they effectively become "zombie
1112 * allocations" as the KFENCE objects are the only ones
1113 * still in use and the owning cache is being destroyed.
1114 *
1115 * We mark them freed, so that any subsequent use shows
1116 * more useful error messages that will include stack
1117 * traces of the user of the object, the original
1118 * allocation, and caller to shutdown_cache().
1119 */
1120 kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
1121 }
1122 }
1123
1124 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
1125 meta = &kfence_metadata[i];
1126
1127 /* See above. */
1128 if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
1129 continue;
1130
1131 raw_spin_lock_irqsave(&meta->lock, flags);
1132 if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
1133 meta->cache = NULL;
1134 raw_spin_unlock_irqrestore(&meta->lock, flags);
1135 }
1136 }
1137
1138 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
1139 {
1140 unsigned long stack_entries[KFENCE_STACK_DEPTH];
1141 size_t num_stack_entries;
1142 u32 alloc_stack_hash;
1143 int allocation_gate;
1144
1145 /*
1146 * Perform size check before switching kfence_allocation_gate, so that
1147 * we don't disable KFENCE without making an allocation.
1148 */
1149 if (size > PAGE_SIZE) {
1150 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1151 return NULL;
1152 }
1153
1154 /*
1155 * Skip allocations from non-default zones, including DMA. We cannot
1156 * guarantee that pages in the KFENCE pool will have the requested
1157 * properties (e.g. reside in DMAable memory).
1158 */
1159 if ((flags & GFP_ZONEMASK) ||
1160 ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
1161 (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
1162 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1163 return NULL;
1164 }
1165
1166 /*
1167 * Skip allocations for this slab, if KFENCE has been disabled for
1168 * this slab.
1169 */
1170 if (s->flags & SLAB_SKIP_KFENCE)
1171 return NULL;
1172
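/*
 * The sample timer resets kfence_allocation_gate to -kfence_burst each
 * interval; only the first 1 + kfence_burst callers to pass this point get a
 * guarded allocation, everyone else bails out below.
 */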
1173 allocation_gate = atomic_inc_return(&kfence_allocation_gate);
1174 if (allocation_gate > 1)
1175 return NULL;
1176 #ifdef CONFIG_KFENCE_STATIC_KEYS
1177 /*
1178 * waitqueue_active() is fully ordered after the update of
1179 * kfence_allocation_gate per atomic_inc_return().
1180 */
1181 if (allocation_gate == 1 && waitqueue_active(&allocation_wait)) {
1182 /*
1183 * Calling wake_up() here may deadlock when allocations happen
1184 * from within timer code. Use an irq_work to defer it.
1185 */
1186 irq_work_queue(&wake_up_kfence_timer_work);
1187 }
1188 #endif
1189
1190 if (!READ_ONCE(kfence_enabled))
1191 return NULL;
1192
1193 num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
1194
1195 /*
1196 * Do expensive check for coverage of allocation in slow-path after
1197 * allocation_gate has already become non-zero, even though it might
1198 * mean not making any allocation within a given sample interval.
1199 *
1200 * This ensures reasonable allocation coverage when the pool is almost
1201 * full, including avoiding long-lived allocations of the same source
1202 * filling up the pool (e.g. pagecache allocations).
1203 */
1204 alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
1205 if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
1206 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
1207 return NULL;
1208 }
1209
1210 return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
1211 alloc_stack_hash);
1212 }
1213
1214 size_t kfence_ksize(const void *addr)
1215 {
1216 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1217
1218 /*
1219 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1220 * either a use-after-free or invalid access.
1221 */
1222 return meta ? meta->size : 0;
1223 }
1224
1225 void *kfence_object_start(const void *addr)
1226 {
1227 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1228
1229 /*
1230 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1231 * either a use-after-free or invalid access.
1232 */
1233 return meta ? (void *)meta->addr : NULL;
1234 }
1235
1236 void __kfence_free(void *addr)
1237 {
1238 struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1239
1240 #ifdef CONFIG_MEMCG
1241 KFENCE_WARN_ON(meta->obj_exts.objcg);
1242 #endif
1243 /*
1244 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
1245 * the object, as the object page may be recycled for other-typed
1246 * objects once it has been freed. meta->cache may be NULL if the cache
1247 * was destroyed.
1248 * Save the stack trace here so that reports show where the user freed
1249 * the object.
1250 */
1251 if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) {
1252 unsigned long flags;
1253
1254 raw_spin_lock_irqsave(&meta->lock, flags);
1255 metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0);
1256 raw_spin_unlock_irqrestore(&meta->lock, flags);
1257 call_rcu(&meta->rcu_head, rcu_guarded_free);
1258 } else {
1259 kfence_guarded_free(addr, meta, false);
1260 }
1261 }
1262
1263 bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
1264 {
1265 const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
1266 struct kfence_metadata *to_report = NULL;
1267 unsigned long unprotected_page = 0;
1268 enum kfence_error_type error_type;
1269 unsigned long flags;
1270
1271 if (!is_kfence_address((void *)addr))
1272 return false;
1273
1274 if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
1275 return kfence_unprotect(addr); /* ... unprotect and proceed. */
1276
1277 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
1278
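/*
 * Guard pages sit at odd page indices within the pool (see
 * kfence_init_pool()): a fault on an odd page is an out-of-bounds access next
 * to a live (or freed) object, while a fault on an even (object) page is
 * treated as a use-after-free.
 */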
1279 if (page_index % 2) {
1280 /* This is a redzone, report a buffer overflow. */
1281 struct kfence_metadata *meta;
1282 int distance = 0;
1283
1284 meta = addr_to_metadata(addr - PAGE_SIZE);
1285 if (meta && kfence_obj_allocated(meta)) {
1286 to_report = meta;
1287 /* Data race ok; distance calculation approximate. */
1288 distance = addr - data_race(meta->addr + meta->size);
1289 }
1290
1291 meta = addr_to_metadata(addr + PAGE_SIZE);
1292 if (meta && kfence_obj_allocated(meta)) {
1293 /* Data race ok; distance calculation approximate. */
1294 if (!to_report || distance > data_race(meta->addr) - addr)
1295 to_report = meta;
1296 }
1297
1298 if (!to_report)
1299 goto out;
1300
1301 error_type = KFENCE_ERROR_OOB;
1302 unprotected_page = addr;
1303
1304 /*
1305 * If the object was freed before we took the lock, we can still
1306 * report this as an OOB -- the report will simply show the
1307 * stacktrace of the free as well.
1308 */
1309 } else {
1310 to_report = addr_to_metadata(addr);
1311 if (!to_report)
1312 goto out;
1313
1314 error_type = KFENCE_ERROR_UAF;
1315 /*
1316 * We may race with __kfence_alloc(), and it is possible that a
1317 * freed object may be reallocated. We simply report this as a
1318 * use-after-free, with the stack trace showing the place where
1319 * the object was re-allocated.
1320 */
1321 }
1322
1323 out:
1324 if (to_report) {
1325 raw_spin_lock_irqsave(&to_report->lock, flags);
1326 to_report->unprotected_page = unprotected_page;
1327 kfence_report_error(addr, is_write, regs, to_report, error_type);
1328 raw_spin_unlock_irqrestore(&to_report->lock, flags);
1329 } else {
1330 /* This may be a UAF or OOB access, but we can't be sure. */
1331 kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
1332 }
1333
1334 return kfence_unprotect(addr); /* Unprotect and let access proceed. */
1335 }
1336