1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * KFENCE guarded object allocator and fault handling.
4 *
5 * Copyright (C) 2020, Google LLC.
6 */
7
8 #define pr_fmt(fmt) "kfence: " fmt
9
10 #include <linux/atomic.h>
11 #include <linux/bug.h>
12 #include <linux/debugfs.h>
13 #include <linux/hash.h>
14 #include <linux/irq_work.h>
15 #include <linux/jhash.h>
16 #include <linux/kasan-enabled.h>
17 #include <linux/kcsan-checks.h>
18 #include <linux/kfence.h>
19 #include <linux/kmemleak.h>
20 #include <linux/list.h>
21 #include <linux/lockdep.h>
22 #include <linux/log2.h>
23 #include <linux/memblock.h>
24 #include <linux/moduleparam.h>
25 #include <linux/nodemask.h>
26 #include <linux/notifier.h>
27 #include <linux/panic_notifier.h>
28 #include <linux/random.h>
29 #include <linux/rcupdate.h>
30 #include <linux/reboot.h>
31 #include <linux/sched/clock.h>
32 #include <linux/seq_file.h>
33 #include <linux/slab.h>
34 #include <linux/spinlock.h>
35 #include <linux/string.h>
36
37 #include <asm/kfence.h>
38
39 #include "kfence.h"
40
41 /* Disables KFENCE on the first warning assuming an irrecoverable error. */
42 #define KFENCE_WARN_ON(cond) \
43 ({ \
44 const bool __cond = WARN_ON(cond); \
45 if (unlikely(__cond)) { \
46 WRITE_ONCE(kfence_enabled, false); \
47 disabled_by_warn = true; \
48 } \
49 __cond; \
50 })
51
52 /* === Data ================================================================= */
53
54 bool kfence_enabled __read_mostly;
55 static bool disabled_by_warn __read_mostly;
56
57 unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
58 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
59
60 #ifdef MODULE_PARAM_PREFIX
61 #undef MODULE_PARAM_PREFIX
62 #endif
63 #define MODULE_PARAM_PREFIX "kfence."
64
65 static int kfence_enable_late(void);
66 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
67 {
68 unsigned long num;
69 int ret = kstrtoul(val, 0, &num);
70
71 if (ret < 0)
72 return ret;
73
74 /* Using 0 to indicate KFENCE is disabled. */
75 if (!num && READ_ONCE(kfence_enabled)) {
76 pr_info("disabled\n");
77 WRITE_ONCE(kfence_enabled, false);
78 }
79
80 *((unsigned long *)kp->arg) = num;
81
82 if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
83 return disabled_by_warn ? -EINVAL : kfence_enable_late();
84 return 0;
85 }
86
87 static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
88 {
89 if (!READ_ONCE(kfence_enabled))
90 return sprintf(buffer, "0\n");
91
92 return param_get_ulong(buffer, kp);
93 }
94
95 static const struct kernel_param_ops sample_interval_param_ops = {
96 .set = param_set_sample_interval,
97 .get = param_get_sample_interval,
98 };
99 module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
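/*
 * Illustrative usage (editorial note, not part of the original source): the
 * sample interval can be set on the kernel command line, e.g.
 * "kfence.sample_interval=100" for a 100ms interval, or 0 to disable KFENCE
 * at boot. Since the parameter is registered with mode 0600, it can also be
 * changed at runtime:
 *
 *   echo 100 > /sys/module/kfence/parameters/sample_interval
 *
 * Writing 0 disables KFENCE; writing a non-zero value after boot attempts
 * kfence_enable_late(), unless KFENCE was already disabled by a warning.
 */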
100
101 /* Pool usage threshold (in percent) above which currently covered allocations are skipped. */
102 static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
103 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
104
105 /* Allocation burst count: number of excess KFENCE allocations per sample. */
106 static unsigned int kfence_burst __read_mostly;
107 module_param_named(burst, kfence_burst, uint, 0644);
108
109 /* If true, use a deferrable timer. */
110 static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
111 module_param_named(deferrable, kfence_deferrable, bool, 0444);
112
113 /* If true, check all canary bytes on panic. */
114 static bool kfence_check_on_panic __read_mostly;
115 module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
116
117 /* The pool of pages used for guard pages and objects. */
118 char *__kfence_pool __read_mostly;
119 EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
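/*
 * Illustrative pool layout (sketch derived from kfence_init_pool() and
 * metadata_to_pageaddr() below, not part of the original source): the pool
 * holds two pages per object plus two leading guard pages, and object i sits
 * at page index 2 * i + 2:
 *
 *   page index: |  0  |  1  |   2   |  3  |   4   |  5  | ...
 *   contents:   |guard|guard| obj 0 |guard| obj 1 |guard| ...
 *
 * Guard pages occupy page 0 and all odd page indices, which is what
 * kfence_handle_page_fault() relies on when it treats faults at odd
 * "page_index % 2" offsets as redzone accesses.
 */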
120
121 /*
122 * Per-object metadata, with one-to-one mapping of object metadata to
123 * backing pages (in __kfence_pool).
124 */
125 static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
126 struct kfence_metadata *kfence_metadata __read_mostly;
127
128 /*
129 * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
130 * So introduce kfence_metadata_init to initialize metadata, and then make
131 * kfence_metadata visible after initialization is successful. This prevents
132 * potential UAF or access to uninitialized metadata.
133 */
134 static struct kfence_metadata *kfence_metadata_init __read_mostly;
135
136 /* Freelist with available objects. */
137 DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
138 static struct list_head kfence_freelist __guarded_by(&kfence_freelist_lock) = LIST_HEAD_INIT(kfence_freelist);
139
140 /*
141 * The static key to set up a KFENCE allocation; or if static keys are not used
142 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
143 */
144 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
145
146 /* Gates the allocation, ensuring only one succeeds in a given period. */
147 atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
148
149 /*
150 * A Counting Bloom filter of allocation coverage: limits currently covered
151 * allocations of the same source filling up the pool.
152 *
153 * Assuming a range of 15%-85% unique allocations in the pool at any point in
154 * time, the parameters below provide a probability of 0.02-0.33 for false
155 * positive hits respectively:
156 *
157 * P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
158 */
159 #define ALLOC_COVERED_HNUM 2
160 #define ALLOC_COVERED_ORDER (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
161 #define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER)
162 #define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER)
163 #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1)
164 static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
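/*
 * Worked example (editorial illustration, not part of the original source):
 * in the worst case ALLOC_COVERED_SIZE is only about twice
 * CONFIG_KFENCE_NUM_OBJECTS (when NUM_OBJECTS is just below a power of two).
 * With HNUM = 2 and 85% of the pool holding unique allocations,
 * alloc_traces / SIZE ~= 0.425, so
 *
 *   P = (1 - e^(-2 * 0.425))^2 = (1 - e^-0.85)^2 ~= 0.33,
 *
 * while 15% unique allocations gives (1 - e^-0.15)^2 ~= 0.02, matching the
 * 0.02-0.33 range quoted above.
 */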
165
166 /* Stack depth used to determine uniqueness of an allocation. */
167 #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
168
169 /*
170 * Randomness for stack hashes, making the same collisions across reboots and
171 * different machines less likely.
172 */
173 static u32 stack_hash_seed __ro_after_init;
174
175 /* Statistics counters for debugfs. */
176 enum kfence_counter_id {
177 KFENCE_COUNTER_ALLOCATED,
178 KFENCE_COUNTER_ALLOCS,
179 KFENCE_COUNTER_FREES,
180 KFENCE_COUNTER_ZOMBIES,
181 KFENCE_COUNTER_BUGS,
182 KFENCE_COUNTER_SKIP_INCOMPAT,
183 KFENCE_COUNTER_SKIP_CAPACITY,
184 KFENCE_COUNTER_SKIP_COVERED,
185 KFENCE_COUNTER_COUNT,
186 };
187 static atomic_long_t counters[KFENCE_COUNTER_COUNT];
188 static const char *const counter_names[] = {
189 [KFENCE_COUNTER_ALLOCATED] = "currently allocated",
190 [KFENCE_COUNTER_ALLOCS] = "total allocations",
191 [KFENCE_COUNTER_FREES] = "total frees",
192 [KFENCE_COUNTER_ZOMBIES] = "zombie allocations",
193 [KFENCE_COUNTER_BUGS] = "total bugs",
194 [KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)",
195 [KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)",
196 [KFENCE_COUNTER_SKIP_COVERED] = "skipped allocations (covered)",
197 };
198 static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
199
200 /* === Internals ============================================================ */
201
202 static inline bool should_skip_covered(void)
203 {
204 unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;
205
206 return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
207 }
208
209 static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
210 {
211 num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
212 num_entries = filter_irq_stacks(stack_entries, num_entries);
213 return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
214 }
215
216 /*
217 * Adds (or subtracts) count @val for allocation stack trace hash
218 * @alloc_stack_hash from Counting Bloom filter.
219 */
220 static void alloc_covered_add(u32 alloc_stack_hash, int val)
221 {
222 int i;
223
224 for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
225 atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
226 alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
227 }
228 }
229
230 /*
231 * Returns true if the allocation stack trace hash @alloc_stack_hash is
232 * currently contained (non-zero count) in Counting Bloom filter.
233 */
234 static bool alloc_covered_contains(u32 alloc_stack_hash)
235 {
236 int i;
237
238 for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
239 if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
240 return false;
241 alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
242 }
243
244 return true;
245 }
246
247 static bool kfence_protect(unsigned long addr)
248 {
249 return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
250 }
251
252 static bool kfence_unprotect(unsigned long addr)
253 {
254 return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
255 }
256
257 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
258 __must_hold(&meta->lock)
259 {
260 unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
261 unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
262
263 /* The checks do not affect performance; only called from slow-paths. */
264
265 /* Only call with a pointer into kfence_metadata. */
266 if (KFENCE_WARN_ON(meta < kfence_metadata ||
267 meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
268 return 0;
269
270 /*
271 * This metadata object only ever maps to 1 page; verify that the stored
272 * address is in the expected range.
273 */
274 if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
275 return 0;
276
277 return pageaddr;
278 }
279
280 static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
281 {
282 enum kfence_object_state state = READ_ONCE(meta->state);
283
284 return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING;
285 }
286
287 /*
288 * Update the object's metadata state, including updating the alloc/free stacks
289 * depending on the state transition.
290 */
291 static noinline void
292 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
293 unsigned long *stack_entries, size_t num_stack_entries)
294 __must_hold(&meta->lock)
295 {
296 struct kfence_track *track =
297 next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;
298
299 lockdep_assert_held(&meta->lock);
300
301 * The stack was already saved when RCU freeing was initiated; skip.
302 if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING)
303 goto out;
304
305 if (stack_entries) {
306 memcpy(track->stack_entries, stack_entries,
307 num_stack_entries * sizeof(stack_entries[0]));
308 } else {
309 /*
310 * Skip over 1 (this) function; noinline ensures we do not
311 * accidentally skip over the caller by never inlining.
312 */
313 num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
314 }
315 track->num_stack_entries = num_stack_entries;
316 track->pid = task_pid_nr(current);
317 track->cpu = raw_smp_processor_id();
318 track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
319
320 out:
321 /*
322 * Pairs with READ_ONCE() in
323 * kfence_shutdown_cache(),
324 * kfence_handle_page_fault().
325 */
326 WRITE_ONCE(meta->state, next);
327 }
328
329 #ifdef CONFIG_KMSAN
330 #define check_canary_attributes noinline __no_kmsan_checks
331 #else
332 #define check_canary_attributes inline
333 #endif
334
335 /* Check canary byte at @addr. */
336 static check_canary_attributes bool check_canary_byte(u8 *addr)
337 {
338 struct kfence_metadata *meta;
339 enum kfence_fault fault;
340 unsigned long flags;
341
342 if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
343 return true;
344
345 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
346
347 meta = addr_to_metadata((unsigned long)addr);
348 raw_spin_lock_irqsave(&meta->lock, flags);
349 fault = kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
350 raw_spin_unlock_irqrestore(&meta->lock, flags);
351 kfence_handle_fault(fault);
352
353 return false;
354 }
355
356 static inline void set_canary(const struct kfence_metadata *meta)
357 {
358 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
359 unsigned long addr = pageaddr;
360
361 /*
362 * The canary may be written over part of the object memory; this is
363 * harmless, as the user is expected to initialize the object before use.
364 */
365 for (; addr < meta->addr; addr += sizeof(u64))
366 *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
367
368 addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
369 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
370 *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
371 }
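/*
 * Illustrative layout of an object page after set_canary() (editorial sketch,
 * not part of the original source): everything on the object page outside
 * [meta->addr, meta->addr + size) is filled with the canary pattern, so that
 * check_canary() can detect OOB writes which stayed within the unprotected
 * object page:
 *
 *   | canary ... canary | object (size bytes) | canary ... canary |
 *   ^ page start        ^ meta->addr                              ^ page end
 *
 * Because the canary is written in u64-sized words, the fill may overlap a
 * few bytes at the object's edges; that is the overlap the comment inside
 * set_canary() refers to.
 */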
372
373 static check_canary_attributes void
374 check_canary(const struct kfence_metadata *meta)
375 {
376 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
377 unsigned long addr = pageaddr;
378
379 /*
380 * We'll iterate over each canary byte per-side until a corrupted byte
381 * is found. However, we'll still iterate over the canary bytes to the
382 * right of the object even if there was an error in the canary bytes to
383 * the left of the object. Specifically, if check_canary_byte()
384 * generates an error, showing both sides might give more clues as to
385 * what the error is about when displaying which bytes were corrupted.
386 */
387
388 /* Apply to left of object. */
389 for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
390 if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
391 break;
392 }
393
394 /*
395 * If a 64-bit canary word is corrupted, or the remaining canary bytes
396 * do not fill a whole 64-bit word, fall back to checking the canary
397 * bytes one by one.
398 */
399 for (; addr < meta->addr; addr++) {
400 if (unlikely(!check_canary_byte((u8 *)addr)))
401 break;
402 }
403
404 /* Apply to right of object. */
405 for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
406 if (unlikely(!check_canary_byte((u8 *)addr)))
407 return;
408 }
409 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
410 if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {
411
412 for (; addr - pageaddr < PAGE_SIZE; addr++) {
413 if (!check_canary_byte((u8 *)addr))
414 return;
415 }
416 }
417 }
418 }
419
420 static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
421 unsigned long *stack_entries, size_t num_stack_entries,
422 u32 alloc_stack_hash)
423 {
424 struct kfence_metadata *meta = NULL;
425 unsigned long flags;
426 struct slab *slab;
427 void *addr;
428 const bool random_right_allocate = get_random_u32_below(2);
429 const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
430 !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
431
432 /* Try to obtain a free object. */
433 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
434 if (!list_empty(&kfence_freelist)) {
435 meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
436 list_del_init(&meta->list);
437 }
438 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
439 if (!meta) {
440 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
441 return NULL;
442 }
443
444 if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
445 /*
446 * This is extremely unlikely -- we are reporting on a
447 * use-after-free, which locked meta->lock, and the reporting
448 * code via printk calls kmalloc() which ends up in
449 * kfence_alloc() and tries to grab the same object that we're
450 * reporting on. While it has never been observed, lockdep does
451 * report that there is a possibility of deadlock. Fix it by
452 * using trylock and bailing out gracefully.
453 */
454 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
455 /* Put the object back on the freelist. */
456 list_add_tail(&meta->list, &kfence_freelist);
457 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
458
459 return NULL;
460 }
461
462 meta->addr = metadata_to_pageaddr(meta);
463 /* Unprotect if we're reusing this page. */
464 if (meta->state == KFENCE_OBJECT_FREED)
465 kfence_unprotect(meta->addr);
466
467 /*
468 * Note: for allocations made before RNG initialization, get_random_u32_below()
469 * always returns zero. We still benefit from enabling KFENCE as early as
470 * possible, even when the RNG is not yet available, as this will allow
471 * KFENCE to detect bugs due to earlier allocations. The only downside
472 * is that the out-of-bounds accesses detected are deterministic for
473 * such allocations.
474 */
475 if (random_right_allocate) {
476 /* Allocate on the "right" side, re-calculate address. */
477 meta->addr += PAGE_SIZE - size;
478 meta->addr = ALIGN_DOWN(meta->addr, cache->align);
479 }
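	/*
	 * Illustrative consequence of the placement above (editorial note, not
	 * part of the original source): a left-aligned object starts at the
	 * page boundary, so an out-of-bounds access before the object
	 * immediately faults on the preceding guard page, while a
	 * right-aligned object ends at (or near) the page boundary, so an
	 * out-of-bounds access past the end faults on the following guard
	 * page. The random choice gives each allocation a chance to catch
	 * either kind of OOB via a page fault; the other side is only covered
	 * by the canary check on free.
	 */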
480
481 addr = (void *)meta->addr;
482
483 /* Update remaining metadata. */
484 metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
485 /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
486 WRITE_ONCE(meta->cache, cache);
487 meta->size = size;
488 meta->alloc_stack_hash = alloc_stack_hash;
489 raw_spin_unlock_irqrestore(&meta->lock, flags);
490
491 alloc_covered_add(alloc_stack_hash, 1);
492
493 /* Set required slab fields. */
494 slab = virt_to_slab(addr);
495 slab->slab_cache = cache;
496 slab->objects = 1;
497
498 /* Memory initialization. */
499 set_canary(meta);
500
501 /*
502 * We check slab_want_init_on_alloc() ourselves, rather than letting
503 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
504 * redzone.
505 */
506 if (unlikely(slab_want_init_on_alloc(gfp, cache)))
507 memzero_explicit(addr, size);
508 if (cache->ctor)
509 cache->ctor(addr);
510
511 if (random_fault)
512 kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
513
514 atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
515 atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);
516
517 return addr;
518 }
519
520 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
521 {
522 struct kcsan_scoped_access assert_page_exclusive;
523 u32 alloc_stack_hash;
524 unsigned long flags;
525 bool init;
526
527 raw_spin_lock_irqsave(&meta->lock, flags);
528
529 if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) {
530 enum kfence_fault fault;
531
532 /* Invalid or double-free, bail out. */
533 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
534 fault = kfence_report_error((unsigned long)addr, false, NULL, meta,
535 KFENCE_ERROR_INVALID_FREE);
536 raw_spin_unlock_irqrestore(&meta->lock, flags);
537 kfence_handle_fault(fault);
538 return;
539 }
540
541 /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
542 kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
543 KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
544 &assert_page_exclusive);
545
546 if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
547 kfence_unprotect((unsigned long)addr); /* To check canary bytes. */
548
549 /* Restore page protection if there was an OOB access. */
550 if (meta->unprotected_page) {
551 memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
552 kfence_protect(meta->unprotected_page);
553 meta->unprotected_page = 0;
554 }
555
556 /* Mark the object as freed. */
557 metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
558 init = slab_want_init_on_free(meta->cache);
559 alloc_stack_hash = meta->alloc_stack_hash;
560 raw_spin_unlock_irqrestore(&meta->lock, flags);
561
562 alloc_covered_add(alloc_stack_hash, -1);
563
564 /* Check canary bytes for memory corruption. */
565 check_canary(meta);
566
567 /*
568 * Clear memory if init-on-free is set. While we protect the page, the
569 * data is still there, and after a use-after-free is detected, we
570 * unprotect the page, so the data is still accessible.
571 */
572 if (!zombie && unlikely(init))
573 memzero_explicit(addr, meta->size);
574
575 /* Protect to detect use-after-frees. */
576 kfence_protect((unsigned long)addr);
577
578 kcsan_end_scoped_access(&assert_page_exclusive);
579 if (!zombie) {
580 /* Add it to the tail of the freelist for reuse. */
581 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
582 KFENCE_WARN_ON(!list_empty(&meta->list));
583 list_add_tail(&meta->list, &kfence_freelist);
584 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
585
586 atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
587 atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
588 } else {
589 /* See kfence_shutdown_cache(). */
590 atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
591 }
592 }
593
594 static void rcu_guarded_free(struct rcu_head *h)
595 {
596 struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);
597
598 kfence_guarded_free((void *)meta->addr, meta, false);
599 }
600
601 /*
602 * Initialization of the KFENCE pool after its allocation.
603 * Returns 0 on success; otherwise returns the address up to
604 * which partial initialization succeeded.
605 */
606 static unsigned long kfence_init_pool(void)
607 __context_unsafe(/* constructor */)
608 {
609 unsigned long addr, start_pfn;
610 int i, rand;
611
612 if (!arch_kfence_init_pool())
613 return (unsigned long)__kfence_pool;
614
615 addr = (unsigned long)__kfence_pool;
616 start_pfn = PHYS_PFN(virt_to_phys(__kfence_pool));
617
618 /*
619 * Set up object pages: they must have PGTY_slab set to avoid freeing
620 * them as real pages.
621 *
622 * We also want to avoid inserting kfence_free() in the kfree()
623 * fast-path in SLUB, and therefore need to ensure kfree() correctly
624 * enters __slab_free() slow-path.
625 */
626 for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
627 struct page *page;
628
629 if (!i || (i % 2))
630 continue;
631
632 page = pfn_to_page(start_pfn + i);
633 __SetPageSlab(page);
634 #ifdef CONFIG_MEMCG
635 struct slab *slab = page_slab(page);
636 slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
637 MEMCG_DATA_OBJEXTS;
638 #endif
639 }
640
641 /*
642 * Protect the first 2 pages. The first page is mostly unnecessary, and
643 * merely serves as an extended guard page. However, adding one
644 * additional page in the beginning gives us an even number of pages,
645 * which simplifies the mapping of address to metadata index.
646 */
647 for (i = 0; i < 2; i++) {
648 if (unlikely(!kfence_protect(addr)))
649 return addr;
650
651 addr += PAGE_SIZE;
652 }
653
654 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
655 struct kfence_metadata *meta = &kfence_metadata_init[i];
656
657 /* Initialize metadata. */
658 INIT_LIST_HEAD(&meta->list);
659 raw_spin_lock_init(&meta->lock);
660 meta->state = KFENCE_OBJECT_UNUSED;
661 /* Use addr to randomize the freelist. */
662 meta->addr = i;
663
664 /* Protect the right redzone. */
665 if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE)))
666 goto reset_slab;
667 }
668
669 for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) {
670 rand = get_random_u32_below(i);
671 swap(kfence_metadata_init[i - 1].addr, kfence_metadata_init[rand].addr);
672 }
673
674 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
675 struct kfence_metadata *meta_1 = &kfence_metadata_init[i];
676 struct kfence_metadata *meta_2 = &kfence_metadata_init[meta_1->addr];
677
678 list_add_tail(&meta_2->list, &kfence_freelist);
679 }
680 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
681 kfence_metadata_init[i].addr = addr;
682 addr += 2 * PAGE_SIZE;
683 }
684
685 /*
686 * Make kfence_metadata visible only when initialization is successful.
687 * Otherwise, if the initialization fails and kfence_metadata is freed,
688 * it may cause UAF in kfence_shutdown_cache().
689 */
690 smp_store_release(&kfence_metadata, kfence_metadata_init);
691 return 0;
692
693 reset_slab:
694 addr += 2 * i * PAGE_SIZE;
695 for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
696 struct page *page;
697
698 if (!i || (i % 2))
699 continue;
700
701 page = pfn_to_page(start_pfn + i);
702 #ifdef CONFIG_MEMCG
703 struct slab *slab = page_slab(page);
704 slab->obj_exts = 0;
705 #endif
706 __ClearPageSlab(page);
707 }
708
709 return addr;
710 }
711
712 static bool __init kfence_init_pool_early(void)
713 {
714 unsigned long addr;
715
716 if (!__kfence_pool)
717 return false;
718
719 addr = kfence_init_pool();
720
721 if (!addr) {
722 /*
723 * The pool is live and will never be deallocated from this point on.
724 * Ignore the pool object from the kmemleak phys object tree, as it would
725 * otherwise overlap with allocations returned by kfence_alloc(), which
726 * are registered with kmemleak through the slab post-alloc hook.
727 */
728 kmemleak_ignore_phys(__pa(__kfence_pool));
729 return true;
730 }
731
732 /*
733 * Only release unprotected pages, and do not try to go back and change
734 * page attributes due to risk of failing to do so as well. If changing
735 * page attributes for some pages fails, it is very likely that it also
736 * fails for the first page, and therefore expect addr==__kfence_pool in
737 * most failure cases.
738 */
739 memblock_free((void *)addr, KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
740 __kfence_pool = NULL;
741
742 memblock_free(kfence_metadata_init, KFENCE_METADATA_SIZE);
743 kfence_metadata_init = NULL;
744
745 return false;
746 }
747
748 /* === DebugFS Interface ==================================================== */
749
750 static int stats_show(struct seq_file *seq, void *v)
751 {
752 int i;
753
754 seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
755 for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
756 seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));
757
758 return 0;
759 }
760 DEFINE_SHOW_ATTRIBUTE(stats);
761
762 /*
763 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
764 * start_object() and next_object() return the object index + 1, because NULL is used
765 * to stop iteration.
766 */
767 static void *start_object(struct seq_file *seq, loff_t *pos)
768 {
769 if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
770 return (void *)((long)*pos + 1);
771 return NULL;
772 }
773
774 static void stop_object(struct seq_file *seq, void *v)
775 {
776 }
777
778 static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
779 {
780 ++*pos;
781 if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
782 return (void *)((long)*pos + 1);
783 return NULL;
784 }
785
786 static int show_object(struct seq_file *seq, void *v)
787 {
788 struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
789 unsigned long flags;
790
791 raw_spin_lock_irqsave(&meta->lock, flags);
792 kfence_print_object(seq, meta);
793 raw_spin_unlock_irqrestore(&meta->lock, flags);
794 seq_puts(seq, "---------------------------------\n");
795
796 return 0;
797 }
798
799 static const struct seq_operations objects_sops = {
800 .start = start_object,
801 .next = next_object,
802 .stop = stop_object,
803 .show = show_object,
804 };
805 DEFINE_SEQ_ATTRIBUTE(objects);
806
807 static int kfence_debugfs_init(void)
808 {
809 struct dentry *kfence_dir;
810
811 if (!READ_ONCE(kfence_enabled))
812 return 0;
813
814 kfence_dir = debugfs_create_dir("kfence", NULL);
815 debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
816 debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
817 return 0;
818 }
819
820 late_initcall(kfence_debugfs_init);
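/*
 * Example of inspecting the debugfs interface at runtime (editorial
 * illustration; the numbers shown are made up and depend on the workload):
 *
 *   $ cat /sys/kernel/debug/kfence/stats
 *   enabled: 1
 *   currently allocated: 45
 *   total allocations: 3298
 *   total frees: 3253
 *   ...
 *
 * /sys/kernel/debug/kfence/objects prints one record per object slot,
 * separated by "---------------------------------" lines, as produced by
 * show_object() above.
 */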
821
822 /* === Panic Notifier ====================================================== */
823
824 static void kfence_check_all_canary(void)
825 {
826 int i;
827
828 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
829 struct kfence_metadata *meta = &kfence_metadata[i];
830
831 if (kfence_obj_allocated(meta))
832 check_canary(meta);
833 }
834 }
835
836 static int kfence_check_canary_callback(struct notifier_block *nb,
837 unsigned long reason, void *arg)
838 {
839 if (READ_ONCE(kfence_enabled))
840 kfence_check_all_canary();
841 return NOTIFY_OK;
842 }
843
844 static struct notifier_block kfence_check_canary_notifier = {
845 .notifier_call = kfence_check_canary_callback,
846 };
847
848 /* === Allocation Gate Timer ================================================ */
849
850 static struct delayed_work kfence_timer;
851
852 #ifdef CONFIG_KFENCE_STATIC_KEYS
853 /* Wait queue to wake up allocation-gate timer task. */
854 static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
855
856 static int kfence_reboot_callback(struct notifier_block *nb,
857 unsigned long action, void *data)
858 {
859 /*
860 * Disable KFENCE to avoid static-key IPI synchronization during
861 * late shutdown/kexec.
862 */
863 WRITE_ONCE(kfence_enabled, false);
864 /* Cancel any pending timer work */
865 cancel_delayed_work(&kfence_timer);
866 /*
867 * Wake up any blocked toggle_allocation_gate() so it can complete
868 * early while the system is still able to handle IPIs.
869 */
870 wake_up(&allocation_wait);
871
872 return NOTIFY_OK;
873 }
874
875 static struct notifier_block kfence_reboot_notifier = {
876 .notifier_call = kfence_reboot_callback,
877 .priority = INT_MAX, /* Run early to stop timers ASAP */
878 };
879
880 static void wake_up_kfence_timer(struct irq_work *work)
881 {
882 wake_up(&allocation_wait);
883 }
884 static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
885 #endif
886
887 /*
888 * Set up delayed work, which will enable and disable the static key. We need to
889 * use a work queue (rather than a simple timer), since enabling and disabling a
890 * static key cannot be done from an interrupt.
891 *
892 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
893 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
894 * more aggressive sampling intervals), we could get away with a variant that
895 * avoids IPIs, at the cost of not immediately capturing allocations if the
896 * instructions remain cached.
897 */
898 static void toggle_allocation_gate(struct work_struct *work)
899 {
900 if (!READ_ONCE(kfence_enabled))
901 return;
902
903 atomic_set(&kfence_allocation_gate, -kfence_burst);
904 #ifdef CONFIG_KFENCE_STATIC_KEYS
905 /* Enable static key, and await allocation to happen. */
906 static_branch_enable(&kfence_allocation_key);
907
908 wait_event_idle(allocation_wait,
909 atomic_read(&kfence_allocation_gate) > 0 ||
910 !READ_ONCE(kfence_enabled));
911
912 /* Disable static key and reset timer. */
913 static_branch_disable(&kfence_allocation_key);
914 #endif
915 queue_delayed_work(system_dfl_wq, &kfence_timer,
916 msecs_to_jiffies(kfence_sample_interval));
917 }
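/*
 * Summary of the sampling flow (editorial note, not part of the original
 * source): every kfence_sample_interval milliseconds this work item re-arms
 * the gate by setting kfence_allocation_gate to -kfence_burst and (with
 * CONFIG_KFENCE_STATIC_KEYS) enabling kfence_allocation_key. The next
 * 1 + kfence_burst allocations that reach __kfence_alloc() win the gate and
 * are serviced from the KFENCE pool; the allocation that pushes the gate
 * above zero wakes this work item via wake_up_kfence_timer_work, the static
 * key is disabled again, and the timer is re-queued. With e.g.
 * sample_interval=100 this yields roughly one guarded allocation burst per
 * 100ms.
 */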
918
919 /* === Public interface ===================================================== */
920
921 void __init kfence_alloc_pool_and_metadata(void)
922 {
923 if (!kfence_sample_interval)
924 return;
925
926 /*
927 * If KASAN hardware tags are enabled, disable KFENCE, because it
928 * does not support MTE yet.
929 */
930 if (kasan_hw_tags_enabled()) {
931 pr_info("disabled as KASAN HW tags are enabled\n");
932 if (__kfence_pool) {
933 memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
934 __kfence_pool = NULL;
935 }
936 kfence_sample_interval = 0;
937 return;
938 }
939
940 /*
941 * If the pool has already been initialized by arch, there is no need to
942 * re-allocate the memory pool.
943 */
944 if (!__kfence_pool)
945 __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
946
947 if (!__kfence_pool) {
948 pr_err("failed to allocate pool\n");
949 return;
950 }
951
952 /* The memory allocated by memblock has been zeroed out. */
953 kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
954 if (!kfence_metadata_init) {
955 pr_err("failed to allocate metadata\n");
956 memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
957 __kfence_pool = NULL;
958 }
959 }
960
961 static void kfence_init_enable(void)
962 {
963 if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
964 static_branch_enable(&kfence_allocation_key);
965
966 if (kfence_deferrable)
967 INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
968 else
969 INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);
970
971 if (kfence_check_on_panic)
972 atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
973
974 #ifdef CONFIG_KFENCE_STATIC_KEYS
975 register_reboot_notifier(&kfence_reboot_notifier);
976 #endif
977
978 WRITE_ONCE(kfence_enabled, true);
979 queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
980
981 pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
982 CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
983 (void *)(__kfence_pool + KFENCE_POOL_SIZE));
984 }
985
986 void __init kfence_init(void)
987 {
988 stack_hash_seed = get_random_u32();
989
990 /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
991 if (!kfence_sample_interval)
992 return;
993
994 if (!kfence_init_pool_early()) {
995 pr_err("%s failed\n", __func__);
996 return;
997 }
998
999 kfence_init_enable();
1000 }
1001
1002 static int kfence_init_late(void)
1003 {
1004 const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
1005 const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
1006 unsigned long addr = (unsigned long)__kfence_pool;
1007 unsigned long free_size = KFENCE_POOL_SIZE;
1008 int err = -ENOMEM;
1009
1010 #ifdef CONFIG_CONTIG_ALLOC
1011 struct page *pages;
1012
1013 pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL | __GFP_SKIP_KASAN,
1014 first_online_node, NULL);
1015 if (!pages)
1016 return -ENOMEM;
1017
1018 __kfence_pool = page_to_virt(pages);
1019 pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL | __GFP_SKIP_KASAN,
1020 first_online_node, NULL);
1021 if (pages)
1022 kfence_metadata_init = page_to_virt(pages);
1023 #else
1024 if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
1025 nr_pages_meta > MAX_ORDER_NR_PAGES) {
1026 pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
1027 return -EINVAL;
1028 }
1029
1030 __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE,
1031 GFP_KERNEL | __GFP_SKIP_KASAN);
1032 if (!__kfence_pool)
1033 return -ENOMEM;
1034
1035 kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE,
1036 GFP_KERNEL | __GFP_SKIP_KASAN);
1037 #endif
1038
1039 if (!kfence_metadata_init)
1040 goto free_pool;
1041
1042 memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
1043 addr = kfence_init_pool();
1044 if (!addr) {
1045 kfence_init_enable();
1046 kfence_debugfs_init();
1047 return 0;
1048 }
1049
1050 pr_err("%s failed\n", __func__);
1051 free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
1052 err = -EBUSY;
1053
1054 #ifdef CONFIG_CONTIG_ALLOC
1055 free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
1056 nr_pages_meta);
1057 free_pool:
1058 free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
1059 free_size / PAGE_SIZE);
1060 #else
1061 free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
1062 free_pool:
1063 free_pages_exact((void *)addr, free_size);
1064 #endif
1065
1066 kfence_metadata_init = NULL;
1067 __kfence_pool = NULL;
1068 return err;
1069 }
1070
1071 static int kfence_enable_late(void)
1072 {
1073 if (!__kfence_pool)
1074 return kfence_init_late();
1075
1076 WRITE_ONCE(kfence_enabled, true);
1077 queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
1078 pr_info("re-enabled\n");
1079 return 0;
1080 }
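/*
 * Summary (editorial note, not part of the original source):
 * kfence_enable_late() is reached from param_set_sample_interval() when a
 * non-zero sample interval is written after boot. If the boot-time pool was
 * never set up, it allocates the pool and metadata via kfence_init_late();
 * otherwise it simply restarts the sampling timer.
 */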
1081
1082 void kfence_shutdown_cache(struct kmem_cache *s)
1083 {
1084 unsigned long flags;
1085 struct kfence_metadata *meta;
1086 int i;
1087
1088 /* Pairs with release in kfence_init_pool(). */
1089 if (!smp_load_acquire(&kfence_metadata))
1090 return;
1091
1092 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
1093 bool in_use;
1094
1095 meta = &kfence_metadata[i];
1096
1097 /*
1098 * If we observe some inconsistent cache and state pair where we
1099 * should have returned false here, cache destruction is racing
1100 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
1101 * the lock will not help, as different critical section
1102 * serialization will have the same outcome.
1103 */
1104 if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta))
1105 continue;
1106
1107 raw_spin_lock_irqsave(&meta->lock, flags);
1108 in_use = meta->cache == s && kfence_obj_allocated(meta);
1109 raw_spin_unlock_irqrestore(&meta->lock, flags);
1110
1111 if (in_use) {
1112 /*
1113 * This cache still has allocations, and we should not
1114 * release them back into the freelist so they can still
1115 * safely be used and retain the kernel's default
1116 * behaviour of keeping the allocations alive (leak the
1117 * cache); however, they effectively become "zombie
1118 * allocations" as the KFENCE objects are the only ones
1119 * still in use and the owning cache is being destroyed.
1120 *
1121 * We mark them freed, so that any subsequent use shows
1122 * more useful error messages that will include stack
1123 * traces of the user of the object, the original
1124 * allocation, and caller to shutdown_cache().
1125 */
1126 kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
1127 }
1128 }
1129
1130 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
1131 meta = &kfence_metadata[i];
1132
1133 /* See above. */
1134 if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
1135 continue;
1136
1137 raw_spin_lock_irqsave(&meta->lock, flags);
1138 if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
1139 meta->cache = NULL;
1140 raw_spin_unlock_irqrestore(&meta->lock, flags);
1141 }
1142 }
1143
1144 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
1145 {
1146 unsigned long stack_entries[KFENCE_STACK_DEPTH];
1147 size_t num_stack_entries;
1148 u32 alloc_stack_hash;
1149 int allocation_gate;
1150
1151 /*
1152 * Perform size check before switching kfence_allocation_gate, so that
1153 * we don't disable KFENCE without making an allocation.
1154 */
1155 if (size > PAGE_SIZE) {
1156 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1157 return NULL;
1158 }
1159
1160 /*
1161 * Skip allocations from non-default zones, including DMA. We cannot
1162 * guarantee that pages in the KFENCE pool will have the requested
1163 * properties (e.g. reside in DMAable memory).
1164 */
1165 if ((flags & GFP_ZONEMASK) ||
1166 ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
1167 (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
1168 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1169 return NULL;
1170 }
1171
1172 /*
1173 * Skip allocations for this slab, if KFENCE has been disabled for
1174 * this slab.
1175 */
1176 if (s->flags & SLAB_SKIP_KFENCE)
1177 return NULL;
1178
1179 allocation_gate = atomic_inc_return(&kfence_allocation_gate);
1180 if (allocation_gate > 1)
1181 return NULL;
1182 #ifdef CONFIG_KFENCE_STATIC_KEYS
1183 /*
1184 * waitqueue_active() is fully ordered after the update of
1185 * kfence_allocation_gate per atomic_inc_return().
1186 */
1187 if (allocation_gate == 1 && waitqueue_active(&allocation_wait)) {
1188 /*
1189 * Calling wake_up() here may deadlock when allocations happen
1190 * from within timer code. Use an irq_work to defer it.
1191 */
1192 irq_work_queue(&wake_up_kfence_timer_work);
1193 }
1194 #endif
1195
1196 if (!READ_ONCE(kfence_enabled))
1197 return NULL;
1198
1199 num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
1200
1201 /*
1202 * Do expensive check for coverage of allocation in slow-path after
1203 * allocation_gate has already become non-zero, even though it might
1204 * mean not making any allocation within a given sample interval.
1205 *
1206 * This ensures reasonable allocation coverage when the pool is almost
1207 * full, including avoiding long-lived allocations of the same source
1208 * filling up the pool (e.g. pagecache allocations).
1209 */
1210 alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
1211 if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
1212 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
1213 return NULL;
1214 }
1215
1216 return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
1217 alloc_stack_hash);
1218 }
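/*
 * Note on how this is reached (editorial sketch based on the inline wrapper
 * in include/linux/kfence.h, which is not shown here): the slab allocators
 * call kfence_alloc(), which checks kfence_allocation_key and
 * kfence_allocation_gate in the fast path and only falls through to
 * __kfence_alloc() for sampled allocations, keeping the cost of disabled or
 * non-sampled allocations to a static branch (or an atomic read) each.
 */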
1219
1220 size_t kfence_ksize(const void *addr)
1221 {
1222 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1223
1224 /*
1225 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1226 * either a use-after-free or invalid access.
1227 */
1228 return meta ? meta->size : 0;
1229 }
1230
1231 void *kfence_object_start(const void *addr)
1232 {
1233 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1234
1235 /*
1236 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1237 * either a use-after-free or invalid access.
1238 */
1239 return meta ? (void *)meta->addr : NULL;
1240 }
1241
1242 void __kfence_free(void *addr)
1243 {
1244 struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1245
1246 #ifdef CONFIG_MEMCG
1247 KFENCE_WARN_ON(meta->obj_exts.objcg);
1248 #endif
1249 /*
1250 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
1251 * the object, as the object page may be recycled for other-typed
1252 * objects once it has been freed. meta->cache may be NULL if the cache
1253 * was destroyed.
1254 * Save the stack trace here so that reports show where the user freed
1255 * the object.
1256 */
1257 if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) {
1258 unsigned long flags;
1259
1260 raw_spin_lock_irqsave(&meta->lock, flags);
1261 metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0);
1262 raw_spin_unlock_irqrestore(&meta->lock, flags);
1263 call_rcu(&meta->rcu_head, rcu_guarded_free);
1264 } else {
1265 kfence_guarded_free(addr, meta, false);
1266 }
1267 }
1268
1269 bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
1270 {
1271 const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
1272 struct kfence_metadata *to_report = NULL;
1273 unsigned long unprotected_page = 0;
1274 enum kfence_error_type error_type;
1275 enum kfence_fault fault;
1276 unsigned long flags;
1277
1278 if (!is_kfence_address((void *)addr))
1279 return false;
1280
1281 if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
1282 return kfence_unprotect(addr); /* ... unprotect and proceed. */
1283
1284 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
1285
1286 if (page_index % 2) {
1287 /* This is a redzone, report a buffer overflow. */
1288 struct kfence_metadata *meta;
1289 int distance = 0;
1290
1291 meta = addr_to_metadata(addr - PAGE_SIZE);
1292 if (meta && kfence_obj_allocated(meta)) {
1293 to_report = meta;
1294 /* Data race ok; distance calculation approximate. */
1295 distance = addr - data_race(meta->addr + meta->size);
1296 }
1297
1298 meta = addr_to_metadata(addr + PAGE_SIZE);
1299 if (meta && kfence_obj_allocated(meta)) {
1300 /* Data race ok; distance calculation approximate. */
1301 if (!to_report || distance > data_race(meta->addr) - addr)
1302 to_report = meta;
1303 }
1304
1305 if (!to_report)
1306 goto out;
1307
1308 error_type = KFENCE_ERROR_OOB;
1309 unprotected_page = addr;
1310
1311 /*
1312 * If the object was freed before we took the lock we can still
1313 * report this as an OOB -- the report will simply show the
1314 * stacktrace of the free as well.
1315 */
1316 } else {
1317 to_report = addr_to_metadata(addr);
1318 if (!to_report)
1319 goto out;
1320
1321 error_type = KFENCE_ERROR_UAF;
1322 /*
1323 * We may race with __kfence_alloc(), and it is possible that a
1324 * freed object may be reallocated. We simply report this as a
1325 * use-after-free, with the stack trace showing the place where
1326 * the object was re-allocated.
1327 */
1328 }
1329
1330 out:
1331 if (to_report) {
1332 raw_spin_lock_irqsave(&to_report->lock, flags);
1333 to_report->unprotected_page = unprotected_page;
1334 fault = kfence_report_error(addr, is_write, regs, to_report, error_type);
1335 raw_spin_unlock_irqrestore(&to_report->lock, flags);
1336 } else {
1337 /* This may be a UAF or OOB access, but we can't be sure. */
1338 fault = kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
1339 }
1340
1341 kfence_handle_fault(fault);
1342
1343 return kfence_unprotect(addr); /* Unprotect and let access proceed. */
1344 }
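/*
 * Classification summary (editorial note, not part of the original source):
 * for a faulting address inside the pool, an odd page index means the access
 * hit a guard page, so the closer of the two neighbouring objects is reported
 * as an out-of-bounds access; an even page index means the access hit a
 * protected (freed) object page and is reported as a use-after-free. If no
 * plausible object is found, the report falls back to KFENCE_ERROR_INVALID.
 */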
1345