// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"

#if defined(CONFIG_ARCH_DEFER_KASAN) || defined(CONFIG_KASAN_HW_TAGS)
/*
 * Definition of the unified static key declared in kasan-enabled.h.
 * This provides consistent runtime enable/disable across KASAN modes.
 */
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL_GPL(kasan_flag_enabled);
#endif

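/* Look up the slab that backs @addr; returns NULL if @addr is not a valid linear-map address. */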
struct slab *kasan_addr_to_slab(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_slab(addr);
	return NULL;
}

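/* Capture the current stack trace and store it in the stack depot. */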
depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}

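/*
 * Fill in a kasan_track record: the saved stack handle, the current PID and,
 * with CONFIG_KASAN_EXTRA_INFO, the CPU and a coarse timestamp.
 */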
void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
	u32 cpu = raw_smp_processor_id();
	u64 ts_nsec = local_clock();

	track->cpu = cpu;
	track->timestamp = ts_nsec >> 9;
#endif /* CONFIG_KASAN_EXTRA_INFO */
	track->pid = current->pid;
	track->stack = stack;
}

void kasan_save_track(struct kasan_track *track, gfp_t flags)
{
	depot_stack_handle_t stack;

	stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
	kasan_set_track(track, stack);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

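/* Unpoison an arbitrary memory range; KFENCE-managed addresses are left untouched. */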
void __kasan_unpoison_range(const void *address, size_t size)
{
	if (is_kfence_address(address))
		return;

	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

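/*
 * Unpoison freshly allocated pages with a common random tag. Returns false if
 * the pages were skipped (highmem or excluded by page_alloc sampling).
 */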
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return false;

	if (!kasan_sample_page_alloc(order))
		return false;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);

	return true;
}

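/* Poison pages being freed back to the page allocator (highmem is skipped). */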
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

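/* Reset per-page KASAN tags for a slab and poison its entire range as redzone. */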
void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

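/*
 * Make a just-created object accessible for initialization; paired with
 * __kasan_poison_new_object() below, which re-poisons it as redzone.
 */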
void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
					const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/*
	 * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
	 * assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	/* Initialize per-object metadata if it is present. */
	if (kasan_requires_meta())
		kasan_init_object_meta(cache, object);

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

/* Returns true when freeing the object is not safe. */
static bool check_slab_allocation(struct kmem_cache *cache, void *object,
				  unsigned long ip)
{
	void *tagged_object = object;

	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	return false;
}

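/* Poison a slab object on free and record the free stack if stack collection is enabled. */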
static inline void poison_slab_object(struct kmem_cache *cache, void *object,
				      bool init)
{
	void *tagged_object = object;

	object = kasan_reset_tag(object);

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);
}

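/*
 * Run the free-time sanity checks on a slab object; returns true if freeing it
 * is not safe. KFENCE-managed objects always pass.
 */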
bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
				unsigned long ip)
{
	if (is_kfence_address(object))
		return false;
	return check_slab_allocation(cache, object, ip);
}

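/*
 * Poison an object being freed and decide its fate: returns true if KASAN keeps
 * the object (quarantine), false if slab may put it on the freelist.
 */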
bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
		       bool still_accessible, bool no_quarantine)
{
	if (is_kfence_address(object))
		return false;

	/*
	 * If this point is reached with an object that must still be
	 * accessible under RCU, we can't poison it; in that case, also skip the
	 * quarantine. This should mostly only happen when CONFIG_SLUB_RCU_DEBUG
	 * has been disabled manually.
	 *
	 * Putting the object on the quarantine wouldn't help catch UAFs (since
	 * we can't poison it here), and it would mask bugs caused by
	 * SLAB_TYPESAFE_BY_RCU users not being careful enough about object
	 * reuse; so overall, putting the object into the quarantine here would
	 * be counterproductive.
	 */
	if (still_accessible)
		return false;

	poison_slab_object(cache, object, init);

	if (no_quarantine)
		return false;

	/*
	 * If the object is put into quarantine, do not let slab put the object
	 * onto the freelist for now. The object's metadata is kept until the
	 * object gets evicted from quarantine.
	 */
	if (kasan_quarantine_put(cache, object))
		return true;

	/*
	 * Note: Keep per-object metadata to allow KASAN print stack traces for
	 * use-after-free-before-realloc bugs.
	 */

	/* Let slab put the object onto the freelist. */
	return false;
}

static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	check_page_allocation(ptr, ip);

	/* The object will be poisoned by kasan_poison_pages(). */
}

static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
					gfp_t flags, bool init)
{
	/*
	 * Unpoison the whole object. For kmalloc() allocations,
	 * poison_kmalloc_redzone() will do precise poisoning.
	 */
	kasan_unpoison(object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, object, flags);
}

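/*
 * Hook a newly allocated slab object: assign a tag in tag-based modes, unpoison
 * it, and record alloc info for non-kmalloc() caches.
 */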
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(cache, tagged_object, flags, init);

	return tagged_object;
}

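/*
 * Poison the unused tail of a kmalloc()'ed object, from the requested size up
 * to the cache's object_size, and record alloc info for kmalloc() caches.
 */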
static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			   KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, (void *)object, flags);
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/* The object has already been unpoisoned by kasan_slab_alloc(). */
	poison_kmalloc_redzone(cache, object, size, flags);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}
EXPORT_SYMBOL(__kasan_kmalloc);

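/*
 * Poison the tail of a page_alloc-backed (large) kmalloc allocation, from the
 * requested size up to the end of its backing pages.
 */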
static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);
}

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/* The object has already been unpoisoned by kasan_unpoison_pages(). */
	poison_kmalloc_large_redzone(ptr, size, flags);

	/* Keep the tag that was set by alloc_pages(). */
	return (void *)ptr;
}

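/*
 * Update poisoning after krealloc(): unpoison the newly requested size, then
 * rebuild the redzone for either a slab or a page_alloc-backed allocation.
 */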
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		poison_kmalloc_large_redzone(object, size, flags);
	else
		poison_kmalloc_redzone(slab->slab_cache, object, size, flags);

	return (void *)object;
}

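/*
 * Poison a page allocation that a mempool keeps in its reserve; returns false
 * if the free-time checks fail.
 */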
bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip)
{
	unsigned long *ptr;

	if (unlikely(PageHighMem(page)))
		return true;

	/* Bail out if allocation was excluded due to sampling. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    page_kasan_tag(page) == KASAN_TAG_KERNEL)
		return true;

	ptr = page_address(page);

	if (check_page_allocation(ptr, ip))
		return false;

	kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);

	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip)
{
	__kasan_unpoison_pages(page, order, false);
}

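/*
 * Poison a slab or large-kmalloc object that a mempool keeps in its reserve;
 * returns false if the free-time checks fail.
 */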
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct page *page = virt_to_page(ptr);
	struct slab *slab;

	if (unlikely(PageLargeKmalloc(page))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, page_size(page), KASAN_PAGE_FREE, false);
		return true;
	}

	if (is_kfence_address(ptr))
		return true;

	slab = page_slab(page);

	if (check_slab_allocation(slab->slab_cache, ptr, ip))
		return false;

	poison_slab_object(slab->slab_cache, ptr, false);
	return true;
}

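/*
 * Unpoison an object handed back out of a mempool reserve, restoring the
 * kmalloc() redzone when the object belongs to a kmalloc cache.
 */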
void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
{
	struct slab *slab;
	gfp_t flags = 0; /* Might be executing under a lock. */

	slab = virt_to_slab(ptr);

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc.
	 */
	if (unlikely(!slab)) {
		kasan_unpoison(ptr, size, false);
		poison_kmalloc_large_redzone(ptr, size, flags);
		return;
	}

	if (is_kfence_address(ptr))
		return;

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(slab->slab_cache, ptr, flags, false);

	/* Poison the redzone and save alloc info for kmalloc() allocations. */
	if (is_kmalloc_cache(slab->slab_cache))
		poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}

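/* Check that a single byte is accessible; report and return false if it is not. */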
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report(address, 1, false, ip);
		return false;
	}
	return true;
}

#ifdef CONFIG_KASAN_VMALLOC
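/*
 * Unpoison a batch of vm areas, reusing the tag assigned to vms[0] for every
 * area in the batch.
 */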
void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
				 kasan_vmalloc_flags_t flags)
{
	unsigned long size;
	void *addr;
	int area;
	u8 tag;

	/*
	 * If KASAN_VMALLOC_KEEP_TAG were set at this point, all vms[] pointers
	 * would be unpoisoned with KASAN_TAG_KERNEL, which would disable
	 * KASAN checks down the line.
	 */
	if (WARN_ON_ONCE(flags & KASAN_VMALLOC_KEEP_TAG))
		return;

	size = vms[0]->size;
	addr = vms[0]->addr;
	vms[0]->addr = __kasan_unpoison_vmalloc(addr, size, flags);
	tag = get_tag(vms[0]->addr);

	for (area = 1; area < nr_vms; area++) {
		size = vms[area]->size;
		addr = set_tag(vms[area]->addr, tag);
		vms[area]->addr =
			__kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);
	}
}
#endif