// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

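/*
 * Return the slab that contains @addr, or NULL if @addr is not a valid
 * linear-map address or does not belong to a slab.
 */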
struct slab *kasan_addr_to_slab(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_slab(addr);
	return NULL;
}

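/*
 * Capture up to KASAN_STACK_DEPTH frames of the current call stack and
 * store them in the stack depot, which deduplicates identical traces.
 * The returned handle is what gets kept in object metadata instead of
 * the full trace.
 */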
depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}

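/*
 * Record who touched an object: the pid of the current task plus the given
 * stack depot handle. With CONFIG_KASAN_EXTRA_INFO, also record the CPU and
 * a reduced-precision timestamp (the low bits of local_clock() are shifted
 * off so that the value fits the bitfield in struct kasan_track).
 */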
void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
	u32 cpu = raw_smp_processor_id();
	u64 ts_nsec = local_clock();

	track->cpu = cpu;
	track->timestamp = ts_nsec >> 9;
#endif /* CONFIG_KASAN_EXTRA_INFO */
	track->pid = current->pid;
	track->stack = stack;
}

void kasan_save_track(struct kasan_track *track, gfp_t flags)
{
	depot_stack_handle_t stack;

	stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
	kasan_set_track(track, stack);
}

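/*
 * kasan_depth is a per-task nesting counter: bug reports are suppressed
 * whenever it is non-zero. kasan_disable_current() increments it and
 * kasan_enable_current() decrements it, so disable/enable pairs nest
 * correctly. Typical use around a deliberate access to possibly poisoned
 * memory:
 *
 *	kasan_disable_current();
 *	... access memory that may be poisoned ...
 *	kasan_enable_current();
 */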
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
	if (is_kfence_address(address))
		return;

	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
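	/*
	 * Task stacks are THREAD_SIZE-aligned, so masking off the low bits
	 * of any address within the stack yields the stack base. For
	 * example, with a 16 KB THREAD_SIZE, a watermark of 0x...c5678
	 * gives a base of 0x...c4000, and [base, watermark) is unpoisoned
	 * below.
	 */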
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return false;

	if (!kasan_sample_page_alloc(order))
		return false;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);

	return true;
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 */
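/*
 * In short, for the tag-based modes:
 *
 *	cache type			slab creation		allocation
 *					(init == true)		(init == false)
 *	no ctor, !SLAB_TYPESAFE_BY_RCU	KASAN_TAG_KERNEL	random tag
 *	ctor or SLAB_TYPESAFE_BY_RCU	random tag		reuse stored tag
 *
 * The generic mode always returns 0xff, which set_tag() then ignores.
 */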
static inline u8 assign_tag(struct kmem_cache *cache,
					const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/*
	 * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
	 * assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	/* Initialize per-object metadata if it is present. */
	if (kasan_requires_meta())
		kasan_init_object_meta(cache, object);

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
				      unsigned long ip, bool init)
{
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tagged_object = object;
	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);

	return false;
}

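/*
 * Returns true if KASAN retains the object: either an invalid free was
 * detected (and reported) or the object was placed into quarantine. In
 * both cases the caller must not put the object onto the slab freelist.
 */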
bool __kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool init)
{
	if (is_kfence_address(object))
		return false;

	/*
	 * If the object is buggy, do not let slab put the object onto the
	 * freelist. The object will thus never be allocated again and its
	 * metadata will never get released.
	 */
	if (poison_slab_object(cache, object, ip, init))
		return true;

	/*
	 * If the object is put into quarantine, do not let slab put the object
	 * onto the freelist for now. The object's metadata is kept until the
	 * object gets evicted from quarantine.
	 */
	if (kasan_quarantine_put(cache, object))
		return true;

	/*
	 * Note: Keep per-object metadata to allow KASAN to print stack traces
	 * for use-after-free-before-realloc bugs.
	 */

	/* Let slab put the object onto the freelist. */
	return false;
}

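/*
 * Sanity-check a pointer supposedly backed directly by page_alloc: it must
 * point at the first byte of a (head) page, and that byte must be
 * accessible. Otherwise, report an invalid free or a double-free and
 * return true.
 */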
static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
	if (!kasan_arch_is_ready())
		return false;

	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	check_page_allocation(ptr, ip);

	/* The object will be poisoned by kasan_poison_pages(). */
}

static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
					gfp_t flags, bool init)
{
	/*
	 * Unpoison the whole object. For kmalloc() allocations,
	 * poison_kmalloc_redzone() will do precise poisoning.
	 */
	kasan_unpoison(object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, object, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(cache, tagged_object, flags, init);

	return tagged_object;
}

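/*
 * Resulting kmalloc object layout, where 'size' is the requested size and
 * 'object_size' the cache's object size:
 *
 *	[object, object + size)			accessible
 *	[object + size, redzone_start)		unaligned redzone byte(s),
 *						poisoned in the generic mode only
 *	[redzone_start, redzone_end)		granule-aligned redzone,
 *						poisoned as KASAN_SLAB_REDZONE
 */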
static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			   KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, (void *)object, flags);
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/* The object has already been unpoisoned by kasan_slab_alloc(). */
	poison_kmalloc_redzone(cache, object, size, flags);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}
EXPORT_SYMBOL(__kasan_kmalloc);

static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);
}

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/* The object has already been unpoisoned by kasan_unpoison_pages(). */
	poison_kmalloc_large_redzone(ptr, size, flags);

	/* Keep the tag that was set by alloc_pages(). */
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		poison_kmalloc_large_redzone(object, size, flags);
	else
		poison_kmalloc_redzone(slab->slab_cache, object, size, flags);

	return (void *)object;
}

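/*
 * The kasan_mempool_* hooks below mirror the regular alloc/free hooks, but
 * for elements that a mempool keeps in reserve: an element is poisoned when
 * stashed in the pool instead of being freed, and unpoisoned when taken out
 * for reuse. A poison hook returning false means the element is buggy and
 * must not be kept in the pool.
 */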
bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip)
{
	unsigned long *ptr;

	if (unlikely(PageHighMem(page)))
		return true;

	/* Bail out if allocation was excluded due to sampling. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    page_kasan_tag(page) == KASAN_TAG_KERNEL)
		return true;

	ptr = page_address(page);

	if (check_page_allocation(ptr, ip))
		return false;

	kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);

	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip)
{
	__kasan_unpoison_pages(page, order, false);
}

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio = virt_to_folio(ptr);
	struct slab *slab;

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc. Thus, the folio might not be a slab.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;
	}

	if (is_kfence_address(ptr))
		return false;

	slab = folio_slab(folio);
	return !poison_slab_object(slab->slab_cache, ptr, ip, false);
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
{
	struct slab *slab;
	gfp_t flags = 0; /* Might be executing under a lock. */

	slab = virt_to_slab(ptr);

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc.
	 */
	if (unlikely(!slab)) {
		kasan_unpoison(ptr, size, false);
		poison_kmalloc_large_redzone(ptr, size, flags);
		return;
	}

	if (is_kfence_address(ptr))
		return;

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(slab->slab_cache, ptr, flags, false);

	/* Poison the redzone and save alloc info for kmalloc() allocations. */
	if (is_kmalloc_cache(slab->slab_cache))
		poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}

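/*
 * Check whether a single byte is accessible. Unlike the compiler-emitted
 * checks, this works in all KASAN modes; it is used via kasan_check_byte(),
 * e.g. by ksize() to validate a pointer before reporting its usable size.
 */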
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report(address, 1, false, ip);
		return false;
	}
	return true;
}
550