xref: /linux/mm/kasan/common.c (revision 6179d4a213006491ff0d50073256f21fad22149b)
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

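/* Return the slab that contains a valid virtual address, or NULL otherwise. */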
struct slab *kasan_addr_to_slab(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_slab(addr);
	return NULL;
}

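/* Save the current stack trace in the stack depot and return its handle. */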
depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}

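/*
 * Fill in a track: the stack depot handle, the current pid, and, with
 * CONFIG_KASAN_EXTRA_INFO, the current CPU and a compressed timestamp.
 */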
void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
	u32 cpu = raw_smp_processor_id();
	u64 ts_nsec = local_clock();

	track->cpu = cpu;
	track->timestamp = ts_nsec >> 3;
#endif /* CONFIG_KASAN_EXTRA_INFO */
	track->pid = current->pid;
	track->stack = stack;
}

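/* Capture the current stack trace and record it in the given track. */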
void kasan_save_track(struct kasan_track *track, gfp_t flags)
{
	depot_stack_handle_t stack;

	stack = kasan_save_stack(flags,
			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
	kasan_set_track(track, stack);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
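/*
 * Reports for the current task are suppressed while its kasan_depth is
 * non-zero; these helpers adjust the depth so that disable/enable pairs
 * can nest.
 */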
void kasan_enable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
	if (is_kfence_address(address))
		return;

	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return false;

	if (!kasan_sample_page_alloc(order))
		return false;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);

	return true;
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

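/*
 * Reset the page tags and poison the whole slab as a redzone; objects are
 * unpoisoned individually when they are handed out.
 */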
void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

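/* Temporarily unpoison a newly created object, e.g. so a constructor can run. */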
void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

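/* Poison a newly created object again; it stays poisoned until it is allocated. */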
void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
					const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/*
	 * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
	 * assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	/* Initialize per-object metadata if it is present. */
	if (kasan_requires_meta())
		kasan_init_object_meta(cache, object);

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

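/*
 * Poison a slab object on free and check for bugs. Returns true if an invalid
 * or double free was detected, in which case the object must not be reused.
 */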
static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
				      unsigned long ip, bool init)
{
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tagged_object = object;
	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);

	return false;
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool init)
{
	if (is_kfence_address(object))
		return false;

	/*
	 * If the object is buggy, do not let slab put the object onto the
	 * freelist. The object will thus never be allocated again and its
	 * metadata will never get released.
	 */
	if (poison_slab_object(cache, object, ip, init))
		return true;

	/*
	 * If the object is put into quarantine, do not let slab put the object
	 * onto the freelist for now. The object's metadata is kept until the
	 * object gets evicted from quarantine.
	 */
	if (kasan_quarantine_put(cache, object))
		return true;

	/*
	 * If the object is not put into quarantine, it will likely be quickly
	 * reallocated. Thus, release its metadata now.
	 */
	kasan_release_object_meta(cache, object);

	/* Let slab put the object onto the freelist. */
	return false;
}

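/*
 * Check a page_alloc-backed pointer on free: report invalid and double frees.
 * Returns true if a bug was detected.
 */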
static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
	if (!kasan_arch_is_ready())
		return false;

	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	check_page_allocation(ptr, ip);

	/* The object will be poisoned by kasan_poison_pages(). */
}

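/* Allocation-path unpoisoning shared by kasan_slab_alloc() and the mempool hooks. */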
static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
					gfp_t flags, bool init)
{
	/*
	 * Unpoison the whole object. For kmalloc() allocations,
	 * poison_kmalloc_redzone() will do precise poisoning.
	 */
	kasan_unpoison(object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, object, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(cache, tagged_object, flags, init);

	return tagged_object;
}

static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			   KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, (void *)object, flags);
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/* The object has already been unpoisoned by kasan_slab_alloc(). */
	poison_kmalloc_redzone(cache, object, size, flags);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}
EXPORT_SYMBOL(__kasan_kmalloc);

static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);
}

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/* The object has already been unpoisoned by kasan_unpoison_pages(). */
	poison_kmalloc_large_redzone(ptr, size, flags);

	/* Keep the tag that was set by alloc_pages(). */
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		poison_kmalloc_large_redzone(object, size, flags);
	else
		poison_kmalloc_redzone(slab->slab_cache, object, size, flags);

	return (void *)object;
}

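/*
 * The __kasan_mempool_* helpers below back the kasan_mempool_*() API used by
 * mempool: elements are poisoned while they are cached in the pool and
 * unpoisoned again when the pool hands them back out.
 */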
bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip)
{
	unsigned long *ptr;

	if (unlikely(PageHighMem(page)))
		return true;

	/* Bail out if allocation was excluded due to sampling. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    page_kasan_tag(page) == KASAN_TAG_KERNEL)
		return true;

	ptr = page_address(page);

	if (check_page_allocation(ptr, ip))
		return false;

	kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);

	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip)
{
	__kasan_unpoison_pages(page, order, false);
}

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio = virt_to_folio(ptr);
	struct slab *slab;

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc. Thus, the folio might not be a slab.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;
	}

	if (is_kfence_address(ptr))
		return false;

	slab = folio_slab(folio);
	return !poison_slab_object(slab->slab_cache, ptr, ip, false);
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
{
	struct slab *slab;
	gfp_t flags = 0; /* Might be executing under a lock. */

	slab = virt_to_slab(ptr);

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc.
	 */
	if (unlikely(!slab)) {
		kasan_unpoison(ptr, size, false);
		poison_kmalloc_large_redzone(ptr, size, flags);
		return;
	}

	if (is_kfence_address(ptr))
		return;

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(slab->slab_cache, ptr, flags, false);

	/* Poison the redzone and save alloc info for kmalloc() allocations. */
	if (is_kmalloc_cache(slab->slab_cache))
		poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}

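/* Check that a single byte is accessible; report a bug and return false if not. */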
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report(address, 1, false, ip);
		return false;
	}
	return true;
}
552