xref: /linux/mm/kasan/generic.c (revision 60433a9d038db006ca2f49e3c5f050dc46aaad3a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * This file contains core generic KASAN code.
4  *
5  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
6  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
7  *
8  * Some code borrowed from https://github.com/xairy/kasan-prototype by
9  *        Andrey Konovalov <andreyknvl@gmail.com>
10  */
11 
12 #include <linux/export.h>
13 #include <linux/interrupt.h>
14 #include <linux/init.h>
15 #include <linux/kasan.h>
16 #include <linux/kernel.h>
17 #include <linux/kfence.h>
18 #include <linux/kmemleak.h>
19 #include <linux/linkage.h>
20 #include <linux/memblock.h>
21 #include <linux/memory.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/printk.h>
25 #include <linux/sched.h>
26 #include <linux/sched/task_stack.h>
27 #include <linux/slab.h>
28 #include <linux/stackdepot.h>
29 #include <linux/stacktrace.h>
30 #include <linux/string.h>
31 #include <linux/types.h>
32 #include <linux/vmalloc.h>
33 #include <linux/bug.h>
34 
35 #include "kasan.h"
36 #include "../slab.h"
37 
38 /*
39  * All functions below are always inlined so that the compiler can
40  * perform better optimizations in each of __asan_loadX/__asan_storeX,
41  * depending on the memory access size X.
42  */
43 
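/*
 * A brief reminder of the generic KASAN shadow encoding these checks rely
 * on: kasan_mem_to_shadow() maps an address to its shadow byte (the address
 * shifted right by KASAN_SHADOW_SCALE_SHIFT plus the shadow offset), and
 * each shadow byte covers one KASAN_GRANULE_SIZE-byte (8-byte) granule.
 * A shadow value of 0 means the whole granule is accessible, 1..7 means
 * only the first N bytes are, and negative values mark redzones and freed
 * memory.
 */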
44 static __always_inline bool memory_is_poisoned_1(const void *addr)
45 {
46 	s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);
47 
48 	if (unlikely(shadow_value)) {
49 		s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
50 		return unlikely(last_accessible_byte >= shadow_value);
51 	}
52 
53 	return false;
54 }
55 
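/*
 * An access that stays within a single granule only needs its last byte
 * checked: with the prefix encoding above, if the last byte is accessible
 * then so are all earlier bytes of the access. The unlikely() condition
 * below detects a granule-crossing access: ((addr + size - 1) &
 * KASAN_GRANULE_MASK) equals offset-in-granule + size - 1 (and is thus
 * >= size - 1) when the access fits in one granule, and wraps to a smaller
 * value when it spills into the next one.
 */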
56 static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
57 						unsigned long size)
58 {
59 	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);
60 
61 	/*
62 	 * The access crosses an 8-byte (shadow granule) boundary. Such an
63 	 * access maps into two shadow bytes, so we need to check them both.
64 	 */
65 	if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
66 		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
67 
68 	return memory_is_poisoned_1(addr + size - 1);
69 }
70 
71 static __always_inline bool memory_is_poisoned_16(const void *addr)
72 {
73 	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);
74 
75 	/* An unaligned 16-byte access maps into 3 shadow bytes. */
76 	if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
77 		return *shadow_addr || memory_is_poisoned_1(addr + 15);
78 
79 	return *shadow_addr;
80 }
81 
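/*
 * The two helpers below scan a shadow range for the first non-zero byte:
 * byte by byte for short ranges and for the unaligned head and tail, and
 * eight bytes at a time (as u64 words) for the aligned middle part.
 */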
82 static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
83 					size_t size)
84 {
85 	while (size) {
86 		if (unlikely(*start))
87 			return (unsigned long)start;
88 		start++;
89 		size--;
90 	}
91 
92 	return 0;
93 }
94 
95 static __always_inline unsigned long memory_is_nonzero(const void *start,
96 						const void *end)
97 {
98 	unsigned int words;
99 	unsigned long ret;
100 	unsigned int prefix = (unsigned long)start % 8;
101 
102 	if (end - start <= 16)
103 		return bytes_is_nonzero(start, end - start);
104 
105 	if (prefix) {
106 		prefix = 8 - prefix;
107 		ret = bytes_is_nonzero(start, prefix);
108 		if (unlikely(ret))
109 			return ret;
110 		start += prefix;
111 	}
112 
113 	words = (end - start) / 8;
114 	while (words) {
115 		if (unlikely(*(u64 *)start))
116 			return bytes_is_nonzero(start, 8);
117 		start += 8;
118 		words--;
119 	}
120 
121 	return bytes_is_nonzero(start, (end - start) % 8);
122 }
123 
124 static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
125 {
126 	unsigned long ret;
127 
128 	ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
129 			kasan_mem_to_shadow(addr + size - 1) + 1);
130 
131 	if (unlikely(ret)) {
132 		const void *last_byte = addr + size - 1;
133 		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
134 		s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;
135 
136 		if (unlikely(ret != (unsigned long)last_shadow ||
137 			     last_accessible_byte >= *last_shadow))
138 			return true;
139 	}
140 	return false;
141 }
142 
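/*
 * Dispatch on the access size. For compile-time-constant sizes the
 * specialized helpers above are used, so e.g. an instrumented 4-byte load
 * ends up in memory_is_poisoned_2_4_8() with size == 4; any other size
 * falls back to the generic shadow scan in memory_is_poisoned_n().
 */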
143 static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
144 {
145 	if (__builtin_constant_p(size)) {
146 		switch (size) {
147 		case 1:
148 			return memory_is_poisoned_1(addr);
149 		case 2:
150 		case 4:
151 		case 8:
152 			return memory_is_poisoned_2_4_8(addr, size);
153 		case 16:
154 			return memory_is_poisoned_16(addr);
155 		default:
156 			BUILD_BUG();
157 		}
158 	}
159 
160 	return memory_is_poisoned_n(addr, size);
161 }
162 
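/*
 * Core range check: returns false when a bad access has been reported and
 * true otherwise (including when KASAN is not yet ready to check anything).
 * Zero-size accesses are always accepted; a wrapping address range or an
 * address without shadow metadata is reported right away.
 */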
163 static __always_inline bool check_region_inline(const void *addr,
164 						size_t size, bool write,
165 						unsigned long ret_ip)
166 {
167 	if (!kasan_arch_is_ready())
168 		return true;
169 
170 	if (unlikely(size == 0))
171 		return true;
172 
173 	if (unlikely(addr + size < addr))
174 		return !kasan_report(addr, size, write, ret_ip);
175 
176 	if (unlikely(!addr_has_metadata(addr)))
177 		return !kasan_report(addr, size, write, ret_ip);
178 
179 	if (likely(!memory_is_poisoned(addr, size)))
180 		return true;
181 
182 	return !kasan_report(addr, size, write, ret_ip);
183 }
184 
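/*
 * Out-of-line wrapper around check_region_inline(). This is the entry point
 * used from other KASAN files (e.g. the instrumented mem*() helpers) and by
 * the variable-size __asan_loadN/__asan_storeN hooks further down.
 */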
185 bool kasan_check_range(const void *addr, size_t size, bool write,
186 					unsigned long ret_ip)
187 {
188 	return check_region_inline(addr, size, write, ret_ip);
189 }
190 
191 bool kasan_byte_accessible(const void *addr)
192 {
193 	s8 shadow_byte;
194 
195 	if (!kasan_arch_is_ready())
196 		return true;
197 
198 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
199 
200 	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
201 }
202 
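/*
 * Shrinking or destroying a cache has to flush this cache's objects from
 * the KASAN quarantine, where freed objects are kept for a while to catch
 * use-after-free accesses before their memory is actually released.
 */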
203 void kasan_cache_shrink(struct kmem_cache *cache)
204 {
205 	kasan_quarantine_remove_cache(cache);
206 }
207 
208 void kasan_cache_shutdown(struct kmem_cache *cache)
209 {
210 	if (!__kmem_cache_empty(cache))
211 		kasan_quarantine_remove_cache(cache);
212 }
213 
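/*
 * Globals redzoning: for each instrumented global variable the compiler
 * emits a struct kasan_global descriptor. The object itself is unpoisoned
 * and the compiler-reserved redzone following it (size_with_redzone minus
 * the granule-aligned object size) is poisoned as KASAN_GLOBAL_REDZONE.
 */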
214 static void register_global(struct kasan_global *global)
215 {
216 	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);
217 
218 	kasan_unpoison(global->beg, global->size, false);
219 
220 	kasan_poison(global->beg + aligned_size,
221 		     global->size_with_redzone - aligned_size,
222 		     KASAN_GLOBAL_REDZONE, false);
223 }
224 
225 void __asan_register_globals(void *ptr, ssize_t size)
226 {
227 	int i;
228 	struct kasan_global *globals = ptr;
229 
230 	for (i = 0; i < size; i++)
231 		register_global(&globals[i]);
232 }
233 EXPORT_SYMBOL(__asan_register_globals);
234 
235 void __asan_unregister_globals(void *ptr, ssize_t size)
236 {
237 }
238 EXPORT_SYMBOL(__asan_unregister_globals);
239 
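/*
 * Hooks emitted by compiler instrumentation for fixed-size accesses. With
 * outline instrumentation, a 4-byte read such as "val = *p;" is roughly
 * compiled as:
 *
 *	__asan_load4(p);
 *	val = *p;
 *
 * The compiler may emit the _noabort variants instead; in the kernel they
 * are plain aliases, since a KASAN report does not abort execution.
 */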
240 #define DEFINE_ASAN_LOAD_STORE(size)					\
241 	void __asan_load##size(void *addr)				\
242 	{								\
243 		check_region_inline(addr, size, false, _RET_IP_);	\
244 	}								\
245 	EXPORT_SYMBOL(__asan_load##size);				\
246 	__alias(__asan_load##size)					\
247 	void __asan_load##size##_noabort(void *);			\
248 	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
249 	void __asan_store##size(void *addr)				\
250 	{								\
251 		check_region_inline(addr, size, true, _RET_IP_);	\
252 	}								\
253 	EXPORT_SYMBOL(__asan_store##size);				\
254 	__alias(__asan_store##size)					\
255 	void __asan_store##size##_noabort(void *);			\
256 	EXPORT_SYMBOL(__asan_store##size##_noabort)
257 
258 DEFINE_ASAN_LOAD_STORE(1);
259 DEFINE_ASAN_LOAD_STORE(2);
260 DEFINE_ASAN_LOAD_STORE(4);
261 DEFINE_ASAN_LOAD_STORE(8);
262 DEFINE_ASAN_LOAD_STORE(16);
263 
264 void __asan_loadN(void *addr, ssize_t size)
265 {
266 	kasan_check_range(addr, size, false, _RET_IP_);
267 }
268 EXPORT_SYMBOL(__asan_loadN);
269 
270 __alias(__asan_loadN)
271 void __asan_loadN_noabort(void *, ssize_t);
272 EXPORT_SYMBOL(__asan_loadN_noabort);
273 
274 void __asan_storeN(void *addr, ssize_t size)
275 {
276 	kasan_check_range(addr, size, true, _RET_IP_);
277 }
278 EXPORT_SYMBOL(__asan_storeN);
279 
280 __alias(__asan_storeN)
281 void __asan_storeN_noabort(void *, ssize_t);
282 EXPORT_SYMBOL(__asan_storeN_noabort);
283 
284 /* to shut up compiler complaints */
285 void __asan_handle_no_return(void) {}
286 EXPORT_SYMBOL(__asan_handle_no_return);
287 
288 /* Emitted by compiler to poison alloca()ed objects. */
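/*
 * The instrumented frame is laid out, from low to high addresses, as a
 * KASAN_ALLOCA_REDZONE_SIZE left redzone, the object itself starting at
 * addr, then padding up to the next KASAN_ALLOCA_REDZONE_SIZE boundary
 * followed by a right redzone of the same size.
 */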
289 void __asan_alloca_poison(void *addr, ssize_t size)
290 {
291 	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
292 	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
293 			rounded_up_size;
294 	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);
295 
296 	const void *left_redzone = (const void *)(addr -
297 			KASAN_ALLOCA_REDZONE_SIZE);
298 	const void *right_redzone = (const void *)(addr + rounded_up_size);
299 
300 	WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));
301 
302 	kasan_unpoison((const void *)(addr + rounded_down_size),
303 			size - rounded_down_size, false);
304 	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
305 		     KASAN_ALLOCA_LEFT, false);
306 	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
307 		     KASAN_ALLOCA_RIGHT, false);
308 }
309 EXPORT_SYMBOL(__asan_alloca_poison);
310 
311 /* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
312 void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
313 {
314 	if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
315 		return;
316 
317 	kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
318 }
319 EXPORT_SYMBOL(__asan_allocas_unpoison);
320 
321 /* Emitted by the compiler to [un]poison local variables. */
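/*
 * Note that addr here is already a shadow address computed by the compiler;
 * these helpers simply memset the requested poison pattern (0x00 to
 * unpoison, the 0xf* values for the various stack redzone markers) directly
 * into shadow memory.
 */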
322 #define DEFINE_ASAN_SET_SHADOW(byte) \
323 	void __asan_set_shadow_##byte(const void *addr, ssize_t size)	\
324 	{								\
325 		__memset((void *)addr, 0x##byte, size);			\
326 	}								\
327 	EXPORT_SYMBOL(__asan_set_shadow_##byte)
328 
329 DEFINE_ASAN_SET_SHADOW(00);
330 DEFINE_ASAN_SET_SHADOW(f1);
331 DEFINE_ASAN_SET_SHADOW(f2);
332 DEFINE_ASAN_SET_SHADOW(f3);
333 DEFINE_ASAN_SET_SHADOW(f5);
334 DEFINE_ASAN_SET_SHADOW(f8);
335 
336 /* Only allow cache merging when no per-object metadata is present. */
337 slab_flags_t kasan_never_merge(void)
338 {
339 	if (!kasan_requires_meta())
340 		return 0;
341 	return SLAB_KASAN;
342 }
343 
344 /*
345  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
346  * For larger allocations, larger redzones are used.
347  */
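/*
 * For example, a 64-byte object gets a 32-byte redzone and a 128-byte
 * object gets a 64-byte one.
 */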
348 static inline unsigned int optimal_redzone(unsigned int object_size)
349 {
350 	return
351 		object_size <= 64        - 16   ? 16 :
352 		object_size <= 128       - 32   ? 32 :
353 		object_size <= 512       - 64   ? 64 :
354 		object_size <= 4096      - 128  ? 128 :
355 		object_size <= (1 << 14) - 256  ? 256 :
356 		object_size <= (1 << 15) - 512  ? 512 :
357 		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
358 }
359 
360 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
361 			  slab_flags_t *flags)
362 {
363 	unsigned int ok_size;
364 	unsigned int optimal_size;
365 	unsigned int rem_free_meta_size;
366 	unsigned int orig_alloc_meta_offset;
367 
368 	if (!kasan_requires_meta())
369 		return;
370 
371 	/*
372 	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
373 	 * and that thus have per-object metadata.
374 	 * Currently this flag is used in two places:
375 	 * 1. In slab_ksize() to account for per-object metadata when
376 	 *    calculating the size of the accessible memory within the object.
377 	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
378 	 *    caches with per-object metadata.
379 	 */
380 	*flags |= SLAB_KASAN;
381 
382 	ok_size = *size;
383 
384 	/* Add alloc meta into redzone. */
385 	cache->kasan_info.alloc_meta_offset = *size;
386 	*size += sizeof(struct kasan_alloc_meta);
387 
388 	/*
389 	 * If alloc meta doesn't fit, don't add it.
390 	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
391 	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
392 	 * larger sizes.
393 	 */
394 	if (*size > KMALLOC_MAX_SIZE) {
395 		cache->kasan_info.alloc_meta_offset = 0;
396 		*size = ok_size;
397 		/* Continue, since free meta might still fit. */
398 	}
399 
400 	ok_size = *size;
401 	orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;
402 
403 	/*
404 	 * Add free meta into redzone when it's not possible to store
405 	 * it in the object. This is the case when:
406 	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
407 	 *    be touched after it was freed, or
408 	 * 2. Object has a constructor, which means it's expected to
409 	 *    retain its content until the next allocation, or
410 	 * 3. Object is too small and SLUB DEBUG is enabled. Storing
411 	 *    free meta that exceeds the object size would corrupt the
412 	 *    SLUB DEBUG metadata.
413 	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
414 	 * If the object is smaller than the free meta and SLUB DEBUG
415 	 * is not enabled, it is still possible to store part of the
416 	 * free meta in the object.
417 	 */
418 	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
419 		cache->kasan_info.free_meta_offset = *size;
420 		*size += sizeof(struct kasan_free_meta);
421 	} else if (cache->object_size < sizeof(struct kasan_free_meta)) {
422 		if (__slub_debug_enabled()) {
423 			cache->kasan_info.free_meta_offset = *size;
424 			*size += sizeof(struct kasan_free_meta);
425 		} else {
426 			rem_free_meta_size = sizeof(struct kasan_free_meta) -
427 									cache->object_size;
428 			*size += rem_free_meta_size;
429 			if (cache->kasan_info.alloc_meta_offset != 0)
430 				cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
431 		}
432 	}
433 
434 	/* If free meta doesn't fit, don't add it. */
435 	if (*size > KMALLOC_MAX_SIZE) {
436 		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
437 		cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
438 		*size = ok_size;
439 	}
440 
441 	/* Calculate size with optimal redzone. */
442 	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
443 	/* Limit it to KMALLOC_MAX_SIZE (relevant for SLAB only). */
444 	if (optimal_size > KMALLOC_MAX_SIZE)
445 		optimal_size = KMALLOC_MAX_SIZE;
446 	/* Use optimal size if the size with added metas is not large enough. */
447 	if (*size < optimal_size)
448 		*size = optimal_size;
449 }
450 
451 struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
452 					      const void *object)
453 {
454 	if (!cache->kasan_info.alloc_meta_offset)
455 		return NULL;
456 	return (void *)object + cache->kasan_info.alloc_meta_offset;
457 }
458 
459 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
460 					    const void *object)
461 {
462 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
463 	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
464 		return NULL;
465 	return (void *)object + cache->kasan_info.free_meta_offset;
466 }
467 
468 void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
469 {
470 	struct kasan_alloc_meta *alloc_meta;
471 	struct kasan_free_meta *free_meta;
472 
473 	alloc_meta = kasan_get_alloc_meta(cache, object);
474 	if (alloc_meta)
475 		__memset(alloc_meta, 0, sizeof(*alloc_meta));
476 	free_meta = kasan_get_free_meta(cache, object);
477 	if (free_meta)
478 		__memset(free_meta, 0, sizeof(*free_meta));
479 }
480 
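/*
 * Report how much KASAN metadata the cache stores per object: with
 * in_object == true, the free meta kept at the start of the object (only
 * when free_meta_offset is 0); otherwise the alloc meta plus any free meta
 * placed in the redzone.
 */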
481 size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
482 {
483 	struct kasan_cache *info = &cache->kasan_info;
484 
485 	if (!kasan_requires_meta())
486 		return 0;
487 
488 	if (in_object)
489 		return (info->free_meta_offset ?
490 			0 : sizeof(struct kasan_free_meta));
491 	else
492 		return (info->alloc_meta_offset ?
493 			sizeof(struct kasan_alloc_meta) : 0) +
494 			((info->free_meta_offset &&
495 			info->free_meta_offset != KASAN_NO_FREE_META) ?
496 			sizeof(struct kasan_free_meta) : 0);
497 }
498 
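/*
 * Record an auxiliary call stack (for example from call_rcu() or work
 * queueing) in the object's alloc meta: the newest stack goes into
 * aux_stack[0], the previous one shifts to aux_stack[1], and the stack
 * depot reference of the entry being discarded is dropped first.
 */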
499 static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
500 {
501 	struct slab *slab = kasan_addr_to_slab(addr);
502 	struct kmem_cache *cache;
503 	struct kasan_alloc_meta *alloc_meta;
504 	void *object;
505 
506 	if (is_kfence_address(addr) || !slab)
507 		return;
508 
509 	cache = slab->slab_cache;
510 	object = nearest_obj(cache, slab, addr);
511 	alloc_meta = kasan_get_alloc_meta(cache, object);
512 	if (!alloc_meta)
513 		return;
514 
515 	stack_depot_put(alloc_meta->aux_stack[1]);
516 	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
517 	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
518 }
519 
520 void kasan_record_aux_stack(void *addr)
521 {
522 	return __kasan_record_aux_stack(addr,
523 			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
524 }
525 
526 void kasan_record_aux_stack_noalloc(void *addr)
527 {
528 	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
529 }
530 
531 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
532 {
533 	struct kasan_alloc_meta *alloc_meta;
534 
535 	alloc_meta = kasan_get_alloc_meta(cache, object);
536 	if (!alloc_meta)
537 		return;
538 
539 	/* Evict previous stack traces (might exist for krealloc). */
540 	stack_depot_put(alloc_meta->alloc_track.stack);
541 	stack_depot_put(alloc_meta->aux_stack[0]);
542 	stack_depot_put(alloc_meta->aux_stack[1]);
543 	__memset(alloc_meta, 0, sizeof(*alloc_meta));
544 
545 	kasan_set_track(&alloc_meta->alloc_track, flags);
546 }
547 
548 void kasan_save_free_info(struct kmem_cache *cache, void *object)
549 {
550 	struct kasan_free_meta *free_meta;
551 
552 	free_meta = kasan_get_free_meta(cache, object);
553 	if (!free_meta)
554 		return;
555 
556 	kasan_set_track(&free_meta->free_track, 0);
557 	/* The object was freed and has free track set. */
558 	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
559 }
560