xref: /linux/mm/kasan/shadow.c (revision b61104e7a6349bd2c2b3e2fb3260d87f15eda8f4)
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

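/*
 * Out-of-line range checks backing the kasan_check_read()/kasan_check_write()
 * wrappers from <linux/kasan-checks.h>, used e.g. by the instrumented atomic
 * helpers. They report invalid accesses; the mem*() wrappers below use the
 * result to skip the actual memory operation on failure.
 */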
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
/*
 * CONFIG_GENERIC_ENTRY relies on compiler-emitted mem*() calls not being
 * instrumented. KASAN-enabled toolchains should emit __asan_mem*() functions
 * for the sites they want to instrument.
 *
 * If we have a compiler that can instrument memintrinsics, never override
 * these, so that non-instrumented files can safely consider them builtins.
 */
#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
#endif

void *__asan_memset(void *addr, int c, ssize_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}
EXPORT_SYMBOL(__asan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__asan_memmove(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
EXPORT_SYMBOL(__asan_memmove);
#endif

void *__asan_memcpy(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
EXPORT_SYMBOL(__asan_memcpy);

#ifdef CONFIG_KASAN_SW_TAGS
void *__hwasan_memset(void *addr, int c, ssize_t len) __alias(__asan_memset);
EXPORT_SYMBOL(__hwasan_memset);
#ifdef __HAVE_ARCH_MEMMOVE
void *__hwasan_memmove(void *dest, const void *src, ssize_t len) __alias(__asan_memmove);
EXPORT_SYMBOL(__hwasan_memmove);
#endif
void *__hwasan_memcpy(void *dest, const void *src, ssize_t len) __alias(__asan_memcpy);
EXPORT_SYMBOL(__hwasan_memcpy);
#endif

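/*
 * Each shadow byte tracks one KASAN_GRANULE_SIZE-sized granule of memory
 * (8 bytes for generic KASAN, 16 for software tag-based KASAN), so, e.g.,
 * with 8-byte granules poisoning a 64-byte object writes @value into
 * 64 / 8 == 8 shadow bytes. Both @addr and @size must be granule-aligned.
 */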
void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	void *shadow_start, *shadow_end;

	if (!kasan_enabled())
		return;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	shadow_start = kasan_mem_to_shadow(addr);
	shadow_end = kasan_mem_to_shadow(addr + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL_GPL(kasan_poison);

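/*
 * Generic KASAN encodes a partially accessible granule by storing the number
 * of accessible leading bytes in its shadow byte: for a 13-byte object, the
 * first granule is fully unpoisoned and the last shadow byte is set to
 * 13 & KASAN_GRANULE_MASK == 5.
 */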
#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
	if (!kasan_enabled())
		return;

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
#endif

void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;

	/* Unpoison all granules that cover the object. */
	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(addr, size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
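/*
 * Check whether the shadow for @addr is already backed by a page table
 * mapping; a leaf entry at the PUD or PMD level is typically a huge-page
 * mapping installed at boot.
 */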
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;
	if (pud_leaf(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_leaf(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(ptep_get(pte));
}

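/*
 * Allocate or release the shadow for a hot(un)plugged memory block. Onlining
 * nr_pages pages needs nr_pages >> KASAN_SHADOW_SCALE_SHIFT shadow pages:
 * e.g., with generic KASAN's scale shift of 3 and 4 KiB pages, a 32768-page
 * (128 MiB) block needs 4096 shadow pages (16 MiB), mapped here via
 * __vmalloc_node_range().
 */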
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, then it must have been
		 * mapped during boot. This can happen when onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that we are
		 * in the latter case.
		 *
		 * Currently it's not possible to free shadow mapped during
		 * boot by kasan_init(): the code to do that hasn't been
		 * written yet. So we'll just leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, DEFAULT_CALLBACK_PRI);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

void __init __weak kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{
}

struct vmalloc_populate_data {
	unsigned long start;
	struct page **pages;
};

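/*
 * Map one pre-allocated shadow page at @addr. Called via
 * apply_to_page_range(): the page is taken from data->pages, pre-poisoned
 * with KASAN_VMALLOC_INVALID, and its array slot is cleared only once it is
 * actually installed; pages left in the array (because another CPU won the
 * race for the PTE) are freed by the caller.
 */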
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *_data)
{
	struct vmalloc_populate_data *data = _data;
	struct page *page;
	pte_t pte;
	int index;

	arch_leave_lazy_mmu_mode();

	index = PFN_DOWN(addr - data->start);
	page = data->pages[index];
	__memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(ptep_get(ptep)))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		data->pages[index] = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);

	arch_enter_lazy_mmu_mode();

	return 0;
}

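/* Free the pages still owned by the array and clear their slots. */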
static void ___free_pages_bulk(struct page **pages, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (pages[i]) {
			__free_pages(pages[i], 0);
			pages[i] = NULL;
		}
	}
}

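/*
 * Fill @pages with @nr_pages order-0 pages. alloc_pages_bulk() may return
 * fewer pages than requested, hence the loop; on complete failure, free
 * whatever was allocated so far.
 */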
static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
{
	unsigned long nr_populated, nr_total = nr_pages;
	struct page **page_array = pages;

	while (nr_pages) {
		nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
		if (!nr_populated) {
			___free_pages_bulk(page_array, nr_total - nr_pages);
			return -ENOMEM;
		}
		pages += nr_populated;
		nr_pages -= nr_populated;
	}

	return 0;
}

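/*
 * Populate the shadow range [start, end) in batches, using a single spare
 * page as the pointer array: PAGE_SIZE / sizeof(struct page *) entries per
 * batch, i.e. 512 shadow pages (2 MiB of shadow) with 4 KiB pages and
 * 64-bit pointers.
 */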
static int __kasan_populate_vmalloc_do(unsigned long start, unsigned long end, gfp_t gfp_mask)
{
	unsigned long nr_pages, nr_total = PFN_UP(end - start);
	struct vmalloc_populate_data data;
	unsigned int flags;
	int ret = 0;

	data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
	if (!data.pages)
		return -ENOMEM;

	while (nr_total) {
		nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
		ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
		if (ret)
			break;

		data.start = start;

		/*
		 * Page table allocations ignore the external gfp mask, so
		 * enforce it via the scope API.
		 */
		flags = memalloc_apply_gfp_scope(gfp_mask);
		ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
					  kasan_populate_vmalloc_pte, &data);
		memalloc_restore_scope(flags);

		___free_pages_bulk(data.pages, nr_pages);
		if (ret)
			break;

		start += nr_pages * PAGE_SIZE;
		nr_total -= nr_pages;
	}

	free_page((unsigned long)data.pages);

	return ret;
}

int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);

	/*
	 * User Mode Linux maps enough shadow memory for all of virtual
	 * memory at boot, so it doesn't need to allocate more when vmalloc
	 * is used; just clear the shadow.
	 *
	 * The remaining CONFIG_UML checks in this file exist for the same
	 * reason.
	 */
	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
		return 0;
	}

	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
	shadow_end = PAGE_ALIGN(shadow_end);

	ret = __kasan_populate_vmalloc_do(shadow_start, shadow_end, gfp_mask);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				  CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				  CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val	x = LOAD p
	 * // rest of vmalloc process		<data dependency>
	 * STORE p, a				LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

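/*
 * Tear down one shadow PTE and free its backing page. init_mm's page-table
 * lock serializes this against kasan_populate_vmalloc_pte() installing a
 * page at the same address.
 */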
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	pte_t pte;
	int none;

	arch_leave_lazy_mmu_mode();

	spin_lock(&init_mm.page_table_lock);
	pte = ptep_get(ptep);
	none = pte_none(pte);
	if (likely(!none))
		pte_clear(&init_mm, addr, ptep);
	spin_unlock(&init_mm.page_table_lock);

	if (likely(!none))
		__free_page(pfn_to_page(pte_pfn(pte)));

	arch_enter_lazy_mmu_mode();

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labeled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void __kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end,
			   unsigned long flags)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		if (IS_ENABLED(CONFIG_UML)) {
			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
			return;
		}

		if (flags & KASAN_VMALLOC_PAGE_RANGE)
			apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);

		if (flags & KASAN_VMALLOC_TLB_FLUSH)
			flush_tlb_kernel_range((unsigned long)shadow_start,
					       (unsigned long)shadow_end);
	}
}

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags)
{
	/*
	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
	 * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
	 * Software KASAN modes can't optimize zeroing memory by combining it
	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
	 */

	if (!is_vmalloc_or_module_addr(start))
		return (void *)start;

	/*
	 * Don't tag executable memory with the tag-based mode.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
		return (void *)start;

	start = set_tag(start, kasan_random_tag());
	kasan_unpoison(start, size, false);
	return (void *)start;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

#else /* CONFIG_KASAN_VMALLOC */

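/*
 * Without CONFIG_KASAN_VMALLOC, shadow is allocated per module mapping:
 * size is rounded up to whole granules and divided by the granule size,
 * so, e.g., with generic KASAN's scale shift of 3, a 1 MiB module mapping
 * needs 1 MiB >> 3 == 128 KiB of shadow, rounded up to whole pages.
 */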
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
		return 0;
	}

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		struct vm_struct *vm = find_vm_area(addr);
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		vm->flags |= VM_KASAN;
		kmemleak_ignore(ret);

		if (vm->flags & VM_DEFER_KMEMLEAK)
			kmemleak_vmalloc(vm, size, gfp_mask);

		return 0;
	}

	return -ENOMEM;
}

void kasan_free_module_shadow(const struct vm_struct *vm)
{
	if (IS_ENABLED(CONFIG_UML))
		return;

	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif