// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN shadow implementation.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <asm/kmsan.h>
#include <asm/tlbflush.h>
#include <linux/cacheflush.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>

#include "../internal.h"
#include "kmsan.h"

#define shadow_page_for(page) ((page)->kmsan_shadow)

#define origin_page_for(page) ((page)->kmsan_origin)

static void *shadow_ptr_for(struct page *page)
{
	return page_address(shadow_page_for(page));
}

static void *origin_ptr_for(struct page *page)
{
	return page_address(origin_page_for(page));
}

static bool page_has_metadata(struct page *page)
{
	return shadow_page_for(page) && origin_page_for(page);
}

static void set_no_shadow_origin_page(struct page *page)
{
	shadow_page_for(page) = NULL;
	origin_page_for(page) = NULL;
}

/*
 * Dummy load and store pages to be used when the real metadata is
 * unavailable. There are separate pages for loads and stores, so that every
 * load returns zero and no store can affect a subsequent load.
 */
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);

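/*
 * Illustrative sketch (not part of the original file): with the dummy pages
 * above, an access whose metadata cannot be found is handled roughly as
 *
 *	ptrs = kmsan_get_shadow_origin_ptr(addr, 4, false);	// 4-byte load
 *	// ptrs.shadow == dummy_load_page, which stays all-zero, so the
 *	// loaded bytes are always treated as initialized.
 *
 * while a store in the same situation is redirected into dummy_store_page,
 * whose contents are never read back.
 */
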
static unsigned long vmalloc_meta(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr, off;

	KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
	if (kmsan_internal_is_vmalloc_addr(addr)) {
		off = addr64 - VMALLOC_START;
		return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
					  KMSAN_VMALLOC_SHADOW_START);
	}
	if (kmsan_internal_is_module_addr(addr)) {
		off = addr64 - MODULES_VADDR;
		return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
					  KMSAN_MODULES_SHADOW_START);
	}
	return 0;
}

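/*
 * Worked example (illustrative): the dedicated metadata regions simply
 * mirror the vmalloc and modules regions at a fixed offset, so for a
 * vmalloc address the shadow byte lives at
 *
 *	KMSAN_VMALLOC_SHADOW_START + (addr - VMALLOC_START)
 *
 * and the origin word at the same offset from KMSAN_VMALLOC_ORIGIN_START
 * (with addr aligned down to KMSAN_ORIGIN_SIZE first, as kmsan_get_metadata()
 * does). Addresses outside both regions yield 0, meaning "no metadata".
 */
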
static struct page *virt_to_page_or_null(void *vaddr)
{
	if (kmsan_virt_addr_valid(vaddr))
		return virt_to_page(vaddr);
	else
		return NULL;
}

struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
						     bool store)
{
	struct shadow_origin_ptr ret;
	void *shadow;

	/*
	 * Even if we redirect this memory access to the dummy page, it will
	 * go out of bounds.
	 */
	KMSAN_WARN_ON(size > PAGE_SIZE);

	if (!kmsan_enabled)
		goto return_dummy;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
	shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
	if (!shadow)
		goto return_dummy;

	ret.shadow = shadow;
	ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
	return ret;

return_dummy:
	if (store) {
		/* Ignore this store. */
		ret.shadow = dummy_store_page;
		ret.origin = dummy_store_page;
	} else {
		/* This load will return zero. */
		ret.shadow = dummy_load_page;
		ret.origin = dummy_load_page;
	}
	return ret;
}

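/*
 * Usage sketch (illustrative): the compiler-inserted hooks in
 * mm/kmsan/instrumentation.c fetch metadata for each instrumented access
 * roughly like this:
 *
 *	struct shadow_origin_ptr sp;
 *
 *	sp = kmsan_get_shadow_origin_ptr(addr, 4, false);	// 4-byte load
 *	// sp.shadow and sp.origin are always safe to dereference for the
 *	// requested size, falling back to the dummy pages if necessary.
 */
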
/*
 * Obtain the shadow or origin pointer for the given address, or NULL if
 * there's none. The caller must check the return value for NULL before
 * dereferencing it. The return value of this function should not depend on
 * whether we're in the runtime or not.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	u64 addr = (u64)address, off;
	struct page *page;
	void *ret;

	if (is_origin)
		addr = ALIGN_DOWN(addr, KMSAN_ORIGIN_SIZE);
	address = (void *)addr;
	if (kmsan_internal_is_vmalloc_addr(address) ||
	    kmsan_internal_is_module_addr(address))
		return (void *)vmalloc_meta(address, is_origin);

	ret = arch_kmsan_get_meta_or_null(address, is_origin);
	if (ret)
		return ret;

	page = virt_to_page_or_null(address);
	if (!page)
		return NULL;
	if (!page_has_metadata(page))
		return NULL;
	off = offset_in_page(addr);

	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
}

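/*
 * Usage sketch (illustrative): KMSAN runtime code that walks memory
 * page-by-page typically does something like
 *
 *	char *shadow = kmsan_get_metadata(addr, KMSAN_META_SHADOW);
 *	u32 *origin = kmsan_get_metadata(addr, KMSAN_META_ORIGIN);
 *
 *	if (!shadow)
 *		return;		// no metadata for this address
 *
 * KMSAN_META_SHADOW and KMSAN_META_ORIGIN (defined in kmsan.h) are just
 * named aliases for the is_origin argument.
 */
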
void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	if (!dst || !page_has_metadata(dst))
		return;
	if (!src || !page_has_metadata(src)) {
		kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
					       /*checked*/ false);
		return;
	}

	kmsan_enter_runtime();
	__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
	__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_copy_page_meta);

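/*
 * Caller sketch (illustrative): this hook is meant to run wherever the
 * kernel duplicates a page, e.g. copy_highpage() in include/linux/highmem.h
 * does roughly
 *
 *	copy_page(vto, vfrom);
 *	kmsan_copy_page_meta(to, from);
 *
 * so that the copy inherits the (un)initialized state of the original
 * instead of being treated as fully initialized or fully uninitialized.
 */
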
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
{
	bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
	struct page *shadow, *origin;
	depot_stack_handle_t handle;
	int pages = 1 << order;

	if (!page)
		return;

	shadow = shadow_page_for(page);
	origin = origin_page_for(page);

	if (initialized) {
		__memset(page_address(shadow), 0, PAGE_SIZE * pages);
		__memset(page_address(origin), 0, PAGE_SIZE * pages);
		return;
	}

	/* Zero pages allocated by the runtime should also be initialized. */
	if (kmsan_in_runtime())
		return;

	__memset(page_address(shadow), -1, PAGE_SIZE * pages);
	kmsan_enter_runtime();
	handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
	kmsan_leave_runtime();
	/*
	 * Addresses are page-aligned, pages are contiguous, so it's ok
	 * to just fill the origin pages with @handle.
	 */
	for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
		((depot_stack_handle_t *)page_address(origin))[i] = handle;
}

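/*
 * Context (illustrative): this hook runs from the page allocator
 * (post_alloc_hook() in mm/page_alloc.c). A non-__GFP_ZERO allocation ends
 * up with all-0xff shadow ("every bit uninitialized") and with each 4-byte
 * origin slot holding the allocation stack handle, which later shows up as
 * "Uninit was created at:" in KMSAN reports. With 4 KiB pages and order 1,
 * for example, 2 * PAGE_SIZE / sizeof(handle) == 2048 identical handles are
 * written.
 */
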
void kmsan_free_page(struct page *page, unsigned int order)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(page_address(page),
				     page_size(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

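/*
 * Counterpart of kmsan_alloc_page(), expected on the page-freeing path
 * (free_pages_prepare() in mm/page_alloc.c): the freed contents are poisoned
 * again, so later reads through a dangling mapping are reported as uses of
 * uninitialized memory.
 */
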
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift)
{
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
	int nr, mapped, err = 0;

	if (!kmsan_enabled)
		return 0;

	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
		return 0;

	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
	if (!s_pages || !o_pages) {
		err = -ENOMEM;
		goto ret;
	}
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
	}
	prot = PAGE_KERNEL;

	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
	origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
	if (mapped) {
		/* Keep kmsan_enter/leave_runtime() balanced on failure. */
		kmsan_leave_runtime();
		err = mapped;
		goto ret;
	}
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
	if (mapped) {
		kmsan_leave_runtime();
		err = mapped;
		goto ret;
	}
	kmsan_leave_runtime();
	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);

ret:
	kfree(s_pages);
	kfree(o_pages);
	return err;
}

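/*
 * Caller sketch (illustrative): vmap_pages_range_noflush() in mm/vmalloc.c
 * maps the metadata before the data pages themselves, roughly:
 *
 *	ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
 *					     page_shift);
 *	if (ret)
 *		return ret;
 *	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 *
 * The shadow/origin pages were attached to each struct page at allocation
 * time; here they are merely mapped at the mirrored vmalloc offsets.
 */
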
/* Allocate metadata for pages allocated at boot time. */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
{
	struct page *shadow_p, *origin_p;
	void *shadow, *origin;
	struct page *page;
	u64 size;

	start = (void *)PAGE_ALIGN_DOWN((u64)start);
	size = PAGE_ALIGN((u64)end - (u64)start);
	shadow = memblock_alloc(size, PAGE_SIZE);
	origin = memblock_alloc(size, PAGE_SIZE);

	if (!shadow || !origin)
		panic("%s: Failed to allocate metadata memory for early boot range of size %llu",
		      __func__, size);

	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
		page = virt_to_page_or_null((char *)start + addr);
		shadow_p = virt_to_page((char *)shadow + addr);
		set_no_shadow_origin_page(shadow_p);
		shadow_page_for(page) = shadow_p;
		origin_p = virt_to_page((char *)origin + addr);
		set_no_shadow_origin_page(origin_p);
		origin_page_for(page) = origin_p;
	}
}

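/*
 * Context (illustrative): kmsan_init_shadow() in mm/kmsan/init.c calls this
 * for memory handed out before the page allocator was available, such as the
 * kernel image and early per-CPU areas, so that those pages also get
 * shadow/origin pages attached.
 */
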
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order)
{
	for (int i = 0; i < (1 << order); i++) {
		set_no_shadow_origin_page(&shadow[i]);
		set_no_shadow_origin_page(&origin[i]);
		shadow_page_for(&page[i]) = &shadow[i];
		origin_page_for(&page[i]) = &origin[i];
	}
}
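
/*
 * Context (illustrative): used by mm/kmsan/init.c when memblock ranges held
 * back during early boot are released to the page allocator: roughly,
 * same-order blocks are grouped into triples, and two blocks of each triple
 * become the shadow and origin metadata for the third.
 */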