xref: /linux/mm/kmsan/shadow.c (revision 3c206509826094e85ead0b056f484db96829248d)
// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN shadow implementation.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <asm/kmsan.h>
#include <asm/tlbflush.h>
#include <linux/cacheflush.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>

#include "../internal.h"
#include "kmsan.h"

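/* Accessors for the shadow and origin pages attached to a struct page. */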
#define shadow_page_for(page) ((page)->kmsan_shadow)

#define origin_page_for(page) ((page)->kmsan_origin)

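/*
 * Return the kernel virtual address of the shadow (or origin) page for
 * @page. Only valid when the page actually has metadata attached.
 */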
static void *shadow_ptr_for(struct page *page)
{
	return page_address(shadow_page_for(page));
}

static void *origin_ptr_for(struct page *page)
{
	return page_address(origin_page_for(page));
}

static bool page_has_metadata(struct page *page)
{
	return shadow_page_for(page) && origin_page_for(page);
}

static void set_no_shadow_origin_page(struct page *page)
{
	shadow_page_for(page) = NULL;
	origin_page_for(page) = NULL;
}

/*
 * Dummy load and store pages to be used when the real metadata is unavailable.
 * There are separate pages for loads and stores, so that every load returns
 * zero, and no store can affect other loads.
 */
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);

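/*
 * vmalloc and module addresses have their metadata in dedicated regions, so
 * the shadow/origin address is obtained by adding the offset of @addr within
 * its region to the start of the corresponding metadata region. Returns 0
 * for addresses that belong to neither.
 */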
static unsigned long vmalloc_meta(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr, off;

	KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
	if (kmsan_internal_is_vmalloc_addr(addr)) {
		off = addr64 - VMALLOC_START;
		return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
					  KMSAN_VMALLOC_SHADOW_START);
	}
	if (kmsan_internal_is_module_addr(addr)) {
		off = addr64 - MODULES_VADDR;
		return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
					  KMSAN_MODULES_SHADOW_START);
	}
	return 0;
}

static struct page *virt_to_page_or_null(void *vaddr)
{
	if (kmsan_virt_addr_valid(vaddr))
		return virt_to_page(vaddr);
	else
		return NULL;
}

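/*
 * Return pointers to the shadow and origin for a memory access of @size bytes
 * at @address. When no metadata is available (or KMSAN is disabled), the
 * access is redirected to one of the dummy pages instead.
 */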
struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
						     bool store)
{
	struct shadow_origin_ptr ret;
	void *shadow;

	/*
	 * Even if we redirect this memory access to the dummy page, it will
	 * go out of bounds.
	 */
	KMSAN_WARN_ON(size > PAGE_SIZE);

	if (!kmsan_enabled)
		goto return_dummy;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
	shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
	if (!shadow)
		goto return_dummy;

	ret.shadow = shadow;
	ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
	return ret;

return_dummy:
	if (store) {
		/* Ignore this store. */
		ret.shadow = dummy_store_page;
		ret.origin = dummy_store_page;
	} else {
		/* This load will return zero. */
		ret.shadow = dummy_load_page;
		ret.origin = dummy_load_page;
	}
	return ret;
}

/*
 * Obtain the shadow or origin pointer for the given address, or NULL if there's
 * none. The caller must check the return value for being non-NULL if needed.
 * The return value of this function should not depend on whether we're in the
 * runtime or not.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	u64 addr = (u64)address, pad, off;
	struct page *page;

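	/*
	 * Origins are tracked at KMSAN_ORIGIN_SIZE granularity, so align the
	 * address down to keep the returned origin pointer aligned.
	 */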
	if (is_origin && !IS_ALIGNED(addr, KMSAN_ORIGIN_SIZE)) {
		pad = addr % KMSAN_ORIGIN_SIZE;
		addr -= pad;
	}
	address = (void *)addr;
	if (kmsan_internal_is_vmalloc_addr(address) ||
	    kmsan_internal_is_module_addr(address))
		return (void *)vmalloc_meta(address, is_origin);

	page = virt_to_page_or_null(address);
	if (!page)
		return NULL;
	if (!page_has_metadata(page))
		return NULL;
	off = addr % PAGE_SIZE;

	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
}

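/*
 * Copy the shadow and origin of @src to @dst. If @src has no metadata, @dst
 * is simply marked as fully initialized instead.
 */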
void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	if (!dst || !page_has_metadata(dst))
		return;
	if (!src || !page_has_metadata(src)) {
		kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
					       /*checked*/ false);
		return;
	}

	kmsan_enter_runtime();
	__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
	__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
	kmsan_leave_runtime();
}

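/*
 * Set up the metadata of a newly allocated page range: __GFP_ZERO allocations
 * get zeroed (initialized) metadata, other allocations are marked
 * uninitialized and have the allocation stack recorded as their origin,
 * unless the allocation happens from within the KMSAN runtime.
 */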
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
{
	bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
	struct page *shadow, *origin;
	depot_stack_handle_t handle;
	int pages = 1 << order;

	if (!page)
		return;

	shadow = shadow_page_for(page);
	origin = origin_page_for(page);

	if (initialized) {
		__memset(page_address(shadow), 0, PAGE_SIZE * pages);
		__memset(page_address(origin), 0, PAGE_SIZE * pages);
		return;
	}

	/* Zero pages allocated by the runtime should also be initialized. */
	if (kmsan_in_runtime())
		return;

	__memset(page_address(shadow), -1, PAGE_SIZE * pages);
	kmsan_enter_runtime();
	handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
	kmsan_leave_runtime();
	/*
	 * Addresses are page-aligned, pages are contiguous, so it's ok
	 * to just fill the origin pages with @handle.
	 */
	for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
		((depot_stack_handle_t *)page_address(origin))[i] = handle;
}

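/* Mark the memory of a page that is being freed as uninitialized. */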
void kmsan_free_page(struct page *page, unsigned int order)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(page_address(page),
				     PAGE_SIZE << compound_order(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

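/*
 * Mirror a vmap operation in the metadata regions: map the shadow and origin
 * pages of @pages at the addresses corresponding to [@start, @end).
 */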
void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				    pgprot_t prot, struct page **pages,
				    unsigned int page_shift)
{
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
	int nr, mapped;

	if (!kmsan_enabled)
		return;

	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
		return;

	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
	if (!s_pages || !o_pages)
		goto ret;
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
	}
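	/*
	 * Metadata is always mapped with PAGE_KERNEL: the second assignment
	 * below overrides the protection requested for the data pages.
	 */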
	prot = __pgprot(pgprot_val(prot) | _PAGE_NX);
	prot = PAGE_KERNEL;

	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
	origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
	KMSAN_WARN_ON(mapped);
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
	KMSAN_WARN_ON(mapped);
	kmsan_leave_runtime();
	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);

ret:
	kfree(s_pages);
	kfree(o_pages);
}

/* Allocate metadata for pages allocated at boot time. */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
{
	struct page *shadow_p, *origin_p;
	void *shadow, *origin;
	struct page *page;
	u64 size;

	start = (void *)ALIGN_DOWN((u64)start, PAGE_SIZE);
	size = ALIGN((u64)end - (u64)start, PAGE_SIZE);
	shadow = memblock_alloc(size, PAGE_SIZE);
	origin = memblock_alloc(size, PAGE_SIZE);
	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
		page = virt_to_page_or_null((char *)start + addr);
		shadow_p = virt_to_page_or_null((char *)shadow + addr);
		set_no_shadow_origin_page(shadow_p);
		shadow_page_for(page) = shadow_p;
		origin_p = virt_to_page_or_null((char *)origin + addr);
		set_no_shadow_origin_page(origin_p);
		origin_page_for(page) = origin_p;
	}
}

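/*
 * Attach the freshly allocated @shadow and @origin pages to each of the
 * 1 << @order pages starting at @page, and clear the metadata pointers of
 * the metadata pages themselves.
 */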
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order)
{
	for (int i = 0; i < (1 << order); i++) {
		set_no_shadow_origin_page(&shadow[i]);
		set_no_shadow_origin_page(&origin[i]);
		shadow_page_for(&page[i]) = &shadow[i];
		origin_page_for(&page[i]) = &origin[i];
	}
}