// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN compiler API.
 *
 * This file implements __msan_XXX hooks that Clang inserts into the code
 * compiled with -fsanitize=kernel-memory.
 * See Documentation/dev-tools/kmsan.rst for more information on how KMSAN
 * instrumentation works.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include "kmsan.h"
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/kmsan_string.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

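/*
 * To illustrate what the instrumentation looks like (a hand-written sketch,
 * not actual compiler output): for an 8-byte store like *p = v, Clang emits
 * code roughly equivalent to
 *
 *	struct shadow_origin_ptr sp = __msan_metadata_ptr_for_store_8(p);
 *	*(u64 *)sp.shadow = shadow_of_v;
 *	if (shadow_of_v)
 *		*(depot_stack_handle_t *)sp.origin = __msan_chain_origin(origin_of_v);
 *
 * where shadow_of_v/origin_of_v stand for the metadata Clang tracks for @v.
 * Loads work the other way around (the value's shadow is read from
 * sp.shadow), and a use of an uninitialized value, e.g. in a branch
 * condition, results in a call to __msan_warning() with the value's origin.
 */
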
static inline bool is_bad_asm_addr(void *addr, uintptr_t size, bool is_store)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) &&
	    (u64)addr < TASK_SIZE)
		return true;
	if (!kmsan_get_metadata(addr, KMSAN_META_SHADOW))
		return true;
	return false;
}

static inline struct shadow_origin_ptr
get_shadow_origin_ptr(void *addr, u64 size, bool store)
{
	unsigned long ua_flags = user_access_save();
	struct shadow_origin_ptr ret;

	ret = kmsan_get_shadow_origin_ptr(addr, size, store);
	user_access_restore(ua_flags);
	return ret;
}

/*
 * KMSAN instrumentation functions follow. They are not declared elsewhere in
 * the kernel code, so they are preceded by prototypes, to silence
 * -Wmissing-prototypes warnings.
 */

/* Get shadow and origin pointers for a memory load with non-standard size. */
struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
							uintptr_t size);
struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
							uintptr_t size)
{
	return get_shadow_origin_ptr(addr, size, /*store*/ false);
}
EXPORT_SYMBOL(__msan_metadata_ptr_for_load_n);

/* Get shadow and origin pointers for a memory store with non-standard size. */
struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
							 uintptr_t size);
struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
							 uintptr_t size)
{
	return get_shadow_origin_ptr(addr, size, /*store*/ true);
}
EXPORT_SYMBOL(__msan_metadata_ptr_for_store_n);

/*
 * Declare functions that obtain shadow/origin pointers for loads and stores
 * with fixed size.
 */
#define DECLARE_METADATA_PTR_GETTER(size)                                  \
	struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size(      \
		void *addr);                                               \
	struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size(      \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ false); \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_load_##size);                \
	struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size(     \
		void *addr);                                               \
	struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size(     \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ true);  \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_store_##size)

DECLARE_METADATA_PTR_GETTER(1);
DECLARE_METADATA_PTR_GETTER(2);
DECLARE_METADATA_PTR_GETTER(4);
DECLARE_METADATA_PTR_GETTER(8);

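/*
 * For reference, DECLARE_METADATA_PTR_GETTER(1) expands to the following
 * (prototypes, EXPORT_SYMBOL() and inline comments omitted):
 *
 *	struct shadow_origin_ptr __msan_metadata_ptr_for_load_1(void *addr)
 *	{
 *		return get_shadow_origin_ptr(addr, 1, false);
 *	}
 *	struct shadow_origin_ptr __msan_metadata_ptr_for_store_1(void *addr)
 *	{
 *		return get_shadow_origin_ptr(addr, 1, true);
 *	}
 */
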
/*
 * Handle a memory store performed by inline assembly. KMSAN conservatively
 * attempts to unpoison the outputs of asm() directives to prevent false
 * positives caused by missed stores.
 *
 * __msan_instrument_asm_store() may be called for inline assembly code when
 * entering or leaving IRQ. We omit the check for kmsan_in_runtime() to ensure
 * the memory written to in these cases is also marked as initialized.
 */
void __msan_instrument_asm_store(void *addr, uintptr_t size);
void __msan_instrument_asm_store(void *addr, uintptr_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled)
		return;

	ua_flags = user_access_save();
	/*
	 * Most of the accesses are below 32 bytes. The exceptions so far are
	 * clwb() (64 bytes), FPU state (512 bytes) and chsc() (4096 bytes).
	 */
	if (size > 4096) {
		WARN_ONCE(1, "assembly store size too big: %ld\n", size);
		size = 8;
	}
	if (is_bad_asm_addr(addr, size, /*is_store*/ true)) {
		user_access_restore(ua_flags);
		return;
	}
	/* Unpoison the memory on a best-effort basis. */
	kmsan_internal_unpoison_memory(addr, size, /*checked*/ false);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__msan_instrument_asm_store);

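/*
 * As a toy example (a simplified sketch, not verbatim compiler output): for
 * an asm statement with a memory output operand, e.g.
 *
 *	int var;
 *
 *	asm("movl $42, %0" : "=m"(var));
 *
 * the KMSAN instrumentation pass emits a call resembling
 *
 *	__msan_instrument_asm_store(&var, sizeof(var));
 *
 * so that @var is treated as initialized even though KMSAN cannot see the
 * store performed inside the assembly.
 */
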
/*
 * The KMSAN instrumentation pass replaces LLVM memcpy, memmove and memset
 * intrinsics with calls to the respective __msan_ functions. We use
 * get_param0_metadata() and set_retval_metadata() to store the shadow/origin
 * values for the destination argument of these functions and use them for the
 * functions' return values.
 */
static inline void get_param0_metadata(u64 *shadow,
				       depot_stack_handle_t *origin)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	*shadow = *(u64 *)(ctx->cstate.param_tls);
	*origin = ctx->cstate.param_origin_tls[0];
}

static inline void set_retval_metadata(u64 shadow, depot_stack_handle_t origin)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	*(u64 *)(ctx->cstate.retval_tls) = shadow;
	ctx->cstate.retval_origin_tls = origin;
}

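/*
 * A simplified sketch of the resulting metadata flow (hand-written, not
 * actual compiler output; shadow_of_dst/origin_of_dst stand for the metadata
 * Clang tracks for @dst):
 *
 *	caller (instrumented by Clang):
 *		cstate = __msan_get_context_state();
 *		*(u64 *)cstate->param_tls = shadow_of_dst;
 *		cstate->param_origin_tls[0] = origin_of_dst;
 *		p = __msan_memcpy(dst, src, n);
 *		shadow_of_p = *(u64 *)cstate->retval_tls;
 *		origin_of_p = cstate->retval_origin_tls;
 *
 * Because memcpy(), memmove() and memset() return @dst, the saved parameter
 * metadata is copied to the return value slots unchanged.
 */
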
/* Handle llvm.memmove intrinsic. */
void *__msan_memmove(void *dst, const void *src, uintptr_t n);
void *__msan_memmove(void *dst, const void *src, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memmove(dst, src, n);
	if (!n)
		/* Some people call memmove() with zero length. */
		return result;
	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memmove);

/* Handle llvm.memcpy intrinsic. */
void *__msan_memcpy(void *dst, const void *src, uintptr_t n);
void *__msan_memcpy(void *dst, const void *src, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memcpy(dst, src, n);
	if (!n)
		/* Some people call memcpy() with zero length. */
		return result;

	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	/* Using memmove instead of memcpy doesn't affect correctness. */
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memcpy);

/* Handle llvm.memset intrinsic. */
void *__msan_memset(void *dst, int c, uintptr_t n);
void *__msan_memset(void *dst, int c, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memset(dst, c, n);
	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	/*
	 * Clang doesn't pass parameter metadata here, so it is impossible to
	 * use the shadow of @c to set up the shadow for @dst.
	 */
	kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memset);

/*
 * Create a new origin from an old one. This is done when storing an
 * uninitialized value to memory. When reporting an error, KMSAN unrolls and
 * prints the whole chain of stores that preceded the use of this value.
 */
depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin);
depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin)
{
	depot_stack_handle_t ret = 0;
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return ret;

	ua_flags = user_access_save();

	/* Creating new origins may allocate memory. */
	kmsan_enter_runtime();
	ret = kmsan_internal_chain_origin(origin);
	kmsan_leave_runtime();
	user_access_restore(ua_flags);
	return ret;
}
EXPORT_SYMBOL(__msan_chain_origin);

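/*
 * For example (an illustrative sequence, assuming every value below lives in
 * memory rather than in a register):
 *
 *	int a;		// uninitialized, origin O1 recorded by __msan_poison_alloca()
 *	int b = a;	// storing an uninit value: O2 = __msan_chain_origin(O1)
 *	int c = b;	// another store: O3 = __msan_chain_origin(O2)
 *	if (c)		// use of an uninit value: __msan_warning(O3)
 *		...;
 *
 * The report for the use of @c then walks the chain O3 -> O2 -> O1 and
 * prints the stack trace recorded for each link.
 */
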
/* Poison a local variable when entering a function. */
void __msan_poison_alloca(void *address, uintptr_t size, char *descr);
void __msan_poison_alloca(void *address, uintptr_t size, char *descr)
{
	depot_stack_handle_t handle;
	unsigned long entries[4];
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ua_flags = user_access_save();
	entries[0] = KMSAN_ALLOCA_MAGIC_ORIGIN;
	entries[1] = (u64)descr;
	entries[2] = (u64)__builtin_return_address(0);
	/*
	 * With frame pointers enabled, it is possible to quickly fetch the
	 * second frame of the caller stack without calling the unwinder.
	 * Without them, simply do not bother.
	 */
	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER))
		entries[3] = (u64)__builtin_return_address(1);
	else
		entries[3] = 0;

	/* stack_depot_save() may allocate memory. */
	kmsan_enter_runtime();
	handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH);
	kmsan_leave_runtime();

	kmsan_internal_set_shadow_origin(address, size, -1, handle,
					 /*checked*/ true);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__msan_poison_alloca);

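/*
 * For illustration (a sketch of what Clang emits; the description string is
 * shown here simply as the variable name): given a function with an
 * uninitialized local
 *
 *	void foo(void)
 *	{
 *		struct pt_regs regs;
 *		...
 *	}
 *
 * the instrumented prologue contains a call resembling
 *
 *	__msan_poison_alloca(&regs, sizeof(regs), "regs");
 *
 * so that any use of @regs before it is written to is reported, with foo()
 * named as the place where the variable was created.
 */
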
/* Unpoison a local variable. */
void __msan_unpoison_alloca(void *address, uintptr_t size);
void __msan_unpoison_alloca(void *address, uintptr_t size)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_enter_runtime();
	kmsan_internal_unpoison_memory(address, size, /*checked*/ true);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_unpoison_alloca);

/*
 * Report that an uninitialized value with the given origin was used in a way
 * that constituted undefined behavior.
 */
void __msan_warning(u32 origin);
void __msan_warning(u32 origin)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_report(origin, /*address*/ NULL, /*size*/ 0,
		     /*off_first*/ 0, /*off_last*/ 0, /*user_addr*/ NULL,
		     REASON_ANY);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_warning);

/*
 * At the beginning of an instrumented function, obtain the pointer to
 * `struct kmsan_context_state` holding the metadata for function parameters.
 */
struct kmsan_context_state *__msan_get_context_state(void);
struct kmsan_context_state *__msan_get_context_state(void)
{
	return &kmsan_get_context()->cstate;
}
EXPORT_SYMBOL(__msan_get_context_state);
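
/*
 * To close the loop with the sketch next to get_param0_metadata() above
 * (hand-written, not actual compiler output): an instrumented callee reads
 * its parameter metadata from the same context state the caller filled in.
 * For `int foo(int x)` the prologue looks roughly like
 *
 *	struct kmsan_context_state *cstate = __msan_get_context_state();
 *	u32 shadow_of_x = *(u32 *)cstate->param_tls;
 *	depot_stack_handle_t origin_of_x = cstate->param_origin_tls[0];
 *
 * where shadow_of_x/origin_of_x stand for the metadata Clang tracks for @x.
 */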