xref: /linux/mm/kmsan/instrumentation.c (revision bfb921b2a9d5d1123d1d10b196a39db629ddef87)
// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN compiler API.
 *
 * This file implements __msan_XXX hooks that Clang inserts into the code
 * compiled with -fsanitize=kernel-memory.
 * See Documentation/dev-tools/kmsan.rst for more information on how KMSAN
 * instrumentation works.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include "kmsan.h"
#include <linux/gfp.h>
#include <linux/kmsan_string.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

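/*
 * Check whether an address accessed from inline assembly can be handled at
 * all: userspace addresses and addresses without shadow metadata cannot.
 */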
static inline bool is_bad_asm_addr(void *addr, uintptr_t size, bool is_store)
{
	if ((u64)addr < TASK_SIZE)
		return true;
	if (!kmsan_get_metadata(addr, KMSAN_META_SHADOW))
		return true;
	return false;
}

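/*
 * Wrapper around kmsan_get_shadow_origin_ptr() that saves and restores the
 * user access state around the metadata lookup, so that hooks called from
 * uaccess-enabled regions are handled safely.
 */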
static inline struct shadow_origin_ptr
get_shadow_origin_ptr(void *addr, u64 size, bool store)
{
	unsigned long ua_flags = user_access_save();
	struct shadow_origin_ptr ret;

	ret = kmsan_get_shadow_origin_ptr(addr, size, store);
	user_access_restore(ua_flags);
	return ret;
}

/*
 * KMSAN instrumentation functions follow. They are not declared anywhere else
 * in the kernel code, so each definition is preceded by a prototype to silence
 * -Wmissing-prototypes warnings.
 */

/* Get shadow and origin pointers for a memory load with non-standard size. */
struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
							uintptr_t size);
struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
							uintptr_t size)
{
	return get_shadow_origin_ptr(addr, size, /*store*/ false);
}
EXPORT_SYMBOL(__msan_metadata_ptr_for_load_n);

/* Get shadow and origin pointers for a memory store with non-standard size. */
struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
							 uintptr_t size);
struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
							 uintptr_t size)
{
	return get_shadow_origin_ptr(addr, size, /*store*/ true);
}
EXPORT_SYMBOL(__msan_metadata_ptr_for_store_n);

/*
 * Declare functions that obtain shadow/origin pointers for loads and stores
 * with fixed size.
 */
#define DECLARE_METADATA_PTR_GETTER(size)                                  \
	struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size(      \
		void *addr);                                               \
	struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size(      \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ false); \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_load_##size);                \
	struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size(     \
		void *addr);                                               \
	struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size(     \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ true);  \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_store_##size)

DECLARE_METADATA_PTR_GETTER(1);
DECLARE_METADATA_PTR_GETTER(2);
DECLARE_METADATA_PTR_GETTER(4);
DECLARE_METADATA_PTR_GETTER(8);
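
/*
 * Illustrative sketch (not actual compiler output): for a 4-byte store such
 * as `p->field = v;` the instrumented code is roughly equivalent to
 *
 *	struct shadow_origin_ptr sop =
 *		__msan_metadata_ptr_for_store_4(&p->field);
 *	*(u32 *)sop.shadow = shadow_of_v;
 *	if (shadow_of_v)
 *		*(u32 *)sop.origin = origin_of_v;
 *	p->field = v;
 *
 * i.e. Clang copies the metadata of the stored value through the pointers
 * returned by the getters above; loads mirror this with the _load_ variants.
 */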

/*
 * Handle a memory store performed by inline assembly. KMSAN conservatively
 * attempts to unpoison the outputs of asm() directives to prevent false
 * positives caused by missed stores.
 *
 * __msan_instrument_asm_store() may be called for inline assembly code when
 * entering or leaving IRQ. We omit the check for kmsan_in_runtime() to ensure
 * the memory written to in these cases is also marked as initialized.
 */
void __msan_instrument_asm_store(void *addr, uintptr_t size);
void __msan_instrument_asm_store(void *addr, uintptr_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled)
		return;

	ua_flags = user_access_save();
	/*
	 * Most of the accesses are below 32 bytes. The two exceptions so far
	 * are clwb() (64 bytes) and FPU state (512 bytes).
	 * It's unlikely that the assembly will touch more than 512 bytes.
	 */
	if (size > 512) {
		WARN_ONCE(1, "assembly store size too big: %ld\n", size);
		size = 8;
	}
	if (is_bad_asm_addr(addr, size, /*is_store*/ true)) {
		user_access_restore(ua_flags);
		return;
	}
	/* Unpoison the memory on a best-effort basis. */
	kmsan_internal_unpoison_memory(addr, size, /*checked*/ false);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__msan_instrument_asm_store);
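
/*
 * Illustrative sketch: given an asm statement with a memory output, e.g.
 *
 *	asm volatile("..." : "=m"(var));
 *
 * the KMSAN instrumentation pass conservatively emits a call along the lines
 * of __msan_instrument_asm_store(&var, sizeof(var)), so that @var is treated
 * as initialized even though KMSAN cannot see the store itself.
 */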

/*
 * The KMSAN instrumentation pass replaces LLVM memcpy, memmove and memset
 * intrinsics with calls to the respective __msan_ functions. We use
 * get_param0_metadata() and set_retval_metadata() to save the shadow/origin
 * values of the destination argument of these functions and to reuse them for
 * the functions' return values.
 */
static inline void get_param0_metadata(u64 *shadow,
				       depot_stack_handle_t *origin)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	*shadow = *(u64 *)(ctx->cstate.param_tls);
	*origin = ctx->cstate.param_origin_tls[0];
}

static inline void set_retval_metadata(u64 shadow, depot_stack_handle_t origin)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	*(u64 *)(ctx->cstate.retval_tls) = shadow;
	ctx->cstate.retval_origin_tls = origin;
}
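
/*
 * Illustrative example: for a call like `p = memcpy(dst, src, n);` the
 * instrumented caller writes the shadow/origin of @dst into the first slot of
 * param_tls/param_origin_tls before the call and reads the metadata of @p
 * from retval_tls/retval_origin_tls afterwards, so the __msan_ functions
 * below forward the former into the latter.
 */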

/* Handle llvm.memmove intrinsic. */
void *__msan_memmove(void *dst, const void *src, uintptr_t n);
void *__msan_memmove(void *dst, const void *src, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memmove(dst, src, n);
	if (!n)
		/* Some people call memmove() with zero length. */
		return result;
	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memmove);

/* Handle llvm.memcpy intrinsic. */
void *__msan_memcpy(void *dst, const void *src, uintptr_t n);
void *__msan_memcpy(void *dst, const void *src, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memcpy(dst, src, n);
	if (!n)
		/* Some people call memcpy() with zero length. */
		return result;

	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	/* Using memmove instead of memcpy doesn't affect correctness. */
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memcpy);

/* Handle llvm.memset intrinsic. */
void *__msan_memset(void *dst, int c, uintptr_t n);
void *__msan_memset(void *dst, int c, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memset(dst, c, n);
	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	/*
	 * Clang doesn't pass parameter metadata here, so it is impossible to
	 * use the shadow of @c to set up the shadow for @dst.
	 */
	kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memset);

/*
 * Create a new origin from an old one. This is done when storing an
 * uninitialized value to memory. When reporting an error, KMSAN unrolls and
 * prints the whole chain of stores that preceded the use of this value.
 */
depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin);
depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin)
{
	depot_stack_handle_t ret = 0;
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return ret;

	ua_flags = user_access_save();

	/* Creating new origins may allocate memory. */
	kmsan_enter_runtime();
	ret = kmsan_internal_chain_origin(origin);
	kmsan_leave_runtime();
	user_access_restore(ua_flags);
	return ret;
}
EXPORT_SYMBOL(__msan_chain_origin);
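
/*
 * Illustrative example: with
 *
 *	int a;		// uninitialized, its origin O1 records the creation
 *	int b = a;	// store instrumented with __msan_chain_origin(O1)
 *
 * the origin recorded for @b is a new handle linking the current stack trace
 * to O1, so an eventual report shows both the creation of @a and the copy.
 */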

/* Poison a local variable when entering a function. */
void __msan_poison_alloca(void *address, uintptr_t size, char *descr);
void __msan_poison_alloca(void *address, uintptr_t size, char *descr)
{
	depot_stack_handle_t handle;
	unsigned long entries[4];
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ua_flags = user_access_save();
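	/*
	 * Build a synthetic "stack trace" for the origin: a magic marker
	 * recognized by the reporting code, the variable description emitted
	 * by Clang, and up to two caller addresses.
	 */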
	entries[0] = KMSAN_ALLOCA_MAGIC_ORIGIN;
	entries[1] = (u64)descr;
	entries[2] = (u64)__builtin_return_address(0);
	/*
	 * With frame pointers enabled, it is possible to quickly fetch the
	 * second frame of the caller stack without calling the unwinder.
	 * Without them, simply do not bother.
	 */
	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER))
		entries[3] = (u64)__builtin_return_address(1);
	else
		entries[3] = 0;

	/* stack_depot_save() may allocate memory. */
	kmsan_enter_runtime();
	handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH);
	kmsan_leave_runtime();

	kmsan_internal_set_shadow_origin(address, size, -1, handle,
					 /*checked*/ true);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__msan_poison_alloca);

/* Unpoison a local variable. */
void __msan_unpoison_alloca(void *address, uintptr_t size);
void __msan_unpoison_alloca(void *address, uintptr_t size)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_enter_runtime();
	kmsan_internal_unpoison_memory(address, size, /*checked*/ true);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_unpoison_alloca);

/*
 * Report that an uninitialized value with the given origin was used in a way
 * that constituted undefined behavior.
 */
void __msan_warning(u32 origin);
void __msan_warning(u32 origin)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_report(origin, /*address*/ 0, /*size*/ 0,
		     /*off_first*/ 0, /*off_last*/ 0, /*user_addr*/ 0,
		     REASON_ANY);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_warning);
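
/*
 * Illustrative sketch: before an operation that must not consume
 * uninitialized data (e.g. a conditional branch), Clang emits roughly
 *
 *	if (shadow_of_cond)
 *		__msan_warning(origin_of_cond);
 *
 * where the shadow and origin are the ones tracked for the branch condition.
 */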

/*
 * At the beginning of an instrumented function, obtain the pointer to
 * `struct kmsan_context_state` holding the metadata for function parameters.
 */
struct kmsan_context_state *__msan_get_context_state(void);
struct kmsan_context_state *__msan_get_context_state(void)
{
	return &kmsan_get_context()->cstate;
}
EXPORT_SYMBOL(__msan_get_context_state);
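
/*
 * Illustrative sketch: an instrumented function prologue does roughly
 *
 *	struct kmsan_context_state *cstate = __msan_get_context_state();
 *
 * and then reads the shadow of the i-th parameter from cstate->param_tls and
 * its origin from cstate->param_origin_tls[i], mirroring the stores performed
 * by instrumented callers (see get_param0_metadata() above).
 */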