// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN runtime library.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <asm/page.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmsan_types.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <linux/percpu-defs.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "../slab.h"
#include "kmsan.h"

bool kmsan_enabled __read_mostly;

/*
 * Per-CPU KMSAN context to be used in interrupts, where current->kmsan is
 * unavailable.
 */
DEFINE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);

/* Initialize the KMSAN state of a newly created task. */
void kmsan_internal_task_create(struct task_struct *task)
{
        struct kmsan_ctx *ctx = &task->kmsan_ctx;
        struct thread_info *info = current_thread_info();

        __memset(ctx, 0, sizeof(*ctx));
        kmsan_internal_unpoison_memory(info, sizeof(*info), false);
}

/*
 * Mark @size bytes at @address as uninitialized: fill their shadow with 0xff
 * and set their origin to the current stack trace.
 */
void kmsan_internal_poison_memory(void *address, size_t size, gfp_t flags,
                                  unsigned int poison_flags)
{
        u32 extra_bits =
                kmsan_extra_bits(/*depth*/ 0, poison_flags & KMSAN_POISON_FREE);
        bool checked = poison_flags & KMSAN_POISON_CHECK;
        depot_stack_handle_t handle;

        handle = kmsan_save_stack_with_flags(flags, extra_bits);
        kmsan_internal_set_shadow_origin(address, size, -1, handle, checked);
}

/* Mark @size bytes at @address as initialized: clear their shadow and origin. */
void kmsan_internal_unpoison_memory(void *address, size_t size, bool checked)
{
        kmsan_internal_set_shadow_origin(address, size, 0, 0, checked);
}

depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
                                                 unsigned int extra)
{
        unsigned long entries[KMSAN_STACK_DEPTH];
        unsigned int nr_entries;
        depot_stack_handle_t handle;

        nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);

        handle = stack_depot_save(entries, nr_entries, flags);
        return stack_depot_set_extra_bits(handle, extra);
}

/* Copy the metadata following the memmove() behavior. */
void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n)
{
        depot_stack_handle_t prev_old_origin = 0, prev_new_origin = 0;
        int i, iter, step, src_off, dst_off, oiter_src, oiter_dst;
        depot_stack_handle_t old_origin = 0, new_origin = 0;
        depot_stack_handle_t *origin_src, *origin_dst;
        u8 *shadow_src, *shadow_dst;
        u32 *align_shadow_dst;
        bool backwards;

        shadow_dst = kmsan_get_metadata(dst, KMSAN_META_SHADOW);
        if (!shadow_dst)
                return;
        KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(dst, n));
        align_shadow_dst =
                (u32 *)ALIGN_DOWN((u64)shadow_dst, KMSAN_ORIGIN_SIZE);

        shadow_src = kmsan_get_metadata(src, KMSAN_META_SHADOW);
        if (!shadow_src) {
                /* @src is untracked: mark @dst as initialized. */
                kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
                return;
        }
        KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(src, n));

        origin_dst = kmsan_get_metadata(dst, KMSAN_META_ORIGIN);
        origin_src = kmsan_get_metadata(src, KMSAN_META_ORIGIN);
        KMSAN_WARN_ON(!origin_dst || !origin_src);

        backwards = dst > src;
        step = backwards ? -1 : 1;
        iter = backwards ? n - 1 : 0;
        src_off = (u64)src % KMSAN_ORIGIN_SIZE;
        dst_off = (u64)dst % KMSAN_ORIGIN_SIZE;

        /* Copy shadow bytes one by one, updating the origins if necessary. */
        for (i = 0; i < n; i++, iter += step) {
                oiter_src = (iter + src_off) / KMSAN_ORIGIN_SIZE;
                oiter_dst = (iter + dst_off) / KMSAN_ORIGIN_SIZE;
                if (!shadow_src[iter]) {
                        shadow_dst[iter] = 0;
                        if (!align_shadow_dst[oiter_dst])
                                origin_dst[oiter_dst] = 0;
                        continue;
                }
                shadow_dst[iter] = shadow_src[iter];
                old_origin = origin_src[oiter_src];
                if (old_origin == prev_old_origin)
                        new_origin = prev_new_origin;
                else {
                        /*
                         * kmsan_internal_chain_origin() may return
                         * NULL, but we don't want to lose the previous
                         * origin value.
                         */
                        new_origin = kmsan_internal_chain_origin(old_origin);
                        if (!new_origin)
                                new_origin = old_origin;
                }
                origin_dst[oiter_dst] = new_origin;
                prev_new_origin = new_origin;
                prev_old_origin = old_origin;
        }
}

/*
 * Chain the current stack trace to origin @id, producing a new origin handle.
 * Used when uninitialized values are copied, so that reports can show every
 * location a value travelled through.
 */
depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
{
        unsigned long entries[3];
        u32 extra_bits;
        int depth;
        bool uaf;
        depot_stack_handle_t handle;

        if (!id)
                return id;
        /*
         * Make sure we have enough spare bits in @id to hold the UAF bit and
         * the chain depth.
         */
        BUILD_BUG_ON((1 << STACK_DEPOT_EXTRA_BITS) <=
                     (KMSAN_MAX_ORIGIN_DEPTH << 1));

        extra_bits = stack_depot_get_extra_bits(id);
        depth = kmsan_depth_from_eb(extra_bits);
        uaf = kmsan_uaf_from_eb(extra_bits);

        /*
         * Stop chaining origins once the depth reaches KMSAN_MAX_ORIGIN_DEPTH.
         * This mostly happens when structures with uninitialized padding are
         * copied around many times. Origin chains for such structures are
         * usually periodic, and it does not make sense to fully store them.
         */
        if (depth == KMSAN_MAX_ORIGIN_DEPTH)
                return id;

        depth++;
        extra_bits = kmsan_extra_bits(depth, uaf);

        entries[0] = KMSAN_CHAIN_MAGIC_ORIGIN;
        entries[1] = kmsan_save_stack_with_flags(__GFP_HIGH, 0);
        entries[2] = id;
        /*
         * @entries is a local var in non-instrumented code, so KMSAN does not
         * know it is initialized. Explicitly unpoison it to avoid false
         * positives when stack_depot_save() passes it to instrumented code.
         */
        kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
        handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH);
        return stack_depot_set_extra_bits(handle, extra_bits);
}

/*
 * Set the shadow of @size bytes at @addr to @b and update the corresponding
 * origin slots with @origin.
 */
void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
                                      u32 origin, bool checked)
{
        u64 address = (u64)addr;
        void *shadow_start;
        u32 *aligned_shadow, *origin_start;
        size_t pad = 0;

        KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
        shadow_start = kmsan_get_metadata(addr, KMSAN_META_SHADOW);
        if (!shadow_start) {
                /*
                 * kmsan_metadata_is_contiguous() is true, so either all shadow
                 * and origin pages are NULL, or all are non-NULL.
                 */
                if (checked) {
                        pr_err("%s: not memsetting %zu bytes starting at %px, because the shadow is NULL\n",
                               __func__, size, addr);
                        KMSAN_WARN_ON(true);
                }
                return;
        }
        __memset(shadow_start, b, size);

        if (IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
                aligned_shadow = shadow_start;
        } else {
                pad = address % KMSAN_ORIGIN_SIZE;
                address -= pad;
                aligned_shadow = shadow_start - pad;
                size += pad;
        }
        size = ALIGN(size, KMSAN_ORIGIN_SIZE);
        origin_start =
                (u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);

        /*
         * If the new origin is non-zero, assume that the shadow byte is also
         * non-zero, and unconditionally overwrite the old origin slot.
         * If the new origin is zero, overwrite the old origin slot iff the
         * corresponding shadow slot is zero.
         */
        for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
                if (origin || !aligned_shadow[i])
                        origin_start[i] = origin;
        }
}

struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
{
        struct page *page;

        if (!kmsan_internal_is_vmalloc_addr(vaddr) &&
            !kmsan_internal_is_module_addr(vaddr))
                return NULL;
        page = vmalloc_to_page(vaddr);
        if (pfn_valid(page_to_pfn(page)))
                return page;
        else
                return NULL;
}

/*
 * Check that @size bytes at @addr are initialized, reporting each contiguous
 * range of uninitialized bytes together with its origin.
 */
void kmsan_internal_check_memory(void *addr, size_t size,
                                 const void __user *user_addr, int reason)
{
        depot_stack_handle_t cur_origin = 0, new_origin = 0;
        unsigned long addr64 = (unsigned long)addr;
        depot_stack_handle_t *origin = NULL;
        unsigned char *shadow = NULL;
        int cur_off_start = -1;
        int chunk_size;
        size_t pos = 0;

        if (!size)
                return;
        KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
        while (pos < size) {
                chunk_size = min(size - pos,
                                 PAGE_SIZE - ((addr64 + pos) % PAGE_SIZE));
                shadow = kmsan_get_metadata((void *)(addr64 + pos),
                                            KMSAN_META_SHADOW);
                if (!shadow) {
                        /*
                         * This page is untracked. If there were uninitialized
                         * bytes before, report them.
                         */
                        if (cur_origin) {
                                kmsan_report(cur_origin, addr, size,
                                             cur_off_start, pos - 1, user_addr,
                                             reason);
                        }
                        cur_origin = 0;
                        cur_off_start = -1;
                        pos += chunk_size;
                        continue;
                }
                for (int i = 0; i < chunk_size; i++) {
                        if (!shadow[i]) {
                                /*
                                 * This byte is unpoisoned. If there were
                                 * poisoned bytes before, report them.
                                 */
                                if (cur_origin) {
                                        kmsan_report(cur_origin, addr, size,
                                                     cur_off_start, pos + i - 1,
                                                     user_addr, reason);
                                }
                                cur_origin = 0;
                                cur_off_start = -1;
                                continue;
                        }
                        origin = kmsan_get_metadata((void *)(addr64 + pos + i),
                                                    KMSAN_META_ORIGIN);
                        KMSAN_WARN_ON(!origin);
                        new_origin = *origin;
                        /*
                         * Encountered new origin - report the previous
                         * uninitialized range.
                         */
                        if (cur_origin != new_origin) {
                                if (cur_origin) {
                                        kmsan_report(cur_origin, addr, size,
                                                     cur_off_start, pos + i - 1,
                                                     user_addr, reason);
                                }
                                cur_origin = new_origin;
                                cur_off_start = pos + i;
                        }
                }
                pos += chunk_size;
        }
        KMSAN_WARN_ON(pos != size);
        if (cur_origin) {
                kmsan_report(cur_origin, addr, size, cur_off_start, pos - 1,
                             user_addr, reason);
        }
}

/*
 * Return true if the shadow and origin metadata of the @size bytes at @addr
 * are contiguous in memory, or if the whole range is untracked.
 */
bool kmsan_metadata_is_contiguous(void *addr, size_t size)
{
        char *cur_shadow = NULL, *next_shadow = NULL, *cur_origin = NULL,
             *next_origin = NULL;
        u64 cur_addr = (u64)addr, next_addr = cur_addr + PAGE_SIZE;
        depot_stack_handle_t *origin_p;
        bool all_untracked = false;

        if (!size)
                return true;

        /* The whole range belongs to the same page. */
        if (ALIGN_DOWN(cur_addr + size - 1, PAGE_SIZE) ==
            ALIGN_DOWN(cur_addr, PAGE_SIZE))
                return true;

        cur_shadow = kmsan_get_metadata((void *)cur_addr, /*is_origin*/ false);
        if (!cur_shadow)
                all_untracked = true;
        cur_origin = kmsan_get_metadata((void *)cur_addr, /*is_origin*/ true);
        if (all_untracked && cur_origin)
                goto report;

        for (; next_addr < (u64)addr + size;
             cur_addr = next_addr, cur_shadow = next_shadow,
             cur_origin = next_origin, next_addr += PAGE_SIZE) {
                next_shadow = kmsan_get_metadata((void *)next_addr, false);
                next_origin = kmsan_get_metadata((void *)next_addr, true);
                if (all_untracked) {
                        if (next_shadow || next_origin)
                                goto report;
                        if (!next_shadow && !next_origin)
                                continue;
                }
                if (((u64)cur_shadow == ((u64)next_shadow - PAGE_SIZE)) &&
                    ((u64)cur_origin == ((u64)next_origin - PAGE_SIZE)))
                        continue;
                goto report;
        }
        return true;

report:
        pr_err("%s: attempting to access two shadow page ranges.\n", __func__);
        pr_err("Access of size %zu at %px.\n", size, addr);
        pr_err("Addresses belonging to different ranges: %px and %px\n",
               (void *)cur_addr, (void *)next_addr);
        pr_err("page[0].shadow: %px, page[1].shadow: %px\n", cur_shadow,
               next_shadow);
        pr_err("page[0].origin: %px, page[1].origin: %px\n", cur_origin,
               next_origin);
        origin_p = kmsan_get_metadata(addr, KMSAN_META_ORIGIN);
        if (origin_p) {
                pr_err("Origin: %08x\n", *origin_p);
                kmsan_print_origin(*origin_p);
        } else {
                pr_err("Origin: unavailable\n");
        }
        return false;
}
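
/*
 * Illustrative sketch only, kept out of the build: a minimal example of how
 * the internal helpers above are meant to be combined by higher-level KMSAN
 * hooks. The function name, the buffer and the GFP_KERNEL flag below are
 * hypothetical; real callers live in mm/kmsan/hooks.c and in
 * compiler-instrumented code.
 */
#if 0
static void example_poison_then_check(void *buf, size_t len,
                                      const void __user *user_dst)
{
        if (!kmsan_enabled)
                return;
        /* Mark @buf as uninitialized, recording the current stack as origin. */
        kmsan_internal_poison_memory(buf, len, GFP_KERNEL, KMSAN_POISON_CHECK);

        /* ... @buf may be partially initialized by instrumented code ... */

        /*
         * Report every byte of @buf whose shadow is still non-zero, as if it
         * were about to be copied to @user_dst.
         */
        kmsan_internal_check_memory(buf, len, user_dst, REASON_COPY_TO_USER);
}
#endif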