// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
/*
 * CONFIG_GENERIC_ENTRY relies on compiler emitted mem*() calls to not be
 * instrumented. KASAN enabled toolchains should emit __asan_mem*() functions
 * for the sites they want to instrument.
 *
 * If we have a compiler that can instrument memintrinsics, never override
 * these, so that non-instrumented files can safely consider them as builtins.
 */
#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
#endif

void *__asan_memset(void *addr, int c, ssize_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}
EXPORT_SYMBOL(__asan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__asan_memmove(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
EXPORT_SYMBOL(__asan_memmove);
#endif

void *__asan_memcpy(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
EXPORT_SYMBOL(__asan_memcpy);

#ifdef CONFIG_KASAN_SW_TAGS
void *__hwasan_memset(void *addr, int c, ssize_t len) __alias(__asan_memset);
EXPORT_SYMBOL(__hwasan_memset);
#ifdef __HAVE_ARCH_MEMMOVE
void *__hwasan_memmove(void *dest, const void *src, ssize_t len) __alias(__asan_memmove);
EXPORT_SYMBOL(__hwasan_memmove);
#endif
void *__hwasan_memcpy(void *dest, const void *src, ssize_t len) __alias(__asan_memcpy);
EXPORT_SYMBOL(__hwasan_memcpy);
#endif
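/*
 * Illustrative sketch, not part of the original file: how a plain memset()
 * call site behaves once the compiler routes it through __asan_memset()
 * (or through the memset() override above). The function name and buffer
 * below are hypothetical.
 */
static void __maybe_unused kasan_memintrinsic_example(void)
{
	char buf[32];

	/*
	 * kasan_check_range() validates [buf, buf + 32) against the shadow
	 * before __memset() runs; a bad range would be reported and the
	 * wrapper would return NULL without touching memory.
	 */
	memset(buf, 0, sizeof(buf));
}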
void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	void *shadow_start, *shadow_end;

	if (!kasan_arch_is_ready())
		return;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	shadow_start = kasan_mem_to_shadow(addr);
	shadow_end = kasan_mem_to_shadow(addr + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL_GPL(kasan_poison);

#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
	if (!kasan_arch_is_ready())
		return;

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
#endif

void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;

	/* Unpoison all granules that cover the object. */
	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(addr, size);
}
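/*
 * Illustrative sketch, not part of the original file: the shadow state that
 * kasan_unpoison() produces for an object whose size is not a multiple of
 * KASAN_GRANULE_SIZE. The 13-byte size and the function name are
 * hypothetical; the encoding check only holds for the generic mode.
 */
static void __maybe_unused kasan_granule_example(const void *object)
{
	const size_t size = 13;
	u8 *shadow = (u8 *)kasan_mem_to_shadow(object);

	kasan_unpoison(object, size, false);

	/*
	 * Granule 1 (bytes 0-7) is fully accessible, so shadow[0] holds the
	 * unpoisoned value; granule 2 holds only 5 valid bytes, so
	 * kasan_poison_last_granule() stored 13 & KASAN_GRANULE_MASK == 5.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		WARN_ON(shadow[1] != (size & KASAN_GRANULE_MASK));
}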
#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;
	if (pud_leaf(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_leaf(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(ptep_get(pte));
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
	    WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is mapped already, it must have been mapped
		 * during boot. This can happen when we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that we are
		 * dealing with the latter case.
		 *
		 * Currently it's not possible to free the shadow mapped
		 * during boot by kasan_init(): the code to do that hasn't
		 * been written yet, so we just leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, DEFAULT_CALLBACK_PRI);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

void __init __weak kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{
}

static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(ptep_get(ptep))))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	__memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(ptep_get(ptep)))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}
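/*
 * Illustrative sketch, not part of the original file: the race-tolerant
 * populate idiom used by kasan_populate_vmalloc_pte() above, reduced to a
 * hypothetical pointer slot guarded by a spinlock. Allocate without the
 * lock held, recheck under the lock, and drop the page if another CPU won
 * the race.
 */
static int __maybe_unused kasan_populate_idiom_example(void **slot,
						       spinlock_t *lock)
{
	unsigned long page = __get_free_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	spin_lock(lock);
	if (!*slot) {		/* recheck: we may have lost the race */
		*slot = (void *)page;
		page = 0;	/* ownership transferred to the slot */
	}
	spin_unlock(lock);

	if (page)		/* lost the race: free our copy */
		free_page(page);
	return 0;
}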
int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!kasan_arch_is_ready())
		return 0;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);

	/*
	 * User Mode Linux maps enough shadow memory for all of virtual
	 * memory at boot, so it doesn't need to allocate more on vmalloc;
	 * the shadow just needs to be initialized.
	 *
	 * The remaining CONFIG_UML checks in this file exist for the same
	 * reason.
	 */
	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
		return 0;
	}

	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
	shadow_end = PAGE_ALIGN(shadow_end);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val	x = LOAD p
	 * // rest of vmalloc process		<data dependency>
	 * STORE p, a				LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(ptep_get(ptep)))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}
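/*
 * Illustrative sketch, not part of the original file: the publisher side
 * of the ordering discussed in kasan_populate_vmalloc() above, with
 * hypothetical names. The smp_wmb() stands in for the one in
 * clear_vm_uninitialized_flag(); the reader side is ordered by its data
 * dependency through READ_ONCE().
 */
static void *kasan_example_published_ptr;

static void __maybe_unused kasan_publish_order_example(void)
{
	void *a = vmalloc(100);		/* shadow for [a, a+100) unpoisoned */

	smp_wmb();			/* order shadow stores before publishing */
	WRITE_ONCE(kasan_example_published_ptr, a);
}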
/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labeled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- -------- -------- --------
 *      |        |        |        |        |
 *      |        |        |        /-------/ |
 *      \-------\|/------/  |/---------------/
 *              |||         ||
 *             |??AAAAAA|AAAAAAAA|AA??????|               < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 * free_region_start                                    free_region_end
 *    |               start                     end        |
 *    v               v                         v          v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- -------- -------- --------
 *      |        |        |        |        |
 *      |        |        |        /-------/ |
 *      \-------\|/------/  |/---------------/
 *              |||         ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|               < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end,
			   unsigned long flags)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	if (!kasan_arch_is_ready())
		return;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		if (IS_ENABLED(CONFIG_UML)) {
			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
			return;
		}

		if (flags & KASAN_VMALLOC_PAGE_RANGE)
			apply_to_existing_page_range(&init_mm,
						     (unsigned long)shadow_start,
						     size, kasan_depopulate_vmalloc_pte,
						     NULL);

		if (flags & KASAN_VMALLOC_TLB_FLUSH)
			flush_tlb_kernel_range((unsigned long)shadow_start,
					       (unsigned long)shadow_end);
	}
}
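/*
 * Illustrative sketch, not part of the original file: the invariant behind
 * the alignment logic above, written as a hypothetical predicate. A shadow
 * page that only partially covers the region being freed may be released
 * only when the whole KASAN_MEMORY_PER_SHADOW_PAGE chunk of virtual address
 * space it describes lies inside the containing free region.
 */
static bool __maybe_unused kasan_shadow_page_releasable(unsigned long chunk_start,
							unsigned long free_region_start,
							unsigned long free_region_end)
{
	return free_region_start <= chunk_start &&
	       chunk_start + KASAN_MEMORY_PER_SHADOW_PAGE <= free_region_end;
}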
void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags)
{
	/*
	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
	 * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
	 * Software KASAN modes can't optimize zeroing memory by combining it
	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
	 */

	if (!kasan_arch_is_ready())
		return (void *)start;

	if (!is_vmalloc_or_module_addr(start))
		return (void *)start;

	/*
	 * Don't tag executable memory with the tag-based mode.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
		return (void *)start;

	start = set_tag(start, kasan_random_tag());
	kasan_unpoison(start, size, false);
	return (void *)start;
}
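/*
 * Illustrative sketch, not part of the original file: the pointer-tag
 * round trip performed by __kasan_unpoison_vmalloc() above. The function
 * name is hypothetical; the tag check is only meaningful for the sw-tags
 * mode, where the tag lives in the pointer's top byte.
 */
static void __maybe_unused kasan_tag_roundtrip_example(void *p)
{
	u8 tag = kasan_random_tag();
	void *tagged = set_tag(p, tag);

	/* Tagging changes only the top byte; the address bits survive. */
	WARN_ON(kasan_reset_tag(tagged) != kasan_reset_tag(p));
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		WARN_ON(get_tag(tagged) != tag);
}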
/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!kasan_arch_is_ready())
		return;

	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

#else /* CONFIG_KASAN_VMALLOC */

int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
		return 0;
	}

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		struct vm_struct *vm = find_vm_area(addr);
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		vm->flags |= VM_KASAN;
		kmemleak_ignore(ret);

		if (vm->flags & VM_DEFER_KMEMLEAK)
			kmemleak_vmalloc(vm, size, gfp_mask);

		return 0;
	}

	return -ENOMEM;
}

void kasan_free_module_shadow(const struct vm_struct *vm)
{
	if (IS_ENABLED(CONFIG_UML))
		return;

	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif
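/*
 * Illustrative sketch, not part of the original file: the shadow sizing
 * math used by kasan_alloc_module_shadow() above. With the usual 8-byte
 * granule (KASAN_SHADOW_SCALE_SHIFT == 3) and 4K pages, a hypothetical
 * 300KB module needs (307200 + 7) >> 3 = 38400 shadow bytes, rounded up
 * to 40960 bytes, i.e. ten shadow pages.
 */
static size_t __maybe_unused kasan_module_shadow_size(size_t size)
{
	/* One shadow byte covers KASAN_GRANULE_SIZE bytes of module space. */
	return round_up((size + KASAN_GRANULE_SIZE - 1) >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);
}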