// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
/*
 * CONFIG_GENERIC_ENTRY relies on compiler-emitted mem*() calls not being
 * instrumented. KASAN-enabled toolchains should emit __asan_mem*() functions
 * for the sites they want to instrument.
 *
 * If we have a compiler that can instrument memintrinsics, never override
 * these, so that non-instrumented files can safely consider them as builtins.
 */
#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
#endif

void *__asan_memset(void *addr, int c, ssize_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}
EXPORT_SYMBOL(__asan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__asan_memmove(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
EXPORT_SYMBOL(__asan_memmove);
#endif

void *__asan_memcpy(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
EXPORT_SYMBOL(__asan_memcpy);

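/*
 * Illustrative sketch (not part of the original file): with a compiler that
 * supports the memintrinsic prefix (CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX),
 * a plain call in an instrumented translation unit, e.g.
 *
 *	memcpy(dst, src, n);
 *
 * is emitted as
 *
 *	__asan_memcpy(dst, src, n);	(__hwasan_memcpy() for the SW_TAGS mode)
 *
 * so both the source and destination ranges go through kasan_check_range()
 * before __memcpy() runs. Without that compiler support, the
 * memset()/memmove()/memcpy() overrides above intercept every caller instead.
 */
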
#ifdef CONFIG_KASAN_SW_TAGS
void *__hwasan_memset(void *addr, int c, ssize_t len) __alias(__asan_memset);
EXPORT_SYMBOL(__hwasan_memset);
#ifdef __HAVE_ARCH_MEMMOVE
void *__hwasan_memmove(void *dest, const void *src, ssize_t len) __alias(__asan_memmove);
EXPORT_SYMBOL(__hwasan_memmove);
#endif
void *__hwasan_memcpy(void *dest, const void *src, ssize_t len) __alias(__asan_memcpy);
EXPORT_SYMBOL(__hwasan_memcpy);
#endif

void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	void *shadow_start, *shadow_end;

	if (!kasan_arch_is_ready())
		return;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	shadow_start = kasan_mem_to_shadow(addr);
	shadow_end = kasan_mem_to_shadow(addr + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL_GPL(kasan_poison);

#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
	if (!kasan_arch_is_ready())
		return;

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
#endif

void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;

	/* Unpoison all granules that cover the object. */
	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(addr, size);
}

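/*
 * Worked example (illustrative, assuming the generic mode's 8-byte granules):
 * kasan_unpoison(p, 41, false) rounds the size up to 48 bytes and writes the
 * pointer's tag (0x00 in the generic mode) to the six shadow bytes covering
 * the object; kasan_poison_last_granule() then overwrites the sixth shadow
 * byte with 41 & KASAN_GRANULE_MASK == 1, so only the first byte of the last
 * granule is considered accessible.
 */
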
#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(): if the pud is bad, that's because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(ptep_get(pte));
}

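/*
 * Sizing example (illustrative, assuming the generic mode's
 * KASAN_SHADOW_SCALE_SHIFT of 3 and 4 KB pages): onlining a 128 MB memory
 * block (32768 pages) gives nr_shadow_pages = 32768 >> 3 = 4096, i.e. the
 * notifier below maps 16 MB of shadow over [shadow_start, shadow_end) for
 * that block.
 */
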
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen when previously offlined
		 * memory is being onlined again.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that it was
		 * the latter case.
		 *
		 * It is currently not possible to free shadow mapped during
		 * boot by kasan_init(): the code to do that hasn't been
		 * written yet, so we just leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, DEFAULT_CALLBACK_PRI);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

void __init __weak kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{
}

static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(ptep_get(ptep))))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	__memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(ptep_get(ptep)))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!kasan_arch_is_ready())
		return 0;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);

	/*
	 * User Mode Linux maps enough shadow memory for all of virtual memory
	 * at boot, so it doesn't need to allocate more for vmalloc; just
	 * clear it.
	 *
	 * The remaining CONFIG_UML checks in this file exist for the same
	 * reason.
	 */
	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
		return 0;
	}

	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
	shadow_end = PAGE_ALIGN(shadow_end);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				  CPU#1
	 *   WRITE_ONCE(p, vmalloc(100));	  while (x = READ_ONCE(p)) ;
	 *					  p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				  CPU#1
	 *   // vmalloc() allocates memory
	 *   // let a = area->addr
	 *   // we reach kasan_populate_vmalloc
	 *   // and call kasan_unpoison:
	 *   STORE shadow(a), unpoison_val
	 *   ...
	 *   STORE shadow(a+99), unpoison_val	  x = LOAD p
	 *   // rest of vmalloc process		  <data dependency>
	 *   STORE p, a				  LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

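/*
 * Worked example (illustrative, assuming the generic mode's 1:8 shadow
 * scaling and 4 KB pages): populating shadow for a 3-page (12 KB) vmalloc
 * area requires 12288 >> KASAN_SHADOW_SCALE_SHIFT = 1536 bytes of shadow.
 * After PAGE_ALIGN_DOWN()/PAGE_ALIGN(), that becomes one shadow page (two if
 * the shadow range straddles a page boundary), each backed on demand by
 * kasan_populate_vmalloc_pte().
 */
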
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(ptep_get(ptep)))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labeled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- -------- -------- --------
 *      |        |       |        |        |
 *      |        |       |      /-------/  |
 *      \-------\|/------/       |/---------------/
 *              |||               ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                      free_region_end
 *  |                 start                     end        |
 *  v                 v                         v          v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- -------- -------- --------
 *      |        |       |        |        |
 *      |        |       |      /-------/  |
 *      \-------\|/------/       |/---------------/
 *              |||               ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	if (!kasan_arch_is_ready())
		return;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		if (IS_ENABLED(CONFIG_UML)) {
			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
			return;
		}
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}

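/*
 * Worked example (illustrative, assuming 4 KB pages and the generic mode's
 * 8-byte granules, i.e. KASAN_MEMORY_PER_SHADOW_PAGE = 32 KB): releasing
 * [base + 0x5000, base + 0x19000) out of the free region
 * [base, base + 0x20000), with base 32 KB aligned, first yields
 * region_start = base + 0x8000 and region_end = base + 0x18000. Because the
 * free region covers both partially used chunks, region_start is pulled back
 * to base and region_end pushed out to base + 0x20000, so all four shadow
 * pages backing [base, base + 0x20000) get depopulated.
 */
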
void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags)
{
	/*
	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
	 * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
	 * Software KASAN modes can't optimize zeroing memory by combining it
	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
	 */

	if (!kasan_arch_is_ready())
		return (void *)start;

	if (!is_vmalloc_or_module_addr(start))
		return (void *)start;

	/*
	 * Don't tag executable memory with the tag-based mode.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
		return (void *)start;

	start = set_tag(start, kasan_random_tag());
	kasan_unpoison(start, size, false);
	return (void *)start;
}

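/*
 * Usage sketch (illustrative, not the exact vmalloc.c call site): callers
 * must adopt the returned pointer, since with CONFIG_KASAN_SW_TAGS it
 * carries the freshly assigned random tag that now matches the shadow:
 *
 *	area->addr = __kasan_unpoison_vmalloc(area->addr, size,
 *					      KASAN_VMALLOC_PROT_NORMAL);
 */
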
/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!kasan_arch_is_ready())
		return;

	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

#else /* CONFIG_KASAN_VMALLOC */

int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
		return 0;
	}

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		struct vm_struct *vm = find_vm_area(addr);
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		vm->flags |= VM_KASAN;
		kmemleak_ignore(ret);

		if (vm->flags & VM_DEFER_KMEMLEAK)
			kmemleak_vmalloc(vm, size, gfp_mask);

		return 0;
	}

	return -ENOMEM;
}

void kasan_free_module_shadow(const struct vm_struct *vm)
{
	if (IS_ENABLED(CONFIG_UML))
		return;

	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif