// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

struct slab *kasan_addr_to_slab(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_slab(addr);
	return NULL;
}

depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}

void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
	u32 cpu = raw_smp_processor_id();
	u64 ts_nsec = local_clock();

	track->cpu = cpu;
	track->timestamp = ts_nsec >> 9;
#endif /* CONFIG_KASAN_EXTRA_INFO */
	track->pid = current->pid;
	track->stack = stack;
}

void kasan_save_track(struct kasan_track *track, gfp_t flags)
{
	depot_stack_handle_t stack;

	stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
	kasan_set_track(track, stack);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
	if (is_kfence_address(address))
		return;

	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */
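
/*
 * Illustrative sketch (not part of the upstream API): the mask arithmetic in
 * kasan_unpoison_task_stack_below() above relies on task stacks being
 * THREAD_SIZE-aligned. Assuming THREAD_SIZE is 16 KiB (0x4000), a watermark
 * of 0xffffc90000207e40 yields:
 *
 *	base = 0xffffc90000207e40 & ~(0x4000 - 1) = 0xffffc90000204000;
 *	kasan_unpoison(base, 0xffffc90000207e40 - base, false);  // 0x3e40 bytes
 *
 * i.e. only the unused part of the stack below the watermark is unpoisoned.
 */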

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return false;

	if (!kasan_sample_page_alloc(order))
		return false;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);

	return true;
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/*
	 * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
	 * assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object)
{
	/* Initialize per-object metadata if it is present. */
	if (kasan_requires_meta())
		kasan_init_object_meta(cache, object);

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}
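
/*
 * Illustrative sketch of the tag policy implemented by assign_tag() above,
 * under the assumption of CONFIG_KASAN_SW_TAGS on arm64, where set_tag()
 * places the tag in the top byte of the pointer (via Top Byte Ignore):
 *
 *	cache without ctor and without SLAB_TYPESAFE_BY_RCU:
 *		slab creation (init == true)  -> KASAN_TAG_KERNEL (0xff)
 *		allocation    (init == false) -> kasan_random_tag()
 *	cache with ctor or SLAB_TYPESAFE_BY_RCU:
 *		slab creation (init == true)  -> kasan_random_tag(), kept
 *		allocation    (init == false) -> get_tag(object) (reused)
 *
 * For example, set_tag(0xffff000012345678, 0xab) would produce
 * 0xabff000012345678 under this assumption. For CONFIG_KASAN_GENERIC the
 * tag is always 0xff and set_tag() ignores it.
 */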

/* Returns true when freeing the object is not safe. */
static bool check_slab_allocation(struct kmem_cache *cache, void *object,
				  unsigned long ip)
{
	void *tagged_object = object;

	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	return false;
}

static inline void poison_slab_object(struct kmem_cache *cache, void *object,
				      bool init)
{
	void *tagged_object = object;

	object = kasan_reset_tag(object);

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_SLAB_FREE, init);

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);
}

bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
			   unsigned long ip)
{
	if (!kasan_arch_is_ready() || is_kfence_address(object))
		return false;
	return check_slab_allocation(cache, object, ip);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
		       bool still_accessible)
{
	if (!kasan_arch_is_ready() || is_kfence_address(object))
		return false;

	/*
	 * If this point is reached with an object that must still be
	 * accessible under RCU, we can't poison it; in that case, also skip the
	 * quarantine. This should mostly only happen when CONFIG_SLUB_RCU_DEBUG
	 * has been disabled manually.
	 *
	 * Putting the object on the quarantine wouldn't help catch UAFs (since
	 * we can't poison it here), and it would mask bugs caused by
	 * SLAB_TYPESAFE_BY_RCU users not being careful enough about object
	 * reuse; so overall, putting the object into the quarantine here would
	 * be counterproductive.
	 */
	if (still_accessible)
		return false;

	poison_slab_object(cache, object, init);

	/*
	 * If the object is put into quarantine, do not let slab put the object
	 * onto the freelist for now. The object's metadata is kept until the
	 * object gets evicted from quarantine.
	 */
	if (kasan_quarantine_put(cache, object))
		return true;

	/*
	 * Note: Keep per-object metadata to allow KASAN to print stack traces
	 * for use-after-free-before-realloc bugs.
	 */

	/* Let slab put the object onto the freelist. */
	return false;
}

static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
	if (!kasan_arch_is_ready())
		return false;

	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	check_page_allocation(ptr, ip);

	/* The object will be poisoned by kasan_poison_pages(). */
}
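
/*
 * Simplified sketch of how a slab allocator is expected to drive the two
 * free-path hooks above (through the kasan_slab_pre_free()/kasan_slab_free()
 * wrappers); this is an illustration only, not the actual mm/slub.c call
 * sites:
 *
 *	if (kasan_slab_pre_free(cache, object))
 *		return;		// invalid or double free already reported
 *	if (kasan_slab_free(cache, object, init, still_accessible))
 *		return;		// object held in the KASAN quarantine
 *	// only now may the object go back onto the freelist
 */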

static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
					gfp_t flags, bool init)
{
	/*
	 * Unpoison the whole object. For kmalloc() allocations,
	 * poison_kmalloc_redzone() will do precise poisoning.
	 */
	kasan_unpoison(object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, object, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
				       void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(cache, tagged_object, flags, init);

	return tagged_object;
}

static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
			       KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, (void *)object, flags);
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
				    size_t size, gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/* The object has already been unpoisoned by kasan_slab_alloc(). */
	poison_kmalloc_redzone(cache, object, size, flags);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}
EXPORT_SYMBOL(__kasan_kmalloc);
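
/*
 * Worked example for poison_kmalloc_redzone() above, under the assumption of
 * the generic mode with KASAN_GRANULE_SIZE == 8: for kmalloc(45) served from
 * a cache with object_size == 128,
 *
 *	kasan_poison_last_granule(object, 45)	covers bytes [45, 48)
 *	redzone_start = round_up(45, 8)		== object + 48
 *	redzone_end   = round_up(128, 8)	== object + 128
 *	kasan_poison(object + 48, 80, KASAN_SLAB_REDZONE, false)
 *
 * so accesses past the requested 45 bytes hit either the partially poisoned
 * last granule or the KASAN_SLAB_REDZONE area.
 */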

static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);
}

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
					  gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/* The object has already been unpoisoned by kasan_unpoison_pages(). */
	poison_kmalloc_large_redzone(ptr, size, flags);

	/* Keep the tag that was set by alloc_pages(). */
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		poison_kmalloc_large_redzone(object, size, flags);
	else
		poison_kmalloc_redzone(slab->slab_cache, object, size, flags);

	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip)
{
	unsigned long *ptr;

	if (unlikely(PageHighMem(page)))
		return true;

	/* Bail out if allocation was excluded due to sampling. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    page_kasan_tag(page) == KASAN_TAG_KERNEL)
		return true;

	ptr = page_address(page);

	if (check_page_allocation(ptr, ip))
		return false;

	kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);

	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip)
{
	__kasan_unpoison_pages(page, order, false);
}

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio = virt_to_folio(ptr);
	struct slab *slab;

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc. Thus, the folio might not be a slab.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;
	}

	if (is_kfence_address(ptr) || !kasan_arch_is_ready())
		return true;

	slab = folio_slab(folio);

	if (check_slab_allocation(slab->slab_cache, ptr, ip))
		return false;

	poison_slab_object(slab->slab_cache, ptr, false);
	return true;
}
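
/*
 * Illustrative sketch of how a mempool-style caller might pair the hooks
 * above and below (an assumption about the caller, not a definition of the
 * mempool implementation; add_element()/remove_element() are hypothetical
 * helpers):
 *
 *	// element returned to the pool: keep it poisoned while parked
 *	if (kasan_mempool_poison_object(element))
 *		add_element(pool, element);
 *
 *	// element handed back out: make it usable again
 *	element = remove_element(pool);
 *	kasan_mempool_unpoison_object(element, size);
 *
 * kasan_mempool_poison_object() returning false means a bug was already
 * reported and the element should not be reused.
 */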

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
{
	struct slab *slab;
	gfp_t flags = 0; /* Might be executing under a lock. */

	slab = virt_to_slab(ptr);

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc.
	 */
	if (unlikely(!slab)) {
		kasan_unpoison(ptr, size, false);
		poison_kmalloc_large_redzone(ptr, size, flags);
		return;
	}

	if (is_kfence_address(ptr))
		return;

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(slab->slab_cache, ptr, flags, false);

	/* Poison the redzone and save alloc info for kmalloc() allocations. */
	if (is_kmalloc_cache(slab->slab_cache))
		poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}

bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report(address, 1, false, ip);
		return false;
	}
	return true;
}
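
/*
 * Illustrative sketch for __kasan_check_byte() above (normally reached
 * through the kasan_check_byte() wrapper): a caller that wants to probe
 * whether a pointer still refers to accessible memory before using it can
 * do, for example:
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;	// not accessible; a KASAN report was printed
 *	// ptr is known to be accessible at least at its first byte
 *
 * The caller shown here is an assumption for illustration; the hook itself
 * only reports and returns false when the byte is not accessible.
 */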