// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

struct slab *kasan_addr_to_slab(const void *addr)
{
        if (virt_addr_valid(addr))
                return virt_to_slab(addr);
        return NULL;
}

depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}

void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = kasan_save_stack(flags,
                        STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
        current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
        current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
        kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        void *base = task_stack_page(task);

        kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address. Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

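/*
 * Unpoison pages returned by the page allocator: generate a tag, apply it
 * both to the page address and to each struct page, and unpoison the whole
 * allocation. Returns false when the pages are left untouched, either
 * because they are highmem or because this allocation was skipped by page
 * allocation sampling.
 */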
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return false;

        if (!kasan_sample_page_alloc(order))
                return false;

        tag = kasan_random_tag();
        kasan_unpoison(set_tag(page_address(page), tag),
                       PAGE_SIZE << order, init);
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);

        return true;
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
        if (likely(!PageHighMem(page)))
                kasan_poison(page_address(page), PAGE_SIZE << order,
                             KASAN_PAGE_FREE, init);
}

void __kasan_poison_slab(struct slab *slab)
{
        struct page *page = slab_page(slab);
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison(page_address(page), page_size(page),
                     KASAN_SLAB_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                        KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
                                        const void *object, bool init)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return 0xff;

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : kasan_random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? kasan_random_tag() : get_tag(object);
#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                                                const void *object)
{
        /* Initialize per-object metadata if it is present. */
        if (kasan_requires_meta())
                kasan_init_object_meta(cache, object);

        /* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
        object = set_tag(object, assign_tag(cache, object, true));

        return (void *)object;
}

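/*
 * Common slab free hook: reports invalid and double frees, poisons the
 * freed object, and, for the generic mode, optionally places it into the
 * quarantine. Returns true when the object must not be freed by the caller.
 */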
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
                                unsigned long ip, bool quarantine, bool init)
{
        void *tagged_object;

        if (!kasan_arch_is_ready())
                return false;

        tagged_object = object;
        object = kasan_reset_tag(object);

        if (is_kfence_address(object))
                return false;

        if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        if (!kasan_byte_accessible(tagged_object)) {
                kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
                return true;
        }

        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                        KASAN_SLAB_FREE, init);

        if (IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)
                return false;

        if (kasan_stack_collection_enabled())
                kasan_save_free_info(cache, tagged_object);

        return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                                unsigned long ip, bool init)
{
        return ____kasan_slab_free(cache, object, ip, true, init);
}

static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (!kasan_arch_is_ready())
                return false;

        if (ptr != page_address(virt_to_head_page(ptr))) {
                kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
                return true;
        }

        if (!kasan_byte_accessible(ptr)) {
                kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
                return true;
        }

        /*
         * The object will be poisoned by kasan_poison_pages() or
         * kasan_slab_free_mempool().
         */

        return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
        ____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
        struct folio *folio;

        folio = virt_to_folio(ptr);

        /*
         * Even though this function is only called for kmem_cache_alloc and
         * kmalloc backed mempool allocations, those allocations can still be
         * !PageSlab() when the size provided to kmalloc is larger than
         * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
         */
        if (unlikely(!folio_test_slab(folio))) {
                if (____kasan_kfree_large(ptr, ip))
                        return;
                kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
        } else {
                struct slab *slab = folio_slab(folio);

                ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
        }
}

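/*
 * Slab allocation hook: assigns a tag to the freshly allocated object,
 * unpoisons it, and records the allocation stack trace for non-kmalloc
 * caches. KFENCE-managed objects are returned unmodified.
 */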
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
                                        void *object, gfp_t flags, bool init)
{
        u8 tag;
        void *tagged_object;

        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        if (is_kfence_address(object))
                return (void *)object;

        /*
         * Generate and assign random tag for tag-based modes.
         * Tag is ignored in set_tag() for the generic mode.
         */
        tag = assign_tag(cache, object, false);
        tagged_object = set_tag(object, tag);

        /*
         * Unpoison the whole object.
         * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
         */
        kasan_unpoison(tagged_object, cache->object_size, init);

        /* Save alloc info (if possible) for non-kmalloc() allocations. */
        if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
                kasan_save_alloc_info(cache, tagged_object, flags);

        return tagged_object;
}

static inline void *____kasan_kmalloc(struct kmem_cache *cache,
                                const void *object, size_t size, gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        if (is_kfence_address(kasan_reset_tag(object)))
                return (void *)object;

        /*
         * The object has already been unpoisoned by kasan_slab_alloc() for
         * kmalloc() or by kasan_krealloc() for krealloc().
         */

        /*
         * The redzone has byte-level precision for the generic mode.
         * Partially poison the last object granule to cover the unaligned
         * part of the redzone.
         */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kasan_poison_last_granule((void *)object, size);

        /* Poison the aligned part of the redzone. */
        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_GRANULE_SIZE);
        redzone_end = round_up((unsigned long)(object + cache->object_size),
                                KASAN_GRANULE_SIZE);
        kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                        KASAN_SLAB_REDZONE, false);

        /*
         * Save alloc info (if possible) for kmalloc() allocations.
         * This also rewrites the alloc info when called from kasan_krealloc().
         */
        if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
                kasan_save_alloc_info(cache, (void *)object, flags);

        /* Keep the tag that was set by kasan_slab_alloc(). */
        return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                        size_t size, gfp_t flags)
{
        return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

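/*
 * kmalloc hook for large, page_alloc-backed allocations: poisons the
 * redzone between the requested size and the end of the underlying pages.
 */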
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
                                                gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        /*
         * The object has already been unpoisoned by kasan_unpoison_pages() for
         * alloc_pages() or by kasan_krealloc() for krealloc().
         */

        /*
         * The redzone has byte-level precision for the generic mode.
         * Partially poison the last object granule to cover the unaligned
         * part of the redzone.
         */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kasan_poison_last_granule(ptr, size);

        /* Poison the aligned part of the redzone. */
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_GRANULE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
        kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_PAGE_REDZONE, false);

        return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct slab *slab;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        /*
         * Unpoison the object's data.
         * Part of it might already have been unpoisoned, but it's unknown
         * how big that part is.
         */
        kasan_unpoison(object, size, false);

        slab = virt_to_slab(object);

        /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
        if (unlikely(!slab))
                return __kasan_kmalloc_large(object, size, flags);
        else
                return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}

bool __kasan_check_byte(const void *address, unsigned long ip)
{
        if (!kasan_byte_accessible(address)) {
                kasan_report(address, 1, false, ip);
                return false;
        }
        return true;
}