// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_NO_MERGE)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM

static bool kmem_cache_is_duplicate_name(const char *name)
{
	struct kmem_cache *s;

	list_for_each_entry(s, &slab_caches, list) {
		if (!strcmp(s->name, name))
			return true;
	}

	return false;
}

static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	/* Duplicate names will confuse slabtop, et al */
	WARN(kmem_cache_is_duplicate_name(name),
	     "kmem_cache of name '%s' already exists\n", name);

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user-specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}
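/*
 * Illustrative worked example (editorial addition, not part of the original
 * file): with SLAB_HWCACHE_ALIGN, a 64-byte cache line and a 24-byte object,
 * the loop above halves ralign while the object still fits in half of it:
 * 64 -> 32 (since 24 <= 32), then it stops (24 > 16). The object is therefore
 * aligned to 32 bytes rather than a full cache line, so two objects share a
 * line without any single object straddling a line boundary.
 */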

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

#ifdef CONFIG_HARDENED_USERCOPY
	if (s->usersize)
		return 1;
#endif

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	flags = kmem_cache_flags(flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
				       unsigned int object_size,
				       struct kmem_cache_args *args,
				       slab_flags_t flags)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(args->useroffset + args->usersize > object_size))
		args->useroffset = args->usersize = 0;

	/* If a custom freelist pointer is requested make sure it's sane. */
	err = -EINVAL;
	if (args->use_freeptr_offset &&
	    (args->freeptr_offset >= object_size ||
	     !(flags & SLAB_TYPESAFE_BY_RCU) ||
	     !IS_ALIGNED(args->freeptr_offset, sizeof(freeptr_t))))
		goto out;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;
	err = do_kmem_cache_create(s, name, object_size, args, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
out:
	return ERR_PTR(err);
}

/**
 * __kmem_cache_create_args - Create a kmem cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @object_size: The size of objects to be created in this cache.
 * @args: Additional arguments for the cache creation (see
 *        &struct kmem_cache_args).
 * @flags: See %SLAB_* flags for an explanation of individual @flags.
 *
 * Not to be called directly, use the kmem_cache_create() wrapper with the same
 * parameters.
 *
 * Context: Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *__kmem_cache_create_args(const char *name,
					    unsigned int object_size,
					    struct kmem_cache_args *args,
					    slab_flags_t flags)
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slab_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, object_size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
	    WARN_ON(!args->usersize && args->useroffset) ||
	    WARN_ON(object_size < args->usersize ||
		    object_size - args->usersize < args->useroffset))
		args->usersize = args->useroffset = 0;

	if (!args->usersize)
		s = __kmem_cache_alias(name, object_size, args->align, flags,
				       args->ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	args->align = calculate_alignment(flags, args->align, object_size);
	s = create_cache(cache_name, object_size, args, flags);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
				__func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(__kmem_cache_create_args);
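/*
 * Editorial example (not part of the original file): a minimal sketch of how
 * a typical caller uses the kmem_cache_create() wrapper documented above and
 * tears the cache down again. "struct foo", foo_cache and the init/exit
 * functions are hypothetical names used for illustration only.
 */
#if 0
struct foo {
	struct list_head list;
	unsigned long cookie;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* All objects must have been freed back to the cache by now. */
	kmem_cache_destroy(foo_cache);
}
#endif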

static struct kmem_cache *kmem_buckets_cache __ro_after_init;

/**
 * kmem_buckets_create - Create a set of caches that handle dynamic sized
 *			 allocations via kmem_buckets_alloc()
 * @name: A prefix string which is used in /proc/slabinfo to identify this
 *	  cache. The individual caches will have their sizes as the suffix.
 * @flags: SLAB flags (see kmem_cache_create() for details).
 * @useroffset: Starting offset within an allocation that may be copied
 *		to/from userspace.
 * @usersize: How many bytes, starting at @useroffset, may be copied
 *	      to/from userspace.
 * @ctor: A constructor for the objects, run when new allocations are made.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure. When
 * CONFIG_SLAB_BUCKETS is not enabled, ZERO_SIZE_PTR is returned, and
 * subsequent calls to kmem_buckets_alloc() will fall back to kmalloc().
 * (i.e. callers only need to check for NULL on failure.)
 */
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset,
				  unsigned int usersize,
				  void (*ctor)(void *))
{
	kmem_buckets *b;
	int idx;

	/*
	 * When the separate buckets API is not built in, just return
	 * a non-NULL value for the kmem_buckets pointer, which will be
	 * unused when performing allocations.
	 */
	if (!IS_ENABLED(CONFIG_SLAB_BUCKETS))
		return ZERO_SIZE_PTR;

	if (WARN_ON(!kmem_buckets_cache))
		return NULL;

	b = kmem_cache_alloc(kmem_buckets_cache, GFP_KERNEL|__GFP_ZERO);
	if (WARN_ON(!b))
		return NULL;

	flags |= SLAB_NO_MERGE;

	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
		char *short_size, *cache_name;
		unsigned int cache_useroffset, cache_usersize;
		unsigned int size;

		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
			continue;

		size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;
		if (!size)
			continue;

		short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
		if (WARN_ON(!short_size))
			goto fail;

		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
		if (WARN_ON(!cache_name))
			goto fail;

		if (useroffset >= size) {
			cache_useroffset = 0;
			cache_usersize = 0;
		} else {
			cache_useroffset = useroffset;
			cache_usersize = min(size - cache_useroffset, usersize);
		}
		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
					0, flags, cache_useroffset,
					cache_usersize, ctor);
		kfree(cache_name);
		if (WARN_ON(!(*b)[idx]))
			goto fail;
	}

	return b;

fail:
	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++)
		kmem_cache_destroy((*b)[idx]);
	kmem_cache_free(kmem_buckets_cache, b);

	return NULL;
}
EXPORT_SYMBOL(kmem_buckets_create);
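/*
 * Editorial example (not part of the original file): a minimal sketch of how
 * a subsystem might use the buckets API documented above to give its
 * variable-sized, attacker-influenced allocations their own set of caches.
 * The "pkt" name and the surrounding helper functions are hypothetical.
 */
#if 0
static kmem_buckets *pkt_buckets;

static int __init pkt_buckets_init(void)
{
	pkt_buckets = kmem_buckets_create("pkt", SLAB_ACCOUNT, 0, 0, NULL);
	if (!pkt_buckets)
		return -ENOMEM;
	return 0;
}

static void *pkt_alloc(size_t len)
{
	/* Falls back to kmalloc() when CONFIG_SLAB_BUCKETS is disabled. */
	return kmem_buckets_alloc(pkt_buckets, len, GFP_KERNEL);
}
#endif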

/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
	kfence_shutdown_cache(s);
	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
		sysfs_slab_release(s);
	else
		slab_kmem_cache_release(s);
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	/* in-flight kfree_rcu()'s may include objects from our cache */
	kvfree_rcu_barrier();

	if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
	    (s->flags & SLAB_TYPESAFE_BY_RCU)) {
		/*
		 * Under CONFIG_SLUB_RCU_DEBUG, when objects in a
		 * SLAB_TYPESAFE_BY_RCU slab are freed, SLUB will internally
		 * defer their freeing with call_rcu().
		 * Wait for such call_rcu() invocations here before actually
		 * destroying the cache.
		 *
		 * It doesn't matter that we haven't looked at the slab refcount
		 * yet - slabs with SLAB_TYPESAFE_BY_RCU can't be merged, so
		 * the refcount should be 1 here.
		 */
		rcu_barrier();
	}

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount) {
		mutex_unlock(&slab_mutex);
		cpus_read_unlock();
		return;
	}

	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	err = __kmem_cache_shutdown(s);
	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
	     __func__, s->name, (void *)_RET_IP_);

	list_del(&s->list);

	mutex_unlock(&slab_mutex);
	cpus_read_unlock();

	if (slab_state >= FULL)
		sysfs_slab_unlink(s);
	debugfs_slab_release(s);

	if (err)
		return;

	if (s->flags & SLAB_TYPESAFE_BY_RCU)
		rcu_barrier();

	kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	kasan_cache_shrink(cachep);

	return __kmem_cache_shrink(cachep);
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}
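/*
 * Editorial example (not part of the original file): slab_is_available() is
 * typically consulted by early-boot code that may run both before and after
 * the slab allocator is up, choosing between memblock and kmalloc. The
 * helper below is a hypothetical sketch of that pattern.
 */
#if 0
static void *early_or_late_alloc(size_t size)
{
	if (slab_is_available())
		return kzalloc(size, GFP_KERNEL);

	return memblock_alloc(size, SMP_CACHE_BYTES);
}
#endif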

#ifdef CONFIG_PRINTK
static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}

/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	slab = virt_to_slab(object);
	if (!slab)
		return false;

	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
		pr_cont(" size %u", kp.kp_slab_cache->object_size);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");
	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_free_stack[i]);
	}

	return true;
}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
#endif
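/*
 * Editorial example (not part of the original file): kmem_dump_obj() only
 * emits pr_cont() continuations, so callers print their own preamble first
 * and fall back to something else when the pointer is not a slab object.
 * This hypothetical sketch mirrors how debug helpers such as mem_dump_obj()
 * use it.
 */
#if 0
static void report_suspect_pointer(void *ptr)
{
	pr_info("Suspect object %px:", ptr);
	if (!kmem_dump_obj(ptr))
		pr_cont(" not tracked by the slab allocator\n");
}
#endif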

/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;
	struct kmem_cache_args kmem_args = {};

	/*
	 * kmalloc caches guarantee alignment of at least the largest
	 * power-of-two divisor of the size. For power-of-two sizes,
	 * it is the size itself.
	 */
	if (flags & SLAB_KMALLOC)
		align = max(align, 1U << (ffs(size) - 1));
	kmem_args.align = calculate_alignment(flags, align, size);

#ifdef CONFIG_HARDENED_USERCOPY
	kmem_args.useroffset = useroffset;
	kmem_args.usersize = usersize;
#endif

	err = do_kmem_cache_create(s, name, size, &kmem_args, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
		      name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

static struct kmem_cache *__init create_kmalloc_cache(const char *name,
						       unsigned int size,
						       slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES] __ro_after_init =
{ /* initialization for https://llvm.org/pr42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
unsigned long random_kmalloc_seed __ro_after_init;
EXPORT_SYMBOL(random_kmalloc_seed);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
u8 kmalloc_size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

size_t kmalloc_size_roundup(size_t size)
{
	if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
		/*
		 * The flags don't matter since size_index is common to all.
		 * Neither does the caller for just getting ->object_size.
		 */
		return kmalloc_slab(size, NULL, GFP_KERNEL, 0)->object_size;
	}

	/* Above the smaller buckets, size is a multiple of page size. */
	if (size && size <= KMALLOC_MAX_SIZE)
		return PAGE_SIZE << get_order(size);

	/*
	 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
	 * and very large size - kmalloc() may fail.
	 */
	return size;

}
EXPORT_SYMBOL(kmalloc_size_roundup);
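/*
 * Editorial example (not part of the original file): kmalloc_size_roundup()
 * lets a caller that intends to use all of an allocation grow its request to
 * the bucket size up front, instead of discovering the slack later via
 * ksize(). The buffer-sizing helper below is a hypothetical sketch.
 */
#if 0
static void *alloc_full_bucket(size_t want, size_t *got)
{
	size_t full = kmalloc_size_roundup(want);	/* e.g. 1000 -> 1024 */
	void *buf = kmalloc(full, GFP_KERNEL);

	if (buf)
		*got = full;
	return buf;
}
#endif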

#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#ifndef CONFIG_SLUB_TINY
#define KMALLOC_RCL_NAME(sz)	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
#else
#define KMALLOC_RCL_NAME(sz)
#endif

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define __KMALLOC_RANDOM_CONCAT(a, b) a ## b
#define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz)
#define KMA_RAND_1(sz)	.name[KMALLOC_RANDOM_START + 1] = "kmalloc-rnd-01-" #sz,
#define KMA_RAND_2(sz)	KMA_RAND_1(sz)	.name[KMALLOC_RANDOM_START + 2] = "kmalloc-rnd-02-" #sz,
#define KMA_RAND_3(sz)	KMA_RAND_2(sz)	.name[KMALLOC_RANDOM_START + 3] = "kmalloc-rnd-03-" #sz,
#define KMA_RAND_4(sz)	KMA_RAND_3(sz)	.name[KMALLOC_RANDOM_START + 4] = "kmalloc-rnd-04-" #sz,
#define KMA_RAND_5(sz)	KMA_RAND_4(sz)	.name[KMALLOC_RANDOM_START + 5] = "kmalloc-rnd-05-" #sz,
#define KMA_RAND_6(sz)	KMA_RAND_5(sz)	.name[KMALLOC_RANDOM_START + 6] = "kmalloc-rnd-06-" #sz,
#define KMA_RAND_7(sz)	KMA_RAND_6(sz)	.name[KMALLOC_RANDOM_START + 7] = "kmalloc-rnd-07-" #sz,
#define KMA_RAND_8(sz)	KMA_RAND_7(sz)	.name[KMALLOC_RANDOM_START + 8] = "kmalloc-rnd-08-" #sz,
#define KMA_RAND_9(sz)	KMA_RAND_8(sz)	.name[KMALLOC_RANDOM_START + 9] = "kmalloc-rnd-09-" #sz,
#define KMA_RAND_10(sz)	KMA_RAND_9(sz)	.name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz,
#define KMA_RAND_11(sz)	KMA_RAND_10(sz)	.name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz,
#define KMA_RAND_12(sz)	KMA_RAND_11(sz)	.name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz,
#define KMA_RAND_13(sz)	KMA_RAND_12(sz)	.name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz,
#define KMA_RAND_14(sz)	KMA_RAND_13(sz)	.name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz,
#define KMA_RAND_15(sz)	KMA_RAND_14(sz)	.name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz,
#else // CONFIG_RANDOM_KMALLOC_CACHES
#define KMALLOC_RANDOM_NAME(N, sz)
#endif

#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL] = "kmalloc-" #__short_size,	\
	KMALLOC_RCL_NAME(__short_size)				\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size)	\
	.size = __size,						\
}

/*
 * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M)
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		     !is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(kmalloc_size_index))
			break;
		kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 8;
	}
}

static unsigned int __kmalloc_minalign(void)
{
	unsigned int minalign = dma_get_cache_alignment();

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
	    is_swiotlb_allocated())
		minalign = ARCH_KMALLOC_MINALIGN;

	return max(minalign, arch_slab_minalign());
}

static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
{
	slab_flags_t flags = 0;
	unsigned int minalign = __kmalloc_minalign();
	unsigned int aligned_size = kmalloc_info[idx].size;
	int aligned_idx = idx;

	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
		flags |= SLAB_NO_MERGE;
#endif

	/*
	 * If CONFIG_MEMCG is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_NORMAL))
		flags |= SLAB_NO_MERGE;

	if (minalign > ARCH_KMALLOC_MINALIGN) {
		aligned_size = ALIGN(aligned_size, minalign);
		aligned_idx = __kmalloc_index(aligned_size, false);
	}

	if (!kmalloc_caches[type][aligned_idx])
		kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
					kmalloc_info[aligned_idx].name[type],
					aligned_size, flags);
	if (idx != aligned_idx)
		kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(void)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG is defined
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		/* Caches that are NOT of the two-to-the-power-of size. */
		if (KMALLOC_MIN_SIZE <= 32)
			new_kmalloc_cache(1, type);
		if (KMALLOC_MIN_SIZE <= 64)
			new_kmalloc_cache(2, type);

		/* Caches that are of the two-to-the-power-of size. */
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
			new_kmalloc_cache(i, type);
	}
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	random_kmalloc_seed = get_random_u64();
#endif

	/* Kmalloc array is now usable */
	slab_state = UP;

	if (IS_ENABLED(CONFIG_SLAB_BUCKETS))
		kmem_buckets_cache = kmem_cache_create("kmalloc_buckets",
						       sizeof(kmem_buckets),
						       0, SLAB_NO_MERGE, NULL);
}

/**
 * __ksize -- Report full size of underlying allocation
 * @object: pointer to the object
 *
 * This should only be used internally to query the true size of allocations.
 * It is not meant to be a way to discover the usable size of an allocation
 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
 * and/or FORTIFY_SOURCE.
 *
 * Return: size of the actual memory used by @object in bytes
 */
size_t __ksize(const void *object)
{
	struct folio *folio;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
			return 0;
		if (WARN_ON(object != folio_address(folio)))
			return 0;
		return folio_size(folio);
	}

#ifdef CONFIG_SLUB_DEBUG
	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
#endif

	return slab_ksize(folio_slab(folio)->slab_cache);
}

gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = get_random_u32_below(i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			    gfp_t gfp)
{

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	freelist_randomize(cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#ifdef CONFIG_SLUB_DEBUG
#define SLABINFO_RIGHTS (0400)

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Acquiring slab_mutex here is risky: we don't want to sleep in the
	 * OOM path, but traversing the list without the mutex risks a crash.
	 * Use mutex_trylock() to protect the traversal and dump nothing if
	 * the mutex cannot be acquired.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name                      Used          Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLUB_DEBUG */

static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks;

	/* Check for double-free before calling ksize. */
	if (likely(!ZERO_OR_NULL_PTR(p))) {
		if (!kasan_check_byte(p))
			return NULL;
		ks = ksize(p);
	} else
		ks = 0;

	/* If the object still fits, repoison it precisely. */
	if (ks >= new_size) {
		/* Zero out spare memory. */
		if (want_init_on_alloc(flags)) {
			kasan_disable_current();
			memset((void *)p + new_size, 0, ks - new_size);
			kasan_enable_current();
		}

		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
	if (ret && p) {
		/* Disable KASAN checks as the object's redzone is accessed. */
		kasan_disable_current();
		memcpy(ret, kasan_reset_tag(p), ks);
		kasan_enable_current();
	}

	return ret;
}

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * This is the case, since krealloc() only knows about the bucket size of an
 * allocation (but not the exact size it was allocated with) and hence
 * implements the following semantics for shrinking and growing buffers with
 * __GFP_ZERO.
 *
 *         new             bucket
 * 0       size            size
 * |--------|----------------|
 * |  keep  |      zero      |
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc_noprof);
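/*
 * Editorial example (not part of the original file): a common krealloc()
 * pattern for growing an array, keeping the old buffer intact on failure so
 * the caller can still use or free it. The names here are hypothetical.
 */
#if 0
static int grow_table(u32 **table, size_t new_count)
{
	u32 *tmp = krealloc(*table, new_count * sizeof(u32), GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *table is still valid and unchanged */

	*table = tmp;
	return 0;
}
#endif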

/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks) {
		kasan_unpoison_range(mem, ks);
		memzero_explicit(mem, ks);
	}
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);

size_t ksize(const void *objp)
{
	/*
	 * We need to first check that the pointer to the object is valid.
	 * The KASAN report printed from ksize() is more useful than when it's
	 * printed later, when the behaviour could be undefined due to a
	 * potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	return kfence_ksize(objp) ?: __ksize(objp);
}
EXPORT_SYMBOL(ksize);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);