// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
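		 *
		 * (Added note: probe_kernel_address() copies a single byte
		 * from s->name with page faults handled, so a dangling name
		 * pointer results in an error return here instead of an
		 * oops.)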
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
			    void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

LIST_HEAD(slab_root_caches);

void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.root_cache = NULL;
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
	INIT_LIST_HEAD(&s->memcg_params.children);
}

static int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (root_cache) {
		s->memcg_params.root_cache = root_cache;
		s->memcg_params.memcg = memcg;
		INIT_LIST_HEAD(&s->memcg_params.children_node);
		INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kvzalloc(sizeof(struct memcg_cache_array) +
		       memcg_nr_cache_ids * sizeof(void *),
		       GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static void free_memcg_params(struct rcu_head *rcu)
{
	struct memcg_cache_array *old;

	old = container_of(rcu, struct memcg_cache_array, rcu);
	kvfree(old);
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	new = kvzalloc(sizeof(struct memcg_cache_array) +
		       new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		call_rcu(&old->rcu, free_memcg_params);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
		ret = update_memcg_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
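		 * (Added note: a cache that was already updated simply keeps
		 * the larger array; its extra entries stay NULL and are
		 * never used.)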
		 */
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}

void memcg_link_cache(struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		list_add(&s->root_caches_node, &slab_root_caches);
	} else {
		list_add(&s->memcg_params.children_node,
			 &s->memcg_params.root_cache->memcg_params.children);
		list_add(&s->memcg_params.kmem_caches_node,
			 &s->memcg_params.memcg->kmem_caches);
	}
}

static void memcg_unlink_cache(struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		list_del(&s->root_caches_node);
	} else {
		list_del(&s->memcg_params.children_node);
		list_del(&s->memcg_params.kmem_caches_node);
	}
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_unlink_cache(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
		    (align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(slab_flags_t flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
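	 *
	 * Worked example (added for illustration, assuming a 64-byte cache
	 * line): for a 24-byte object, ralign is halved from 64 to 32
	 * (24 <= 32 but not <= 16), so the object ends up 32-byte aligned
	 * rather than taking a full cache line.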
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		slab_flags_t flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	memcg_link_cache(s);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_unlock;

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
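	 *
	 * (Added note: any flag that is permitted but not implemented by
	 * the allocator is therefore silently dropped here rather than
	 * rejected; allocator specific flags were already refused above.)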
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list. The slab pages are freed
	 * through RCU and the associated kmem_caches are dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished. As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	memcg_unlink_cache(s);
	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}

	return 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * The memory cgroup could have been offlined while the cache
	 * creation work was pending.
	 */
	if (memcg->kmem_state != KMEM_ONLINE)
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
			       css->serial_nr, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->size, root_cache->align,
			 root_cache->flags & CACHE_CREATE_MASK,
			 root_cache->ctor, memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static void kmemcg_deactivate_workfn(struct work_struct *work)
{
	struct kmem_cache *s = container_of(work, struct kmem_cache,
					    memcg_params.deact_work);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->memcg_params.deact_fn(s);

	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	/* done, put the ref from slab_deactivate_memcg_cache_rcu_sched() */
	css_put(&s->memcg_params.memcg->css);
}

static void kmemcg_deactivate_rcufn(struct rcu_head *head)
{
	struct kmem_cache *s = container_of(head, struct kmem_cache,
					    memcg_params.deact_rcu_head);

	/*
	 * We need to grab blocking locks. Bounce to ->deact_work. The
	 * work item shares the space with the RCU head and can't be
	 * initialized earlier.
	 */
	INIT_WORK(&s->memcg_params.deact_work, kmemcg_deactivate_workfn);
	queue_work(memcg_kmem_cache_wq, &s->memcg_params.deact_work);
}

/**
 * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a
 * sched RCU grace period
 * @s: target kmem_cache
 * @deact_fn: deactivation function to call
 *
 * Schedule @deact_fn to be invoked with online cpus, mems and slab_mutex
 * held after a sched RCU grace period. The slab is guaranteed to stay
 * alive until @deact_fn is finished. This is to be used from
 * __kmemcg_cache_deactivate().
 */
void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
			   void (*deact_fn)(struct kmem_cache *))
{
	if (WARN_ON_ONCE(is_root_cache(s)) ||
	    WARN_ON_ONCE(s->memcg_params.deact_fn))
		return;

	/* pin memcg so that @s doesn't get destroyed in the middle */
	css_get(&s->memcg_params.memcg->css);

	s->memcg_params.deact_fn = deact_fn;
	call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		__kmemcg_cache_deactivate(c);
		arr->entries[idx] = NULL;
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
				 memcg_params.kmem_caches_node) {
		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */
		BUG_ON(shutdown_cache(s));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int shutdown_memcg_caches(struct kmem_cache *s)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

	/*
	 * First, shutdown active caches, i.e. caches that belong to online
	 * memory cgroups.
	 */
	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (shutdown_cache(c))
			/*
			 * The cache still has objects. Move it to a temporary
			 * list so as not to try to destroy it for a second
			 * time while iterating over inactive caches below.
			 */
			list_move(&c->memcg_params.children_node, &busy);
		else
			/*
			 * The cache is empty and will be destroyed soon. Clear
			 * the pointer to it in the memcg_caches array so that
			 * it will never be accessed even if the root cache
			 * stays alive.
			 */
			arr->entries[i] = NULL;
	}

	/*
	 * Second, shutdown all caches left from memory cgroups that are now
	 * offline.
	 */
	list_for_each_entry_safe(c, c2, &s->memcg_params.children,
				 memcg_params.children_node)
		shutdown_cache(c);

	list_splice(&busy, &s->memcg_params.children);

	/*
	 * A cache being destroyed must be empty. In particular, this means
	 * that all per memcg caches attached to it must be empty too.
	 */
	if (!list_empty(&s->memcg_params.children))
		return -EBUSY;
	return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s);
	if (!err)
		err = shutdown_cache(s);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		slab_flags_t flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
			name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	memcg_link_cache(s);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
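 *
 * Worked example (added for illustration): kmalloc(100) uses
 * size_index[(100 - 1) / 8] = size_index[12] = 7, i.e. kmalloc-128, while a
 * request above 192 bytes such as kmalloc(1000) uses fls(1000 - 1) = 10,
 * i.e. kmalloc-1024.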
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	{NULL,                      0},		{"kmalloc-96",             96},
	{"kmalloc-192",           192},		{"kmalloc-8",               8},
	{"kmalloc-16",             16},		{"kmalloc-32",             32},
	{"kmalloc-64",             64},		{"kmalloc-128",           128},
	{"kmalloc-256",           256},		{"kmalloc-512",           512},
	{"kmalloc-1024",         1024},		{"kmalloc-2048",         2048},
	{"kmalloc-4096",         4096},		{"kmalloc-8192",         8192},
	{"kmalloc-16384",       16384},		{"kmalloc-32768",       32768},
	{"kmalloc-65536",       65536},		{"kmalloc-131072",     131072},
	{"kmalloc-262144",     262144},		{"kmalloc-524288",     524288},
	{"kmalloc-1048576",   1048576},		{"kmalloc-2097152",   2097152},
	{"kmalloc-4194304",   4194304},		{"kmalloc-8388608",   8388608},
	{"kmalloc-16777216", 16777216},		{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
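		 *
		 * (Added note: index 8 below is the 256 byte cache, as
		 * listed in kmalloc_info[] above.)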
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init new_kmalloc_cache(int idx, slab_flags_t flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(slab_flags_t flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

		/*
		 * Caches that are not a power-of-two size have to be
		 * created immediately after the earlier power-of-two
		 * caches.
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);
	ret = page ? page_address(page) : NULL;
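	/*
	 * (Added comment) Register the allocation with kmemleak and KASAN
	 * so that leak detection and out-of-bounds checks also cover these
	 * page-allocator-backed kmalloc objects.
	 */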
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			size_t count)
{
	size_t i;
	unsigned int rand;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_root_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_root_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);

	if (p == slab_root_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s, *s2;
	struct slabinfo sinfo;

	/*
	 * Acquiring slab_mutex here is risky since we don't want to sleep
	 * in the OOM path, but traversing the list without holding the
	 * mutex risks a crash.  Use mutex_trylock to protect the list
	 * traversal and dump nothing if the mutex cannot be taken.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name Used Total\n");

	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

#if defined(CONFIG_MEMCG)
void *memcg_slab_start(struct seq_file *m, loff_t *pos)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	mutex_lock(&slab_mutex);
	return seq_list_start(&memcg->kmem_caches, *pos);
}

void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	return seq_list_next(p, &memcg->kmem_caches, pos);
}

void memcg_slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache,
					  memcg_params.kmem_caches_node);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == memcg->kmem_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately,
 * for example when it may still be referenced under RCU.
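 *
 * Typical pattern (illustrative sketch, not taken from this file): make the
 * resized copy with __krealloc(), publish it with rcu_assign_pointer(), and
 * free the old buffer only after a grace period (synchronize_rcu() or
 * kfree_rcu()) once no reader can still see it.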
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);