/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counters to cmpxchg together; this avoids the typical
 * ABA problems with cmpxchg of just a pointer.
 */
struct freelist_counters {
	union {
		struct {
			void *freelist;
			union {
				unsigned long counters;
				struct {
					unsigned inuse:16;
					unsigned objects:15;
					/*
					 * If slab debugging is enabled, the
					 * frozen bit can be reused to indicate
					 * that the slab was corrupted.
					 */
					unsigned frozen:1;
#ifdef CONFIG_64BIT
					/*
					 * Some optimizations use free bits in the
					 * 'counters' field to save memory. If the
					 * ->stride field is not available, such
					 * optimizations are disabled.
					 */
					unsigned int stride;
#endif
				};
			};
		};
#ifdef system_has_freelist_aba
		freelist_full_t freelist_counters;
#endif
	};
};

/* Reuses the bits in struct page */
struct slab {
	memdesc_flags_t flags;

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			/* Double-word boundary */
			struct freelist_counters;
		};
		struct rcu_head rcu_head;
	};

	unsigned int __page_type;
	atomic_t __page_refcount;
#ifdef CONFIG_SLAB_OBJ_EXT
	unsigned long obj_exts;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, flags);
SLAB_MATCH(compound_info, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist_counters)));
#endif
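
/*
 * Illustrative sketch (not a verbatim copy of the slub.c fast paths): the
 * point of the combined type above is that a lockless update replaces the
 * freelist head and the counters word in one compare-and-exchange, so a
 * pointer value that was freed and re-allocated concurrently cannot
 * masquerade as "nothing changed". Roughly:
 *
 *	struct freelist_counters old, new;
 *
 *	old.freelist = slab->freelist;
 *	old.counters = slab->counters;
 *	new.freelist = new_head;	// hypothetical new freelist head
 *	new.counters = new_counters;	// e.g. with inuse updated
 *
 *	try_cmpxchg_freelist(&slab->freelist_counters,
 *			     &old.freelist_counters, new.freelist_counters);
 *
 * When system_has_freelist_aba() is not defined, the freelist and counters
 * have to be updated under a lock instead.
 */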

/**
 * slab_folio - The folio allocated for a slab
 * @s: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects, and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed through struct slab. It is occasionally necessary to convert back
 * to a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from struct page to its slab.
 * @page: A page which may or may not belong to a slab.
 *
 * Return: The slab which contains this page, or NULL if the page does not
 * belong to a slab; this includes pages returned from large kmalloc, for
 * which NULL is also returned.
 */
static inline struct slab *page_slab(const struct page *page)
{
	page = compound_head(page);
	if (data_race(page->page_type >> 24) != PGTY_slab)
		page = NULL;

	return (struct slab *)page;
}

/**
 * slab_page - The first struct page allocated for a slab
 * @s: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return memdesc_nid(slab->flags);
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return NODE_DATA(slab_nid(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	return page_slab(virt_to_page(addr));
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
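
/*
 * Illustrative sketch (hypothetical caller, not a kernel code path): given an
 * arbitrary object address in the linear map, the helpers above recover the
 * slab, its cache and its footprint without touching struct page directly:
 *
 *	struct slab *slab = virt_to_slab(ptr);
 *
 *	if (slab) {
 *		struct kmem_cache *s = slab->slab_cache;
 *		void *base = slab_address(slab);	// first byte of the slab
 *		size_t span = slab_size(slab);		// PAGE_SIZE << slab_order()
 *		int node = slab_nid(slab);		// node of the backing folio
 *	}
 *
 * A NULL result means ptr was not allocated from a slab, e.g. it came from a
 * large kmalloc allocation, which is backed by plain pages.
 */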

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct slub_percpu_sheaves __percpu *cpu_sheaves;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
	unsigned int sheaf_capacity;
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

#ifdef CONFIG_SLUB_STATS
	struct kmem_cache_stats __percpu *cpu_stats;
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

/*
 * Every cache has a !NULL s->cpu_sheaves, but it may point to the
 * bootstrap_sheaf: temporarily during init, permanently for the boot caches
 * and caches with debugging enabled, and for all caches with CONFIG_SLUB_TINY.
 * This helper distinguishes whether a cache has real, non-bootstrap sheaves.
 */
static inline bool cache_has_sheaves(struct kmem_cache *s)
{
	/* Test CONFIG_SLUB_TINY for code elimination purposes */
	return !IS_ENABLED(CONFIG_SLUB_TINY) && s->sheaf_capacity;
}

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS 1
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
int sysfs_slab_alias(struct kmem_cache *s, const char *name);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{ return 0; }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, const void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, const void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}
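
/*
 * Worked example (hypothetical numbers): for a cache with size == 256 and a
 * slab starting at addr, an object pointer obj == addr + 3 * 256 gives
 * obj_to_index() == 3; reciprocal_divide() is just a precomputed-multiply
 * replacement for (obj - addr) / 256. Conversely, nearest_obj() rounds an
 * arbitrary pointer inside the slab, say addr + 800, down to the start of
 * the object containing it (addr + 768, plus any left redzone applied by
 * fixup_red_left()), which is what e.g. the usercopy checks need.
 */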

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation.
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE; the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
	unsigned int index;

	if (!b)
		b = &kmalloc_caches[kmalloc_type(flags, caller)];
	if (size <= 192)
		index = kmalloc_size_index[size_index_elem(size)];
	else
		index = fls(size - 1);

	return (*b)[index];
}
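
/*
 * Worked example (assuming the default kmalloc size classes): a request for
 * 100 bytes takes the table path, kmalloc_size_index[(100 - 1) / 8] ==
 * kmalloc_size_index[12], which maps to the 128-byte cache; a request for
 * 300 bytes takes the fls() path, fls(299) == 9, selecting the 512-byte
 * cache. Which kmem_buckets array is used depends on the GFP flags (and the
 * caller) via kmalloc_type(), e.g. __GFP_DMA or __GFP_RECLAIMABLE select a
 * different set of caches.
 */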

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int do_kmem_cache_create(struct kmem_cache *s, const char *name,
			 unsigned int size, struct kmem_cache_args *args,
			 slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			      unsigned int size, slab_flags_t flags,
			      unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
bool slab_args_unmergeable(struct kmem_cache_args *args, slab_flags_t flags);

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

static inline bool is_kmalloc_normal(struct kmem_cache *s)
{
	if (!is_kmalloc_cache(s))
		return false;
	return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
}

bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
void flush_all_rcu_sheaves(void);
void flush_rcu_sheaves_on_cache(struct kmem_cache *s);

#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS | \
			 SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			 SLAB_TEMPORARY | SLAB_ACCOUNT | \
			 SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)

#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use this only for flags parsed by setup_slub_debug(), since that is
 * also what enables the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
bool slab_in_kunit_test(void);
#else
static inline bool slab_in_kunit_test(void) { return false; }
#endif

/*
 * SLUB is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by KASAN as a bounds error. metadata_access_enable() is used
 * to tell KASAN that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
	kmsan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kmsan_enable_current();
	kasan_enable_current();
}
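
/*
 * Illustrative sketch (hypothetical caller): any access to the red zones,
 * free pointer or tracking data stored around the object has to be bracketed
 * by this pair, otherwise KASAN/KMSAN would flag it as out of bounds:
 *
 *	metadata_access_enable();
 *	p = *(void **)(object + s->offset);	// peek at the stored free pointer
 *	metadata_access_disable();
 *
 * (Real debug code additionally has to decode the free pointer when
 * CONFIG_SLAB_FREELIST_HARDENED obfuscates it.)
 */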

#ifdef CONFIG_SLAB_OBJ_EXT

/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns the address of the object extension vector associated with the slab,
 * or zero if no such vector has been associated yet.
 * Do not dereference the return value directly; use the get/put_slab_obj_exts()
 * pair and slab_obj_ext() to access individual elements.
 *
 * Example usage:
 *
 *	obj_exts = slab_obj_exts(slab);
 *	if (obj_exts) {
 *		get_slab_obj_exts(obj_exts);
 *		obj_ext = slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, obj));
 *		// do something with obj_ext
 *		put_slab_obj_exts(obj_exts);
 *	}
 *
 * Note that the get/put semantics do not involve reference counting.
 * Instead, they update the kasan/kmsan depth so that accesses to slabobj_ext
 * won't be reported as access violations.
 */
static inline unsigned long slab_obj_exts(struct slab *slab)
{
	unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
	/*
	 * obj_exts should be either NULL, a valid pointer with the
	 * MEMCG_DATA_OBJEXTS bit set, or equal to OBJEXTS_ALLOC_FAIL.
	 */
	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS) &&
		       obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif

	return obj_exts & ~OBJEXTS_FLAGS_MASK;
}

static inline void get_slab_obj_exts(unsigned long obj_exts)
{
	VM_WARN_ON_ONCE(!obj_exts);
	metadata_access_enable();
}

static inline void put_slab_obj_exts(unsigned long obj_exts)
{
	metadata_access_disable();
}

#ifdef CONFIG_64BIT
static inline void slab_set_stride(struct slab *slab, unsigned int stride)
{
	slab->stride = stride;
}
static inline unsigned int slab_get_stride(struct slab *slab)
{
	return slab->stride;
}
#else
static inline void slab_set_stride(struct slab *slab, unsigned int stride)
{
	VM_WARN_ON_ONCE(stride != sizeof(struct slabobj_ext));
}
static inline unsigned int slab_get_stride(struct slab *slab)
{
	return sizeof(struct slabobj_ext);
}
#endif

/*
 * slab_obj_ext - get the pointer to the slab object extension metadata
 * associated with an object in a slab.
 * @slab: a pointer to the slab struct
 * @obj_exts: a pointer to the object extension vector
 * @index: the index of the object
 *
 * Returns a pointer to the object extension associated with the object.
 * Must be called within a section covered by get/put_slab_obj_exts().
 */
static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
					       unsigned long obj_exts,
					       unsigned int index)
{
	struct slabobj_ext *obj_ext;

	VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));

	obj_ext = (struct slabobj_ext *)(obj_exts +
					 slab_get_stride(slab) * index);
	return kasan_reset_tag(obj_ext);
}

int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
			gfp_t gfp, bool new_slab);

#else /* CONFIG_SLAB_OBJ_EXT */

static inline unsigned long slab_obj_exts(struct slab *slab)
{
	return 0;
}

static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
					       unsigned long obj_exts,
					       unsigned int index)
{
	return NULL;
}

static inline void slab_set_stride(struct slab *slab, unsigned int stride) { }
static inline unsigned int slab_get_stride(struct slab *slab) { return 0; }

#endif /* CONFIG_SLAB_OBJ_EXT */

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
			    void **p, int objects, unsigned long obj_exts);
#endif

void kvfree_rcu_cb(struct rcu_head *head);

static inline unsigned int large_kmalloc_order(const struct page *page)
{
	return page[1].flags.f & 0xff;
}

static inline size_t large_kmalloc_size(const struct page *page)
{
	return PAGE_SIZE << large_kmalloc_order(page);
}

#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			    gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					  unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

void defer_free_barrier(void);

static inline bool slub_debug_orig_size(struct kmem_cache *s)
{
	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
		(s->flags & SLAB_KMALLOC));
}

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */