/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
void __init kmem_cache_init(void);

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			void *freelist;	/* array of free object indexes */
			void *s_mem;	/* first object */
		};
		struct rcu_head rcu_head;
	};
	unsigned int active;

#elif defined(CONFIG_SLUB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			void *freelist;		/* first free object */
			union {
				unsigned long counters;
				struct {
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

#elif defined(CONFIG_SLOB)

	struct list_head slab_list;
	void *__unused_1;
	void *freelist;		/* first free block */
	long units;
	unsigned int __unused_2;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
#ifndef CONFIG_SLOB
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
#else
SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2 * sizeof(void *)));
#endif

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))
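/*
 * Illustrative sketch (not compiled as part of this header): a typical round
 * trip between the two representations, assuming a SLAB/SLUB configuration
 * (SLOB has no slab_cache field). example_touch_slab() is a hypothetical
 * helper that receives a folio from generic mm code:
 *
 *	static void example_touch_slab(struct folio *folio)
 *	{
 *		struct slab *slab;
 *
 *		if (!folio_test_slab(folio))
 *			return;
 *		slab = folio_slab(folio);
 *		pr_info("slab cache: %s\n", slab->slab_cache->name);
 *		folio_clear_active(slab_folio(slab));
 *	}
 *
 * The conversions themselves are free: both types describe the same memory,
 * as the SLAB_MATCH() assertions above guarantee.
 */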
/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
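/*
 * Illustrative sketch (not compiled here): how the lookup helpers above
 * combine to locate the slab backing an arbitrary object pointer and to
 * compute the object's offset within it. example_slab_offset() is a
 * hypothetical name:
 *
 *	static long example_slab_offset(const void *obj)
 *	{
 *		struct slab *slab = virt_to_slab(obj);
 *
 *		if (!slab)
 *			return -1;	(not a slab-backed address)
 *		WARN_ON(obj < slab_address(slab) ||
 *			obj >= slab_address(slab) + slab_size(slab));
 *		return obj - slab_address(slab);
 *	}
 */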
#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
			      int node, size_t orig_size,
			      unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
#endif
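/*
 * Illustrative sketch (not compiled here, !CONFIG_SLOB only): kmalloc_info[]
 * is indexed by the kmalloc size index, and kmalloc_slab() performs that
 * size-to-cache lookup for the allocator. example_kmalloc_lookup() is a
 * hypothetical probe:
 *
 *	static void example_kmalloc_lookup(size_t size)
 *	{
 *		struct kmem_cache *s = kmalloc_slab(size, GFP_KERNEL);
 *
 *		if (!ZERO_OR_NULL_PTR(s))
 *			pr_info("%zu bytes served from %s (object size %u)\n",
 *				size, s->name, s->object_size);
 *	}
 *
 * For example, a 40-byte request is typically rounded up to the 64-byte
 * cache, usually named "kmalloc-64" for the normal kmalloc type.
 */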
gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);

#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#endif

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
#ifndef CONFIG_SLOB
	return (s->flags & SLAB_KMALLOC);
#else
	return false;
#endif
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}
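/*
 * cache_vmstat_idx() selects which per-node vmstat counter a cache's memory
 * is charged to, so reclaimable caches (SLAB_RECLAIM_ACCOUNT) and
 * unreclaimable ones stay separated in /proc/vmstat and in reclaim
 * heuristics. account_slab()/unaccount_slab() below use it exactly like this:
 *
 *	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 *			    PAGE_SIZE << order);
 */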
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
			     gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
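/*
 * Illustrative worked example (assuming 64-bit pointers): for a cache with
 * s->size == 192, obj_full_size() charges 192 + sizeof(struct obj_cgroup *)
 * == 200 bytes per accounted object, so the objcg pointer that records
 * ownership is paid for by the same memcg as the object itself.
 */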
/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_online() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags, false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
	struct obj_cgroup **objcgs;
	int i;

	if (!memcg_kmem_online())
		return;

	objcgs = slab_objcgs(slab);
	if (!objcgs)
		return;

	for (i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					   struct kmem_cache *s, gfp_t gfp,
					   bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */
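/*
 * Illustrative sketch (not compiled here): the memcg hooks above are meant
 * to be paired around an allocator's (bulk) allocation and free paths.
 * example_alloc_one() and allocate_object_somehow() are hypothetical names:
 *
 *	static void *example_alloc_one(struct kmem_cache *s, gfp_t flags)
 *	{
 *		struct obj_cgroup *objcg = NULL;
 *		void *obj;
 *
 *		if (!memcg_slab_pre_alloc_hook(s, NULL, &objcg, 1, flags))
 *			return NULL;	(memcg charge failed)
 *		obj = allocate_object_somehow(s, flags);
 *		memcg_slab_post_alloc_hook(s, objcg, flags, 1, &obj);
 *		return obj;
 *	}
 *
 * The real callers are slab_pre_alloc_hook()/slab_post_alloc_hook() further
 * down, and memcg_slab_free_hook() is called from the allocators' free paths.
 * The !CONFIG_MEMCG_KMEM stubs keep those callers free of #ifdefs.
 */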
#ifndef CONFIG_SLOB
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

void free_large_kmalloc(struct folio *folio, void *object);

#endif /* CONFIG_SLOB */

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}
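/*
 * Illustrative sketch (not compiled here): slab_pre_alloc_hook() and
 * slab_post_alloc_hook() (below) bracket every allocation fast path.
 * A hypothetical caller, modelled on the allocators' slab_alloc paths:
 *
 *	struct obj_cgroup *objcg = NULL;
 *	void *object;
 *
 *	s = slab_pre_alloc_hook(s, NULL, &objcg, 1, gfpflags);
 *	if (!s)
 *		return NULL;	(fault injection or memcg charge failure)
 *	object = ...;		(the actual allocation from the cache)
 *	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object,
 *			     slab_want_init_on_alloc(gfpflags, s),
 *			     s->object_size);
 *	return object;
 *
 * slab_want_init_on_alloc() is defined later in this file.
 */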
static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init,
					unsigned int orig_size)
{
	unsigned int zero_size = s->object_size;
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * For a kmalloc object, the allocated memory size (object_size) is
	 * likely larger than the requested size (orig_size). If redzone
	 * checking is enabled for the extra space, don't zero it, as it will
	 * be redzoned soon. The redzone operation for this extra space could
	 * be seen as a replacement of the current poisoning under certain
	 * debug options, and won't break other sanity checks.
	 */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, zero_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
		kmsan_slab_alloc(s, p[i], flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif
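/*
 * Illustrative sketch (not compiled here): walking the per-node structures
 * of a cache with the iterator above, e.g. to report partial-list lengths
 * (nr_partial only exists for CONFIG_SLUB):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		pr_info("%s: node %d has %lu partial slabs\n",
 *			s->name, node, n->nr_partial);
 */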
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */