/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * duplicated definitions of these fields in the kmem_cache
 * structures of SLAB and SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size; /* The original size of the object */
        unsigned int size;        /* The aligned/padded/added-on size */
        unsigned int align;       /* Alignment as calculated */
        slab_flags_t flags;       /* Active flags on the slab */
        const char *name;         /* Slab name for sysfs */
        int refcount;             /* Use counter */
        void (*ctor)(void *);     /* Called on object slot creation */
        struct list_head list;    /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */
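
/*
 * For illustration only, a sketch of the anonymous-struct idea mentioned
 * above; this is not actual kernel code, and KMEM_CACHE_COMMON_FIELDS is
 * a hypothetical macro. With C11, the common fields could be spliced into
 * each allocator's kmem_cache as an anonymous struct, removing the
 * duplication while leaving member access unchanged:
 *
 *      #define KMEM_CACHE_COMMON_FIELDS          \
 *              struct {                          \
 *                      unsigned int object_size; \
 *                      unsigned int size;        \
 *                      unsigned int align;       \
 *                      slab_flags_t flags;       \
 *                      const char *name;         \
 *                      int refcount;             \
 *                      void (*ctor)(void *);     \
 *                      struct list_head list;    \
 *              }
 *
 *      struct kmem_cache {                // e.g. SLUB's definition
 *              KMEM_CACHE_COMMON_FIELDS;  // members usable directly,
 *                                         // e.g. s->object_size
 *              // ... allocator-specific fields follow ...
 *      };
 */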

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,           /* No slab functionality yet */
        PARTIAL,        /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,   /* SLAB: kmalloc size for node struct available */
        UP,             /* Slab caches usable but not all extras yet */
        FULL            /* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
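
/*
 * Example of the bootstrap described above, in the style of SLUB's
 * kmem_cache_init(). This is an illustrative sketch, not a verbatim
 * copy: the bootstrap caches start life as statically allocated
 * structs, create_boot_cache() (declared below) fills them in, and
 * slab_state advances as more of the allocator becomes usable.
 * kmem_cache_node here stands for SLUB's internal cache-of-node-structs
 * pointer:
 *
 *      static struct kmem_cache boot_kmem_cache, boot_kmem_cache_node;
 *
 *      void __init kmem_cache_init(void)
 *      {
 *              kmem_cache_node = &boot_kmem_cache_node;
 *              kmem_cache = &boot_kmem_cache;
 *
 *              create_boot_cache(kmem_cache_node, "kmem_cache_node",
 *                                sizeof(struct kmem_cache_node),
 *                                SLAB_HWCACHE_ALIGN);
 *              slab_state = PARTIAL;   // kmem_cache_node is now usable
 *
 *              create_boot_cache(kmem_cache, "kmem_cache",
 *                                sizeof(struct kmem_cache),
 *                                SLAB_HWCACHE_ALIGN);
 *              // ... kmalloc caches are created next; slab_state
 *              // reaches UP once slab caches are generally usable ...
 *      }
 */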

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name;
        unsigned long size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
                        slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        size_t size, slab_flags_t flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
        slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
        slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned long object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
        slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *))
{
        return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create() */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT)
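
/*
 * For illustration, roughly how the common kmem_cache_create() path
 * applies the two masks above (a sketch; the real code in
 * mm/slab_common.c differs in details and error handling):
 *
 *      // Refuse requests with allocator-specific or unknown flags
 *      if (flags & ~SLAB_FLAGS_PERMITTED)
 *              return NULL;            // the real code reports -EINVAL
 *
 *      // Silently drop flags the current configuration cannot honour
 *      flags &= CACHE_CREATE_MASK;
 */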

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head         slab_root_caches;
#define root_caches_node        memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
        list_for_each_entry(iter, &(root)->memcg_params.children, \
                            memcg_params.children_node)

static inline bool is_root_cache(struct kmem_cache *s)
{
        return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return p == s || p == s->memcg_params.root_cache;
}

/*
 * Memcg caches get a suffix appended to their name, because the system
 * cannot have two caches created with the same name. But when we print
 * them locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
        if (!is_root_cache(s))
                s = s->memcg_params.root_cache;
        return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away, by either
 * taking a css reference on the owner cgroup or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
        struct kmem_cache *cachep;
        struct memcg_cache_array *arr;

        rcu_read_lock();
        arr = rcu_dereference(s->memcg_params.memcg_caches);

        /*
         * Make sure we will access the up-to-date value. The code updating
         * memcg_caches issues a write barrier to match this (see
         * memcg_create_kmem_cache()).
         */
        cachep = READ_ONCE(arr->entries[idx]);
        rcu_read_unlock();

        return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        if (is_root_cache(s))
                return s;
        return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
{
        if (!memcg_kmem_enabled())
                return 0;
        if (is_root_cache(s))
                return 0;
        return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
                                                struct kmem_cache *s)
{
        if (!memcg_kmem_enabled())
                return;
        memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
                                void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches        slab_caches
#define root_caches_node        list

#define for_each_memcg_cache(iter, root) \
        for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
        return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
        return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
        return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
                                    struct kmem_cache *s)
{
        return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
                                       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
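
/*
 * Example use of the iteration helpers above (a sketch;
 * update_cache_attr() is a hypothetical per-cache operation): apply a
 * change to a root cache and to all of its memcg children while holding
 * slab_mutex, as required by for_each_memcg_cache():
 *
 *      struct kmem_cache *c;
 *
 *      mutex_lock(&slab_mutex);
 *      update_cache_attr(root);
 *      for_each_memcg_cache(c, root)
 *              update_cache_attr(c);
 *      mutex_unlock(&slab_mutex);
 */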

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;
        struct page *page;

        /*
         * When kmemcg is not being used, both assignments should return the
         * same value, but we don't want to pay the assignment price in that
         * case. If kmemcg is not compiled in, the compiler should be smart
         * enough to elide the assignment altogether; slab_equal_or_root
         * will then also be a compile-time constant.
         */
        if (!memcg_kmem_enabled() &&
            !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
                return s;

        page = virt_to_head_page(x);
        cachep = page->slab_cache;
        if (slab_equal_or_root(cachep, s))
                return cachep;

        pr_err("%s: Wrong slab cache. %s but object is from %s\n",
               __func__, s->name, cachep->name);
        WARN_ON_ONCE(1);
        return s;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc. for the allocation.
         */
        return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     gfp_t flags)
{
        flags &= gfp_allowed_mask;

        /* Lockdep annotation: this allocation may enter fs reclaim */
        fs_reclaim_acquire(flags);
        fs_reclaim_release(flags);

        might_sleep_if(gfpflags_allow_blocking(flags));

        /* Fault injection (failslab) may force a failure here */
        if (should_failslab(s, flags))
                return NULL;

        /* Accounted allocations are served from the memcg's cache copy */
        if (memcg_kmem_enabled() &&
            ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
                return memcg_kmem_get_cache(s);

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
                                        size_t size, void **p)
{
        size_t i;

        flags &= gfp_allowed_mask;
        for (i = 0; i < size; i++) {
                void *object = p[i];

                kmemleak_alloc_recursive(object, s->object_size, 1,
                                         s->flags, flags);
                kasan_slab_alloc(s, object, flags);
        }

        if (memcg_kmem_enabled())
                memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                if ((__n = get_node(__s, __node)))

#endif
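
/*
 * Example use of for_each_kmem_cache_node() (a sketch; nr_partial is
 * SLUB-specific and used here just for illustration). Nodes without an
 * allocated kmem_cache_node are skipped by the iterator:
 *
 *      struct kmem_cache_node *n;
 *      unsigned long nr = 0;
 *      int node;
 *
 *      for_each_kmem_cache_node(s, node, n)
 *              nr += n->nr_partial;
 */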

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                            gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                          unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */