#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

#ifdef CONFIG_SLUB
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif

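/*
 * Illustrative sketch (an editorial addition, not part of the upstream
 * header): how the bootstrap declarations above fit together. The outline
 * paraphrases each allocator's kmem_cache_init() rather than quoting it,
 * and the "..." elisions are deliberate:
 *
 *	void __init kmem_cache_init(void)
 *	{
 *		// slab_state == DOWN: no allocation works yet, so the
 *		// first caches are carved out of static storage.
 *		create_boot_cache(kmem_cache, "kmem_cache", ..., SLAB_HWCACHE_ALIGN);
 *		...
 *		slab_state = UP;	// kmalloc caches are usable now
 *	}
 *
 * Code that may run before or after this point gates on the state;
 * slab_is_available() in mm/slab_common.c is simply:
 *
 *	int slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 */
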
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
#endif
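
/*
 * Illustrative note (an editorial addition, not in the upstream header):
 * how the flag masks above are consumed. kmem_cache_create() sanitizes
 * caller-supplied flags against what the compiled-in allocator supports,
 * along the lines of:
 *
 *	flags &= CACHE_CREATE_MASK;
 *
 * so a debug-only flag such as SLAB_RED_ZONE is silently dropped unless
 * CONFIG_DEBUG_SLAB or CONFIG_SLUB_DEBUG contributed it through
 * SLAB_DEBUG_FLAGS.
 */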