#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

#ifdef CONFIG_SLUB
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif


int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;
void print_slabinfo_header(struct seq_file *m);

int slabinfo_show(struct seq_file *m, void *p);

ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
#endif