#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

#include <linux/kasan.h>
#include <linux/slab.h>
#include <linux/io_uring_types.h>

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	128

/*
 * Return an entry to the cache. Returns false if the cache is full or if
 * KASAN rejects the object; in either case the caller still owns @entry
 * and must free it through the underlying allocator.
 */
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		if (!kasan_mempool_poison_object(entry))
			return false;
		cache->entries[cache->nr_cached++] = entry;
		return true;
	}
	return false;
}

/*
 * Pop the most recently cached entry, or NULL if the cache is empty.
 * The entry is unpoisoned before it is handed back to the caller.
 */
static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->nr_cached) {
		void *entry = cache->entries[--cache->nr_cached];

		kasan_mempool_unpoison_object(entry, cache->elem_size);
		return entry;
	}

	return NULL;
}

/*
 * Get an entry from the cache, falling back to kmalloc() when the cache
 * is empty. @init_once is only applied to freshly allocated objects;
 * cached entries already went through it before they were first cached.
 */
static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp,
				   void (*init_once)(void *obj))
{
	if (unlikely(!cache->nr_cached)) {
		void *obj = kmalloc(cache->elem_size, gfp);

		if (obj && init_once)
			init_once(obj);
		return obj;
	}
	return io_alloc_cache_get(cache);
}

/* returns false if the cache was initialized properly, true on failure */
static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
	if (cache->entries) {
		cache->nr_cached = 0;
		cache->max_cached = max_nr;
		cache->elem_size = size;
		return false;
	}
	return true;
}

/*
 * Drain the cache, freeing each remaining entry via @free, then release
 * the entries array itself.
 */
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(const void *))
{
	void *entry;

	if (!cache->entries)
		return;

	while ((entry = io_alloc_cache_get(cache)) != NULL)
		free(entry);

	kvfree(cache->entries);
	cache->entries = NULL;
}
#endif
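
/*
 * Illustrative lifecycle sketch (not part of this header): how a caller
 * might wire the cache together, assuming a hypothetical cached type
 * 'struct my_obj'. Note the inverted return conventions above: init
 * returns true on failure, and a failed put leaves ownership of the
 * object with the caller.
 *
 *	static struct io_alloc_cache cache;
 *
 *	if (io_alloc_cache_init(&cache, IO_ALLOC_CACHE_MAX,
 *				sizeof(struct my_obj)))
 *		return -ENOMEM;
 *
 *	struct my_obj *obj = io_cache_alloc(&cache, GFP_KERNEL, NULL);
 *	if (!obj)
 *		return -ENOMEM;
 *
 *	// ... use obj, then try to stash it for reuse ...
 *	if (!io_alloc_cache_put(&cache, obj))
 *		kfree(obj);
 *
 *	// on teardown, drain any still-cached entries with kfree()
 *	io_alloc_cache_free(&cache, kfree);
 */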