// SPDX-License-Identifier: GPL-2.0
/*
 * memory buffer pool support. Such pools are mostly used
 * for guaranteed, deadlock-free memory allocations during
 * extreme VM load.
 */
static void poison_error(mempool_t *pool, void *element, size_t size,
                         size_t byte)
{
        const int nr = pool->curr_nr;
        const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
        /* ... */
        pr_err("Mempool %p size %zu\n", pool, size);
        /* ... */
}
static void __check_element(mempool_t *pool, void *element, size_t size)
{
        u8 *obj = element;
        size_t i;

        for (i = 0; i < size; i++) {
                u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

                if (obj[i] != exp) {
                        poison_error(pool, element, size, i);
                        return;
                }
        }
        /* ... */
}
static void check_element(mempool_t *pool, void *element)
{
        /* ... */

        /* Mempools backed by slab allocator */
        if (pool->free == mempool_kfree) {
                __check_element(pool, element, (size_t)pool->pool_data);
        } else if (pool->free == mempool_free_slab) {
                __check_element(pool, element, kmem_cache_size(pool->pool_data));
        } else if (pool->free == mempool_free_pages) {
                /* Mempools backed by page allocator */
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_local_page((struct page *)element);

                __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
                kunmap_local(addr);
        }
}
static void __poison_element(void *element, size_t size)
{
        u8 *obj = element;

        memset(obj, POISON_FREE, size - 1);
        obj[size - 1] = POISON_END;
}
static void poison_element(mempool_t *pool, void *element)
{
        /* ... */

        /* Mempools backed by slab allocator */
        if (pool->alloc == mempool_kmalloc) {
                __poison_element(element, (size_t)pool->pool_data);
        } else if (pool->alloc == mempool_alloc_slab) {
                __poison_element(element, kmem_cache_size(pool->pool_data));
        } else if (pool->alloc == mempool_alloc_pages) {
                /* Mempools backed by page allocator */
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_local_page((struct page *)element);

                __poison_element(addr, 1UL << (PAGE_SHIFT + order));
                kunmap_local(addr);
        }
}
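/*
 * Illustration (added for clarity, not in the original file): with the
 * poison values from include/linux/poison.h (POISON_FREE == 0x6b,
 * POISON_END == 0xa5), an 8-byte element sitting idle in the pool looks
 * like:
 *
 *   6b 6b 6b 6b 6b 6b 6b a5
 *
 * check_element() verifies exactly this pattern before the element is
 * handed back out, so a stray write to a free element is caught.
 */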
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                return kasan_mempool_poison_object(element);
        else if (pool->alloc == mempool_alloc_pages)
                return kasan_mempool_poison_pages(element,
                                                  (unsigned long)pool->pool_data);
        return true;
}
static void kasan_unpoison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_kmalloc)
                kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
        else if (pool->alloc == mempool_alloc_slab)
                kasan_mempool_unpoison_object(element,
                                              kmem_cache_size(pool->pool_data));
        else if (pool->alloc == mempool_alloc_pages)
                kasan_mempool_unpoison_pages(element,
                                             (unsigned long)pool->pool_data);
}
static __always_inline void add_element(mempool_t *pool, void *element)
{
        BUG_ON(pool->min_nr != 0 && pool->curr_nr >= pool->min_nr);
        poison_element(pool, element);
        if (kasan_poison_element(pool, element))
                pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
{
        void *element = pool->elements[--pool->curr_nr];

        BUG_ON(pool->curr_nr < 0);
        kasan_unpoison_element(pool, element);
        check_element(pool, element);
        return element;
}
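/*
 * Note (added for clarity, not in the original file): pool->elements is a
 * plain LIFO stack of pointers and pool->curr_nr is its depth, so the most
 * recently freed element is the first one handed back out. Both helpers
 * expect pool->lock to be held by the caller, or the pool to be invisible
 * to other contexts, as during initialization.
 */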
/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool: pointer to the memory pool which was initialized with
 *        mempool_init().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
        while (pool->curr_nr) {
                void *element = remove_element(pool);

                pool->free(element, pool->pool_data);
        }
        kfree(pool->elements);
        pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);
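/*
 * Usage sketch (added for illustration, not in the original file): a pool
 * embedded in a driver structure, set up with mempool_init() and torn down
 * with mempool_exit(). The struct and function names are hypothetical.
 */
struct my_dev {
        mempool_t page_pool;
};

static int my_dev_setup(struct my_dev *dev)
{
        /* Keep two order-0 pages in reserve for the I/O path. */
        return mempool_init(&dev->page_pool, 2,
                            mempool_alloc_pages, mempool_free_pages,
                            (void *)(long)0);
}

static void my_dev_teardown(struct my_dev *dev)
{
        mempool_exit(&dev->page_pool);  /* frees the reserved elements */
}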
/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
        if (unlikely(!pool))
                return;

        mempool_exit(pool);
        kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                      mempool_free_t *free_fn, void *pool_data,
                      gfp_t gfp_mask, int node_id)
{
        spin_lock_init(&pool->lock);
        pool->min_nr    = min_nr;
        pool->pool_data = pool_data;
        pool->alloc     = alloc_fn;
        pool->free      = free_fn;
        init_waitqueue_head(&pool->wait);

        /*
         * max() used here to ensure storage for at least one element for a
         * zero minimum pool.
         */
        pool->elements = kmalloc_array_node(max(1, min_nr), sizeof(void *),
                                            gfp_mask, node_id);
        if (!pool->elements)
                return -ENOMEM;

        /*
         * First pre-allocate the guaranteed number of buffers,
         * also pre-allocate 1 element for zero minimum pool.
         */
        while (pool->curr_nr < max(1, pool->min_nr)) {
                void *element;

                element = pool->alloc(gfp_mask, pool->pool_data);
                if (unlikely(!element)) {
                        mempool_exit(pool);
                        return -ENOMEM;
                }
                add_element(pool, element);
        }

        return 0;
}
EXPORT_SYMBOL(mempool_init_node);
/**
 * mempool_init - initialize a memory pool
 * @pool: pointer to the memory pool that should be initialized
 * @min_nr: the minimum number of elements guaranteed to be
 *          allocated for this pool.
 * @alloc_fn: user-defined element-allocation function.
 * @free_fn: user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
 * another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                        mempool_free_t *free_fn, void *pool_data)
{
        return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
                                 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init_noprof);
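/*
 * Sketch (added for illustration, not in the original file): the shape of
 * the user-defined callbacks that mempool_init()/mempool_create() accept,
 * per the typedefs in include/linux/mempool.h:
 *
 *      typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
 *      typedef void (mempool_free_t)(void *element, void *pool_data);
 *
 * The bodies below are hypothetical and simply wrap kmalloc()/kfree() for
 * a fixed-size buffer whose size is passed through @pool_data.
 */
static void *my_alloc(gfp_t gfp_mask, void *pool_data)
{
        return kmalloc((size_t)pool_data, gfp_mask); /* matches mempool_alloc_t */
}

static void my_free(void *element, void *pool_data)
{
        kfree(element);                              /* matches mempool_free_t */
}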
/**
 * mempool_create_node - create a memory pool
 * @min_nr: the minimum number of elements guaranteed to be
 *          allocated for this pool.
 * @alloc_fn: user-defined element-allocation function.
 * @free_fn: user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 * @gfp_mask: memory allocation flags
 * @node_id: numa node to allocate on
 *
 * this function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
                                      mempool_free_t *free_fn, void *pool_data,
                                      gfp_t gfp_mask, int node_id)
{
        mempool_t *pool;

        pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
        if (!pool)
                return NULL;

        if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
                              gfp_mask, node_id)) {
                kfree(pool);
                return NULL;
        }

        return pool;
}
EXPORT_SYMBOL(mempool_create_node_noprof);
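/*
 * Usage sketch (added for illustration, not in the original file): the
 * common slab-backed pool, built with the provided mempool_alloc_slab()/
 * mempool_free_slab() helpers. "my_cache", "my_pool" and the element count
 * are hypothetical.
 */
static struct kmem_cache *my_cache;
static mempool_t *my_pool;

static int my_init(void)
{
        my_cache = kmem_cache_create("my_objs", 256, 0, 0, NULL);
        if (!my_cache)
                return -ENOMEM;

        /* Guarantee 16 preallocated objects for the writeout path. */
        my_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
                                 my_cache);
        if (!my_pool) {
                kmem_cache_destroy(my_cache);
                return -ENOMEM;
        }
        return 0;
}

static void my_exit(void)
{
        mempool_destroy(my_pool);       /* NULL-safe, frees the reserves */
        kmem_cache_destroy(my_cache);
}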
/**
 * mempool_resize - resize an existing memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
        void *element;
        void **new_elements;
        unsigned long flags;

        BUG_ON(new_min_nr <= 0);
        might_sleep();

        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
                while (new_min_nr < pool->curr_nr) {
                        element = remove_element(pool);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);
                        spin_lock_irqsave(&pool->lock, flags);
                }
                pool->min_nr = new_min_nr;
                goto out_unlock;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* Grow the pool */
        new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
                                     GFP_KERNEL);
        if (!new_elements)
                return -ENOMEM;

        spin_lock_irqsave(&pool->lock, flags);
        if (unlikely(new_min_nr <= pool->min_nr)) {
                /* Raced, other resize will do our work */
                spin_unlock_irqrestore(&pool->lock, flags);
                kfree(new_elements);
                goto out;
        }
        memcpy(new_elements, pool->elements,
               pool->curr_nr * sizeof(*new_elements));
        kfree(pool->elements);
        pool->elements = new_elements;
        pool->min_nr = new_min_nr;

        while (pool->curr_nr < pool->min_nr) {
                spin_unlock_irqrestore(&pool->lock, flags);
                element = pool->alloc(GFP_KERNEL, pool->pool_data);
                if (!element)
                        goto out;
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
                        add_element(pool, element);
                } else {
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);   /* Raced */
                        goto out;
                }
        }
out_unlock:
        spin_unlock_irqrestore(&pool->lock, flags);
out:
        return 0;
}
EXPORT_SYMBOL(mempool_resize);
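/*
 * Usage sketch (added for illustration, not in the original file): resizing
 * a pool when the workload changes, e.g. to match a queue depth. Note how
 * the function above drops pool->lock around every alloc/free callback,
 * since those may sleep; the call itself must come from process context.
 * "my_pool" is the hypothetical pool from the earlier sketch.
 */
static int my_set_queue_depth(int depth)
{
        /* Keep one reserved element per in-flight request. */
        return mempool_resize(my_pool, depth);
}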
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 *
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)
{
        void *element;
        unsigned long flags;
        wait_queue_entry_t wait;
        gfp_t gfp_temp;

        /* ... first round: no direct reclaim or IO, failures are OK ... */
        gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:
        element = pool->alloc(gfp_temp, pool->pool_data);
        if (likely(element != NULL))
                return element;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
                /* ... */
                return element;
        }

        /*
         * We use gfp mask w/o direct reclaim or IO for the first round. If
         * alloc failed with that and @pool was empty, retry immediately.
         */
        if (gfp_temp != gfp_mask) {
                spin_unlock_irqrestore(&pool->lock, flags);
                gfp_temp = gfp_mask;
                goto repeat_alloc;
        }

        /* We must not sleep if !__GFP_DIRECT_RECLAIM */
        if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
                spin_unlock_irqrestore(&pool->lock, flags);
                return NULL;
        }

        /* Let's wait for someone else to return an element to @pool */
        init_wait(&wait);
        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

        spin_unlock_irqrestore(&pool->lock, flags);
        /* ... sleep, then retry ... */
        finish_wait(&pool->wait, &wait);
        goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc_noprof);
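/*
 * Usage sketch (added for illustration, not in the original file):
 * allocating from the pool in a writeout path. GFP_NOIO allows direct
 * reclaim, so per the kernel-doc above the call can sleep but will not
 * fail; "my_pool" is the hypothetical pool from the earlier sketch.
 */
static void *my_get_buffer(void)
{
        /* With a reclaim-capable gfp mask this never returns NULL. */
        return mempool_alloc(my_pool, GFP_NOIO);
}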
/**
 * mempool_alloc_preallocated - allocate an element from preallocated elements
 *                              belonging to a specific memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * This function is similar to mempool_alloc(), but it only attempts to
 * allocate an element from the preallocated elements. It does not sleep and
 * immediately returns if no preallocated elements are available.
 *
 * Return: pointer to the allocated element or %NULL if no elements are
 * available.
 */
void *mempool_alloc_preallocated(mempool_t *pool)
{
        void *element;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
                /* ... */
                return element;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        return NULL;
}
EXPORT_SYMBOL(mempool_alloc_preallocated);
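/*
 * Usage sketch (added for illustration, not in the original file): a fast
 * path that must not sleep can try the reserves first and fall back to its
 * own slow path when %NULL is returned.
 */
static void *my_try_get_buffer_atomic(void)
{
        return mempool_alloc_preallocated(my_pool); /* NULL if reserves empty */
}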
/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
        unsigned long flags;

        if (unlikely(element == NULL))
                return;

        /*
         * Paired with the wmb in mempool_alloc(). The preceding read is
         * for @element and the following @pool->curr_nr. This ensures
         * that the visible value of @pool->curr_nr is from after the
         * allocation of @element.
         * ...
         * Waiters happen iff curr_nr is 0 and the above guarantee also
         * ensures that there will be frees which return elements to the
         * pool waking up the waiters.
         */
        smp_rmb();

        if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
                spin_lock_irqsave(&pool->lock, flags);
                if (likely(pool->curr_nr < pool->min_nr)) {
                        add_element(pool, element);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        if (wq_has_sleeper(&pool->wait))
                                wake_up(&pool->wait);
                        return;
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }

        /*
         * For zero-minimum pools, curr_nr < min_nr (0 < 0) never succeeds,
         * so waiters sleeping on pool->wait would never be woken by the
         * wake-up path of the previous test. This explicit check ensures the
         * element is added back to an empty pool and any waiter is woken.
         */
        if (unlikely(pool->min_nr == 0 &&
                     READ_ONCE(pool->curr_nr) == 0)) {
                spin_lock_irqsave(&pool->lock, flags);
                if (likely(pool->curr_nr == 0)) {
                        add_element(pool, element);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        if (wq_has_sleeper(&pool->wait))
                                wake_up(&pool->wait);
                        return;
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }

        pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
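/*
 * Illustration (added for clarity, not in the original file): a
 * zero-minimum pool keeps no standing reserves, but the path above still
 * parks one freed element in an empty pool so a sleeper in mempool_alloc()
 * can make progress. Creating one is just:
 *
 *      pool = mempool_create(0, mempool_alloc_slab, mempool_free_slab,
 *                            my_cache);
 *
 * mempool_alloc() then acts as a plain allocation, falling back to waiting
 * for a concurrent mempool_free() only when memory is tight.
 */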
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
        struct kmem_cache *mem = pool_data;

        VM_BUG_ON(mem->ctor);
        return kmem_cache_alloc_noprof(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
        int order = (int)(long)pool_data;

        return alloc_pages_noprof(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
        int order = (int)(long)pool_data;

        __free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
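/*
 * Usage sketch (added for illustration, not in the original file): a
 * page-backed pool built on the two helpers above. The order travels
 * through @pool_data as a casted integer, mirroring how the helpers
 * decode it; "my_page_pool" is hypothetical.
 */
static mempool_t *my_page_pool;

static int my_page_pool_init(void)
{
        /* Reserve four order-1 (two-page) allocations. */
        my_page_pool = mempool_create(4, mempool_alloc_pages,
                                      mempool_free_pages, (void *)(long)1);
        return my_page_pool ? 0 : -ENOMEM;
}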