// SPDX-License-Identifier: GPL-2.0
/*
 * memory buffer pool support. Such pools are mostly used
 * for guaranteed, deadlock-free memory allocations during
 * extreme VM load.
 *
 * started by Ingo Molnar, Copyright (C) 2001
 * debugging by David Rientjes, Copyright (C) 2015
 */
#include <linux/fault-inject.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include "slab.h"

static DECLARE_FAULT_ATTR(fail_mempool_alloc);
static DECLARE_FAULT_ATTR(fail_mempool_alloc_bulk);

static int __init mempool_fault_inject_init(void)
{
	int error;

	error = PTR_ERR_OR_ZERO(fault_create_debugfs_attr("fail_mempool_alloc",
			NULL, &fail_mempool_alloc));
	if (error)
		return error;

	/* booting will fail on error return here, don't bother to clean up */
	return PTR_ERR_OR_ZERO(
		fault_create_debugfs_attr("fail_mempool_alloc_bulk", NULL,
				&fail_mempool_alloc_bulk));
}
late_initcall(mempool_fault_inject_init);

#ifdef CONFIG_SLUB_DEBUG_ON
static void poison_error(struct mempool *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(struct mempool *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(struct mempool *pool, void *element)
{
	/* Skip checking: KASAN might save its metadata in the element. */
	if (kasan_enabled())
		return;

	/* Mempools backed by slab allocator */
	if (pool->free == mempool_kfree) {
		__check_element(pool, element, (size_t)pool->pool_data);
	} else if (pool->free == mempool_free_slab) {
		__check_element(pool, element, kmem_cache_size(pool->pool_data));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;

#ifdef CONFIG_HIGHMEM
		for (int i = 0; i < (1 << order); i++) {
			struct page *page = (struct page *)element;
			void *addr = kmap_local_page(page + i);

			__check_element(pool, addr, PAGE_SIZE);
			kunmap_local(addr);
		}
#else
		void *addr = page_address((struct page *)element);

		__check_element(pool, addr, PAGE_SIZE << order);
#endif
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(struct mempool *pool, void *element)
{
	/* Skip poisoning: KASAN might save its metadata in the element. */
	if (kasan_enabled())
		return;

	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_kmalloc) {
		__poison_element(element, (size_t)pool->pool_data);
	} else if (pool->alloc == mempool_alloc_slab) {
		__poison_element(element, kmem_cache_size(pool->pool_data));
	} else if (pool->alloc == mempool_alloc_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;

#ifdef CONFIG_HIGHMEM
		for (int i = 0; i < (1 << order); i++) {
			struct page *page = (struct page *)element;
			void *addr = kmap_local_page(page + i);

			__poison_element(addr, PAGE_SIZE);
			kunmap_local(addr);
		}
#else
		void *addr = page_address((struct page *)element);

		__poison_element(addr, PAGE_SIZE << order);
#endif
	}
}
#else /* CONFIG_SLUB_DEBUG_ON */
static inline void check_element(struct mempool *pool, void *element)
{
}
static inline void poison_element(struct mempool *pool, void *element)
{
}
#endif /* CONFIG_SLUB_DEBUG_ON */

static __always_inline bool kasan_poison_element(struct mempool *pool,
						 void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		return kasan_mempool_poison_object(element);
	else if (pool->alloc == mempool_alloc_pages)
		return kasan_mempool_poison_pages(element,
						  (unsigned long)pool->pool_data);
	return true;
}

static void kasan_unpoison_element(struct mempool *pool, void *element)
{
	if (pool->alloc == mempool_kmalloc)
		kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
	else if (pool->alloc == mempool_alloc_slab)
		kasan_mempool_unpoison_object(element,
					      kmem_cache_size(pool->pool_data));
	else if (pool->alloc == mempool_alloc_pages)
		kasan_mempool_unpoison_pages(element,
					     (unsigned long)pool->pool_data);
}

static __always_inline void add_element(struct mempool *pool, void *element)
{
	BUG_ON(pool->min_nr != 0 && pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	if (kasan_poison_element(pool, element))
		pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(struct mempool *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element);
	check_element(pool, element);
	return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool along with the element array itself.
 * @pool is not freed, as it may be embedded in another structure. This
 * function only sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(struct mempool *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);

		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(struct mempool *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(struct mempool *pool, int min_nr,
		      mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
		      void *pool_data, gfp_t gfp_mask, int node_id)
{
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	pool->alloc = alloc_fn;
	pool->free = free_fn;
	init_waitqueue_head(&pool->wait);
	/*
	 * max() is used here to guarantee storage for at least one element,
	 * so that zero-minimum pools are supported.
	 */
	pool->elements = kmalloc_array_node(max(1, min_nr), sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers, and also
	 * pre-allocate one element for zero-minimum pools.
	 */
	while (pool->curr_nr < max(1, pool->min_nr)) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
		}
		add_element(pool, element);
	}

	return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
 * another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init_noprof(struct mempool *pool, int min_nr,
			mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
			void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init_noprof);
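
/*
 * Example usage (an illustrative sketch, not part of this file): a driver
 * embedding a pool in its own structure and tearing it down again with
 * mempool_exit().  The names "my_dev" and "my_cache" are hypothetical.
 *
 *	struct my_dev {
 *		struct mempool pool;
 *	};
 *
 *	static int my_dev_init(struct my_dev *dev, struct kmem_cache *my_cache)
 *	{
 *		// reserve 16 elements from my_cache for low-memory situations
 *		return mempool_init(&dev->pool, 16, mempool_alloc_slab,
 *				    mempool_free_slab, my_cache);
 *	}
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		mempool_exit(&dev->pool);
 *	}
 */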

/**
 * mempool_create_node - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 * @gfp_mask:  memory allocation flags
 * @node_id:   numa node to allocate on
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
struct mempool *mempool_create_node_noprof(int min_nr,
		mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
		void *pool_data, gfp_t gfp_mask, int node_id)
{
	struct mempool *pool;

	pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node_noprof);
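
/*
 * Example usage (an illustrative sketch, not part of this file): creating a
 * standalone slab-backed pool via the mempool_create() wrapper and the slab
 * helpers defined below.  "struct my_req" is a hypothetical element type.
 *
 *	struct kmem_cache *my_cache = KMEM_CACHE(my_req, 0);
 *	struct mempool *pool;
 *
 *	pool = mempool_create(4, mempool_alloc_slab, mempool_free_slab,
 *			      my_cache);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	mempool_destroy(pool);
 *	kmem_cache_destroy(my_cache);
 */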

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(struct mempool *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
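
/*
 * Example usage (an illustrative sketch, not part of this file): growing the
 * reserve when more in-flight requests must be guaranteed, e.g. after a
 * device reports a deeper queue.  "dev->pool" is a hypothetical embedded
 * pool.
 *
 *	int err = mempool_resize(&dev->pool, 32);
 *
 *	if (err)
 *		return err;	// the pool keeps its old reserve on failure
 */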

static unsigned int mempool_alloc_from_pool(struct mempool *pool, void **elems,
		unsigned int count, unsigned int allocated, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(pool->curr_nr < count - allocated))
		goto fail;
	for (i = 0; i < count; i++) {
		if (!elems[i]) {
			elems[i] = remove_element(pool);
			allocated++;
		}
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Paired with the rmb in mempool_free_bulk(), see the comment there. */
	smp_wmb();

	/*
	 * Update the allocation stack trace as this is more useful for
	 * debugging.
	 */
	for (i = 0; i < count; i++)
		kmemleak_update_trace(elems[i]);
	return allocated;

fail:
	if (gfp_mask & __GFP_DIRECT_RECLAIM) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(&pool->lock, flags);

		/*
		 * Wait for someone else to return an element to @pool, but
		 * wake up occasionally: memory pressure might have subsided
		 * in the meantime, and the normal allocation in alloc_fn
		 * could succeed even if no element was returned.
		 */
		io_schedule_timeout(5 * HZ);
		finish_wait(&pool->wait, &wait);
	} else {
		/* We must not sleep if __GFP_DIRECT_RECLAIM is not set. */
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	return allocated;
}

/*
 * Adjust the gfp flags for mempool allocations, as we never want to dip into
 * the global emergency reserves or retry in the page allocator.
 *
 * The first pass also doesn't want to enter direct reclaim, but the later
 * passes do, so return a separate, reduced subset for that first iteration.
 */
static inline gfp_t mempool_adjust_gfp(gfp_t *gfp_mask)
{
	*gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
	return *gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
}

/**
 * mempool_alloc_bulk - allocate multiple elements from a memory pool
 * @pool:      pointer to the memory pool
 * @elems:     partially or fully populated elements array
 * @count:     number of entries in @elems
 * @allocated: number of entries in @elems that are already allocated
 *
 * Allocate an element for each slot in @elems that is %NULL. This is done by
 * first calling into the alloc_fn supplied at pool initialization time, and
 * dipping into the reserved pool when alloc_fn fails to allocate an element.
 *
 * On return all @count elements in @elems will be populated.
 *
 * Return: always %0. If it wasn't for the alloc tag machinery, it would
 * return void.
 */
int mempool_alloc_bulk_noprof(struct mempool *pool, void **elems,
		unsigned int count, unsigned int allocated)
{
	gfp_t gfp_mask = GFP_KERNEL;
	gfp_t gfp_temp = mempool_adjust_gfp(&gfp_mask);
	unsigned int i = 0;

	VM_WARN_ON_ONCE(count > pool->min_nr);
	might_alloc(gfp_mask);

	/*
	 * If an error is injected, fail all elements in a bulk allocation so
	 * that we stress the path where multiple elements are missing.
	 */
	if (should_fail_ex(&fail_mempool_alloc_bulk, 1, FAULT_NOWARN)) {
		pr_info("forcing mempool usage for %pS\n",
			(void *)_RET_IP_);
		goto use_pool;
	}

repeat_alloc:
	/*
	 * Try to allocate the elements using the allocation callback first, as
	 * that might succeed even when the caller's bulk allocation did not.
	 */
	for (i = 0; i < count; i++) {
		if (elems[i])
			continue;
		elems[i] = pool->alloc(gfp_temp, pool->pool_data);
		if (unlikely(!elems[i]))
			goto use_pool;
		allocated++;
	}

	return 0;

use_pool:
	allocated = mempool_alloc_from_pool(pool, elems, count, allocated,
					    gfp_temp);
	gfp_temp = gfp_mask;
	goto repeat_alloc;
}
EXPORT_SYMBOL_GPL(mempool_alloc_bulk_noprof);
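
/*
 * Example usage (an illustrative sketch, not part of this file): allocating a
 * fixed batch of elements through the mempool_alloc_bulk() wrapper, failing
 * over to the reserve as needed.  The slots must start out %NULL so that the
 * function knows which entries still need an element, and "NR_SEGS" (a
 * hypothetical constant) must not exceed the pool's min_nr.
 *
 *	void *segs[NR_SEGS] = {};
 *
 *	mempool_alloc_bulk(pool, segs, NR_SEGS, 0);
 *	// all NR_SEGS slots are now populated
 */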

/**
 * mempool_alloc - allocate an element from a memory pool
 * @pool:     pointer to the memory pool
 * @gfp_mask: GFP_* flags. %__GFP_ZERO is not supported.
 *
 * Allocate an element from @pool. This is done by first calling into the
 * alloc_fn supplied at pool initialization time, and dipping into the reserved
 * pool when alloc_fn fails to allocate an element.
 *
 * This function only sleeps if the alloc_fn callback sleeps, or when waiting
 * for elements to become available in the pool.
 *
 * Return: pointer to the allocated element or %NULL when failing to allocate
 * an element. Allocation failure can only happen when @gfp_mask does not
 * include %__GFP_DIRECT_RECLAIM.
 */
void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask)
{
	gfp_t gfp_temp = mempool_adjust_gfp(&gfp_mask);
	void *element;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_alloc(gfp_mask);

repeat_alloc:
	if (should_fail_ex(&fail_mempool_alloc, 1, FAULT_NOWARN)) {
		pr_info("forcing mempool usage for %pS\n",
			(void *)_RET_IP_);
		element = NULL;
	} else {
		element = pool->alloc(gfp_temp, pool->pool_data);
	}

	if (unlikely(!element)) {
		/*
		 * Try to allocate an element from the pool.
		 *
		 * The first pass won't have __GFP_DIRECT_RECLAIM and won't
		 * sleep in mempool_alloc_from_pool. Retry the allocation
		 * with all flags set in that case.
		 */
		if (!mempool_alloc_from_pool(pool, &element, 1, 0, gfp_temp)) {
			if (gfp_temp != gfp_mask) {
				gfp_temp = gfp_mask;
				goto repeat_alloc;
			}
			if (gfp_mask & __GFP_DIRECT_RECLAIM)
				goto repeat_alloc;
		}
	}

	return element;
}
EXPORT_SYMBOL(mempool_alloc_noprof);

/**
 * mempool_alloc_preallocated - allocate an element from preallocated elements
 *                              belonging to a memory pool
 * @pool: pointer to the memory pool
 *
 * This function is similar to mempool_alloc(), but it only attempts allocating
 * an element from the preallocated elements. It only takes the pool's spinlock
 * and immediately returns if no preallocated elements are available.
 *
 * Return: pointer to the allocated element or %NULL if no elements are
 * available.
 */
void *mempool_alloc_preallocated(struct mempool *pool)
{
	void *element = NULL;

	mempool_alloc_from_pool(pool, &element, 1, 0, GFP_NOWAIT);
	return element;
}
EXPORT_SYMBOL(mempool_alloc_preallocated);
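
/*
 * Example usage (an illustrative sketch, not part of this file): an
 * opportunistic fast path that grabs a reserved element if one is available
 * and falls back to a sleeping mempool_alloc() otherwise.  "pool" is a
 * hypothetical pool.
 *
 *	void *obj = mempool_alloc_preallocated(pool);
 *
 *	if (!obj)
 *		obj = mempool_alloc(pool, GFP_KERNEL);	// may sleep
 */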

/**
 * mempool_free_bulk - return elements to a mempool
 * @pool:  pointer to the memory pool
 * @elems: elements to return
 * @count: number of elements to return
 *
 * Transfers a number of elements from the start of @elems to @pool if @pool
 * needs replenishing. The remaining elements are left in @elems.
 *
 * Return: number of elements transferred to @pool. Elements are always
 * transferred from the beginning of @elems, so the return value can be used
 * as an offset into @elems for freeing the remaining elements in the caller.
 */
unsigned int mempool_free_bulk(struct mempool *pool, void **elems,
		unsigned int count)
{
	unsigned long flags;
	unsigned int freed = 0;
	bool added = false;

	/*
	 * Paired with the wmb in mempool_alloc_from_pool(). The preceding
	 * read is for @element and the following @pool->curr_nr. This
	 * ensures that the visible value of @pool->curr_nr is from after
	 * the allocation of @element. This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
	 * may end up using a curr_nr value which is from before the
	 * allocation of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because the curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards. If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 *
	 * For zero-minimum pools, curr_nr < min_nr (0 < 0) never succeeds,
	 * so waiters sleeping on pool->wait would never be woken by the
	 * wake-up path of the previous test. This explicit check ensures
	 * that an element is returned to the pool when both min_nr and
	 * curr_nr are 0, so that any active waiters are properly woken.
	 */
	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		while (pool->curr_nr < pool->min_nr && freed < count) {
			add_element(pool, elems[freed++]);
			added = true;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	} else if (unlikely(pool->min_nr == 0 &&
			    READ_ONCE(pool->curr_nr) == 0)) {
		/* Handle the min_nr = 0 edge case: */
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr == 0)) {
			add_element(pool, elems[freed++]);
			added = true;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	if (unlikely(added) && wq_has_sleeper(&pool->wait))
		wake_up(&pool->wait);

	return freed;
}
EXPORT_SYMBOL_GPL(mempool_free_bulk);
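
/*
 * Example usage (an illustrative sketch, not part of this file): returning a
 * batch of elements, letting the pool take what it needs to replenish its
 * reserve and handing the rest back to the element allocator.  "pool",
 * "my_cache", "segs" and "nr" are hypothetical, for a slab-backed pool.
 *
 *	unsigned int off = mempool_free_bulk(pool, segs, nr);
 *
 *	while (off < nr)
 *		kmem_cache_free(my_cache, segs[off++]);
 */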

/**
 * mempool_free - return an element to the pool.
 * @element: element to return
 * @pool:    pointer to the memory pool
 *
 * Returns @element to @pool if it needs replenishing, else frees it using
 * the free_fn callback in @pool.
 *
 * This function only sleeps if the free_fn callback sleeps.
 */
void mempool_free(void *element, struct mempool *pool)
{
	if (likely(element) && !mempool_free_bulk(pool, &element, 1))
		pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
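
/*
 * Example usage (an illustrative sketch, not part of this file): the typical
 * alloc/free pairing in an I/O submission path.  With __GFP_DIRECT_RECLAIM
 * set the allocation cannot fail; it can only wait for a reserved element.
 * "pool" and "submit_request()" are hypothetical.
 *
 *	void *req = mempool_alloc(pool, GFP_NOIO);
 *
 *	submit_request(req);
 *	...
 *	mempool_free(req, pool);	// the completion path refills the pool
 */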

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc_noprof(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;

	return kmalloc_noprof(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
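
/*
 * Example usage (an illustrative sketch, not part of this file): a pool of
 * fixed-size kmalloc buffers.  The element size is passed through pool_data
 * as an integer-in-pointer, matching the cast in mempool_kmalloc() above.
 *
 *	struct mempool *pool = mempool_create(8, mempool_kmalloc,
 *					      mempool_kfree, (void *)512);
 */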

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;

	return alloc_pages_noprof(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;

	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
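
/*
 * Example usage (an illustrative sketch, not part of this file): a pool of
 * order-1 (two-page) allocations, with pool_data again carrying the order
 * as an integer-in-pointer.
 *
 *	struct mempool *pool = mempool_create(4, mempool_alloc_pages,
 *					      mempool_free_pages, (void *)1);
 *	struct page *pages = mempool_alloc(pool, GFP_NOIO);
 */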