xref: /linux/mm/mempool.c (revision b687034b1a4d85333ced0fe07f67b17276cccdc8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  memory buffer pool support. Such pools are mostly used
4  *  for guaranteed, deadlock-free memory allocations during
5  *  extreme VM load.
6  *
7  *  started by Ingo Molnar, Copyright (C) 2001
8  *  debugging by David Rientjes, Copyright (C) 2015
9  */
10 #include <linux/fault-inject.h>
11 #include <linux/mm.h>
12 #include <linux/slab.h>
13 #include <linux/highmem.h>
14 #include <linux/kasan.h>
15 #include <linux/kmemleak.h>
16 #include <linux/export.h>
17 #include <linux/mempool.h>
18 #include <linux/writeback.h>
19 #include "slab.h"
20 
21 static DECLARE_FAULT_ATTR(fail_mempool_alloc);
22 static DECLARE_FAULT_ATTR(fail_mempool_alloc_bulk);
23 
24 static int __init mempool_fault_inject_init(void)
25 {
26 	int error;
27 
28 	error = PTR_ERR_OR_ZERO(fault_create_debugfs_attr("fail_mempool_alloc",
29 			NULL, &fail_mempool_alloc));
30 	if (error)
31 		return error;
32 
33 	/* booting will fail on an error return here, don't bother to clean up */
34 	return PTR_ERR_OR_ZERO(
35 		fault_create_debugfs_attr("fail_mempool_alloc_bulk", NULL,
36 		&fail_mempool_alloc_bulk));
37 }
38 late_initcall(mempool_fault_inject_init);
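
/*
 * Editor's note (illustrative, not part of the upstream file): with
 * CONFIG_FAULT_INJECTION_DEBUG_FS enabled, the two attributes above appear
 * under debugfs and are tuned with the generic fault-injection knobs, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 10 > /sys/kernel/debug/fail_mempool_alloc/probability
 *	echo -1 > /sys/kernel/debug/fail_mempool_alloc/times
 *
 * which makes roughly one in ten mempool_alloc() calls fall back to the
 * reserved pool, exercising the pool-refill and waiter paths below.
 */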
39 
40 #ifdef CONFIG_SLUB_DEBUG_ON
41 static void poison_error(struct mempool *pool, void *element, size_t size,
42 			 size_t byte)
43 {
44 	const int nr = pool->curr_nr;
45 	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
46 	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
47 	int i;
48 
49 	pr_err("BUG: mempool element poison mismatch\n");
50 	pr_err("Mempool %p size %zu\n", pool, size);
51 	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
52 	for (i = start; i < end; i++)
53 		pr_cont("%x ", *(u8 *)(element + i));
54 	pr_cont("%s\n", end < size ? "..." : "");
55 	dump_stack();
56 }
57 
58 static void __check_element(struct mempool *pool, void *element, size_t size)
59 {
60 	u8 *obj = element;
61 	size_t i;
62 
63 	for (i = 0; i < size; i++) {
64 		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
65 
66 		if (obj[i] != exp) {
67 			poison_error(pool, element, size, i);
68 			return;
69 		}
70 	}
71 	memset(obj, POISON_INUSE, size);
72 }
73 
74 static void check_element(struct mempool *pool, void *element)
75 {
76 	/* Skip checking: KASAN might save its metadata in the element. */
77 	if (kasan_enabled())
78 		return;
79 
80 	/* Mempools backed by slab allocator */
81 	if (pool->free == mempool_kfree) {
82 		__check_element(pool, element, (size_t)pool->pool_data);
83 	} else if (pool->free == mempool_free_slab) {
84 		__check_element(pool, element, kmem_cache_size(pool->pool_data));
85 	} else if (pool->free == mempool_free_pages) {
86 		/* Mempools backed by page allocator */
87 		int order = (int)(long)pool->pool_data;
88 
89 #ifdef CONFIG_HIGHMEM
90 		for (int i = 0; i < (1 << order); i++) {
91 			struct page *page = (struct page *)element;
92 			void *addr = kmap_local_page(page + i);
93 
94 			__check_element(pool, addr, PAGE_SIZE);
95 			kunmap_local(addr);
96 		}
97 #else
98 		void *addr = page_address((struct page *)element);
99 
100 		__check_element(pool, addr, PAGE_SIZE << order);
101 #endif
102 	}
103 }
104 
105 static void __poison_element(void *element, size_t size)
106 {
107 	u8 *obj = element;
108 
109 	memset(obj, POISON_FREE, size - 1);
110 	obj[size - 1] = POISON_END;
111 }
112 
113 static void poison_element(struct mempool *pool, void *element)
114 {
115 	/* Skip poisoning: KASAN might save its metadata in the element. */
116 	if (kasan_enabled())
117 		return;
118 
119 	/* Mempools backed by slab allocator */
120 	if (pool->alloc == mempool_kmalloc) {
121 		__poison_element(element, (size_t)pool->pool_data);
122 	} else if (pool->alloc == mempool_alloc_slab) {
123 		__poison_element(element, kmem_cache_size(pool->pool_data));
124 	} else if (pool->alloc == mempool_alloc_pages) {
125 		/* Mempools backed by page allocator */
126 		int order = (int)(long)pool->pool_data;
127 
128 #ifdef CONFIG_HIGHMEM
129 		for (int i = 0; i < (1 << order); i++) {
130 			struct page *page = (struct page *)element;
131 			void *addr = kmap_local_page(page + i);
132 
133 			__poison_element(addr, PAGE_SIZE);
134 			kunmap_local(addr);
135 		}
136 #else
137 		void *addr = page_address((struct page *)element);
138 
139 		__poison_element(addr, PAGE_SIZE << order);
140 #endif
141 	}
142 }
143 #else /* CONFIG_SLUB_DEBUG_ON */
144 static inline void check_element(struct mempool *pool, void *element)
145 {
146 }
147 static inline void poison_element(struct mempool *pool, void *element)
148 {
149 }
150 #endif /* CONFIG_SLUB_DEBUG_ON */
151 
152 static __always_inline bool kasan_poison_element(struct mempool *pool,
153 		void *element)
154 {
155 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
156 		return kasan_mempool_poison_object(element);
157 	else if (pool->alloc == mempool_alloc_pages)
158 		return kasan_mempool_poison_pages(element,
159 						(unsigned long)pool->pool_data);
160 	return true;
161 }
162 
163 static void kasan_unpoison_element(struct mempool *pool, void *element)
164 {
165 	if (pool->alloc == mempool_kmalloc)
166 		kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
167 	else if (pool->alloc == mempool_alloc_slab)
168 		kasan_mempool_unpoison_object(element,
169 					      kmem_cache_size(pool->pool_data));
170 	else if (pool->alloc == mempool_alloc_pages)
171 		kasan_mempool_unpoison_pages(element,
172 					     (unsigned long)pool->pool_data);
173 }
174 
175 static __always_inline void add_element(struct mempool *pool, void *element)
176 {
177 	BUG_ON(pool->min_nr != 0 && pool->curr_nr >= pool->min_nr);
178 	poison_element(pool, element);
179 	if (kasan_poison_element(pool, element))
180 		pool->elements[pool->curr_nr++] = element;
181 }
182 
183 static void *remove_element(struct mempool *pool)
184 {
185 	void *element = pool->elements[--pool->curr_nr];
186 
187 	BUG_ON(pool->curr_nr < 0);
188 	kasan_unpoison_element(pool, element);
189 	check_element(pool, element);
190 	return element;
191 }
192 
193 /**
194  * mempool_exit - exit a mempool initialized with mempool_init()
195  * @pool:      pointer to the memory pool which was initialized with
196  *             mempool_init().
197  *
198  * Free all reserved elements in @pool and @pool itself.  This function
199  * only sleeps if the free_fn() function sleeps.
200  *
201  * May be called on a zeroed but uninitialized mempool (i.e. allocated with
202  * kzalloc()).
203  */
204 void mempool_exit(struct mempool *pool)
205 {
206 	while (pool->curr_nr) {
207 		void *element = remove_element(pool);
208 		pool->free(element, pool->pool_data);
209 	}
210 	kfree(pool->elements);
211 	pool->elements = NULL;
212 }
213 EXPORT_SYMBOL(mempool_exit);
214 
215 /**
216  * mempool_destroy - deallocate a memory pool
217  * @pool:      pointer to the memory pool which was allocated via
218  *             mempool_create().
219  *
220  * Free all reserved elements in @pool and @pool itself.  This function
221  * only sleeps if the free_fn() function sleeps.
222  */
223 void mempool_destroy(struct mempool *pool)
224 {
225 	if (unlikely(!pool))
226 		return;
227 
228 	mempool_exit(pool);
229 	kfree(pool);
230 }
231 EXPORT_SYMBOL(mempool_destroy);
232 
233 int mempool_init_node(struct mempool *pool, int min_nr,
234 		mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
235 		void *pool_data, gfp_t gfp_mask, int node_id)
236 {
237 	spin_lock_init(&pool->lock);
238 	pool->min_nr	= min_nr;
239 	pool->pool_data = pool_data;
240 	pool->alloc	= alloc_fn;
241 	pool->free	= free_fn;
242 	init_waitqueue_head(&pool->wait);
243 	/*
244 	 * max() is used here to ensure storage for at least 1 element, to
245 	 * support zero-minimum pools.
246 	 */
247 	pool->elements = kmalloc_array_node(max(1, min_nr), sizeof(void *),
248 					    gfp_mask, node_id);
249 	if (!pool->elements)
250 		return -ENOMEM;
251 
252 	/*
253 	 * First pre-allocate the guaranteed number of buffers; for a
254 	 * zero-minimum pool, pre-allocate 1 element.
255 	 */
256 	while (pool->curr_nr < max(1, pool->min_nr)) {
257 		void *element;
258 
259 		element = pool->alloc(gfp_mask, pool->pool_data);
260 		if (unlikely(!element)) {
261 			mempool_exit(pool);
262 			return -ENOMEM;
263 		}
264 		add_element(pool, element);
265 	}
266 
267 	return 0;
268 }
269 EXPORT_SYMBOL(mempool_init_node);
270 
271 /**
272  * mempool_init - initialize a memory pool
273  * @pool:      pointer to the memory pool that should be initialized
274  * @min_nr:    the minimum number of elements guaranteed to be
275  *             allocated for this pool.
276  * @alloc_fn:  user-defined element-allocation function.
277  * @free_fn:   user-defined element-freeing function.
278  * @pool_data: optional private data available to the user-defined functions.
279  *
280  * Like mempool_create(), but initializes the pool in place (i.e. embedded in
281  * another structure).
282  *
283  * Return: %0 on success, negative error code otherwise.
284  */
285 int mempool_init_noprof(struct mempool *pool, int min_nr,
286 		mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
287 		void *pool_data)
288 {
289 	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
290 				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
291 
292 }
293 EXPORT_SYMBOL(mempool_init_noprof);
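
/*
 * Illustrative usage sketch (editor's addition, not part of this file):
 * a mempool embedded in a driver-private structure, backed by kmalloc via
 * the mempool_init_kmalloc_pool() helper.  struct my_req, struct my_dev and
 * the my_dev_*() functions are hypothetical.
 */
struct my_req {
	struct list_head list;
	void *payload;
};

struct my_dev {
	struct mempool req_pool;
	/* ... other members ... */
};

static __maybe_unused int my_dev_setup(struct my_dev *dev)
{
	/* Guarantee at least four request-sized elements. */
	return mempool_init_kmalloc_pool(&dev->req_pool, 4,
					 sizeof(struct my_req));
}

static __maybe_unused void my_dev_teardown(struct my_dev *dev)
{
	mempool_exit(&dev->req_pool);
}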
294 
295 /**
296  * mempool_create_node - create a memory pool
297  * @min_nr:    the minimum number of elements guaranteed to be
298  *             allocated for this pool.
299  * @alloc_fn:  user-defined element-allocation function.
300  * @free_fn:   user-defined element-freeing function.
301  * @pool_data: optional private data available to the user-defined functions.
302  * @gfp_mask:  memory allocation flags
303  * @node_id:   numa node to allocate on
304  *
305  * This function creates and allocates a guaranteed-size, preallocated
306  * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
307  * functions. This function might sleep. Both the alloc_fn() and the free_fn()
308  * callbacks might also sleep, as long as mempool_alloc() is not called
309  * from IRQ context.
310  *
311  * Return: pointer to the created memory pool object or %NULL on error.
312  */
313 struct mempool *mempool_create_node_noprof(int min_nr,
314 		mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
315 		void *pool_data, gfp_t gfp_mask, int node_id)
316 {
317 	struct mempool *pool;
318 
319 	pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
320 	if (!pool)
321 		return NULL;
322 
323 	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
324 			      gfp_mask, node_id)) {
325 		kfree(pool);
326 		return NULL;
327 	}
328 
329 	return pool;
330 }
331 EXPORT_SYMBOL(mempool_create_node_noprof);
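
/*
 * Illustrative usage sketch (editor's addition): a pool built on the
 * user-defined callbacks described above.  my_ctx_alloc()/my_ctx_free() and
 * the 512-byte element size are hypothetical stand-ins for a real allocator.
 */
static void *my_ctx_alloc(gfp_t gfp_mask, void *pool_data)
{
	/* pool_data carries the element size in this sketch. */
	return kmalloc((size_t)pool_data, gfp_mask);
}

static void my_ctx_free(void *element, void *pool_data)
{
	kfree(element);
}

static __maybe_unused struct mempool *my_ctx_pool_create(void)
{
	/* Keep at least eight 512-byte elements in reserve. */
	return mempool_create(8, my_ctx_alloc, my_ctx_free,
			      (void *)(unsigned long)512);
}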
332 
333 /**
334  * mempool_resize - resize an existing memory pool
335  * @pool:       pointer to the memory pool which was allocated via
336  *              mempool_create().
337  * @new_min_nr: the new minimum number of elements guaranteed to be
338  *              allocated for this pool.
339  *
340  * This function shrinks/grows the pool. In the case of growing,
341  * it cannot be guaranteed that the pool will be grown to the new
342  * size immediately, but new mempool_free() calls will refill it.
343  * This function may sleep.
344  *
345  * Note that the caller must guarantee that no mempool_destroy() is called
346  * while this function is running. mempool_alloc() & mempool_free()
347  * might be called (e.g. from IRQ context) while this function executes.
348  *
349  * Return: %0 on success, negative error code otherwise.
350  */
351 int mempool_resize(struct mempool *pool, int new_min_nr)
352 {
353 	void *element;
354 	void **new_elements;
355 	unsigned long flags;
356 
357 	BUG_ON(new_min_nr <= 0);
358 	might_sleep();
359 
360 	spin_lock_irqsave(&pool->lock, flags);
361 	if (new_min_nr <= pool->min_nr) {
362 		while (new_min_nr < pool->curr_nr) {
363 			element = remove_element(pool);
364 			spin_unlock_irqrestore(&pool->lock, flags);
365 			pool->free(element, pool->pool_data);
366 			spin_lock_irqsave(&pool->lock, flags);
367 		}
368 		pool->min_nr = new_min_nr;
369 		goto out_unlock;
370 	}
371 	spin_unlock_irqrestore(&pool->lock, flags);
372 
373 	/* Grow the pool */
374 	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
375 				     GFP_KERNEL);
376 	if (!new_elements)
377 		return -ENOMEM;
378 
379 	spin_lock_irqsave(&pool->lock, flags);
380 	if (unlikely(new_min_nr <= pool->min_nr)) {
381 		/* Raced, other resize will do our work */
382 		spin_unlock_irqrestore(&pool->lock, flags);
383 		kfree(new_elements);
384 		goto out;
385 	}
386 	memcpy(new_elements, pool->elements,
387 			pool->curr_nr * sizeof(*new_elements));
388 	kfree(pool->elements);
389 	pool->elements = new_elements;
390 	pool->min_nr = new_min_nr;
391 
392 	while (pool->curr_nr < pool->min_nr) {
393 		spin_unlock_irqrestore(&pool->lock, flags);
394 		element = pool->alloc(GFP_KERNEL, pool->pool_data);
395 		if (!element)
396 			goto out;
397 		spin_lock_irqsave(&pool->lock, flags);
398 		if (pool->curr_nr < pool->min_nr) {
399 			add_element(pool, element);
400 		} else {
401 			spin_unlock_irqrestore(&pool->lock, flags);
402 			pool->free(element, pool->pool_data);	/* Raced */
403 			goto out;
404 		}
405 	}
406 out_unlock:
407 	spin_unlock_irqrestore(&pool->lock, flags);
408 out:
409 	return 0;
410 }
411 EXPORT_SYMBOL(mempool_resize);
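
/*
 * Illustrative sketch (editor's addition): growing the reserve when the
 * expected level of concurrency changes.  Reuses the hypothetical my_dev
 * from the sketch above; the new minimum must be positive.
 */
static __maybe_unused int my_dev_set_queue_depth(struct my_dev *dev,
						 int nr_inflight)
{
	/* Keep one guaranteed element per in-flight request. */
	return mempool_resize(&dev->req_pool, nr_inflight);
}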
412 
413 static unsigned int mempool_alloc_from_pool(struct mempool *pool, void **elems,
414 		unsigned int count, unsigned int allocated,
415 		gfp_t gfp_mask)
416 {
417 	unsigned long flags;
418 	unsigned int i;
419 
420 	spin_lock_irqsave(&pool->lock, flags);
421 	if (unlikely(pool->curr_nr < count - allocated))
422 		goto fail;
423 	for (i = 0; i < count; i++) {
424 		if (!elems[i]) {
425 			elems[i] = remove_element(pool);
426 			allocated++;
427 		}
428 	}
429 	spin_unlock_irqrestore(&pool->lock, flags);
430 
431 	/* Paired with the rmb in mempool_free_bulk(), read comment there. */
432 	smp_wmb();
433 
434 	/*
435 	 * Update the allocation stack trace as this is more useful for
436 	 * debugging.
437 	 */
438 	for (i = 0; i < count; i++)
439 		kmemleak_update_trace(elems[i]);
440 	return allocated;
441 
442 fail:
443 	if (gfp_mask & __GFP_DIRECT_RECLAIM) {
444 		DEFINE_WAIT(wait);
445 
446 		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
447 		spin_unlock_irqrestore(&pool->lock, flags);
448 
449 		/*
450 		 * Wait for someone else to return an element to @pool, but wake
451 		 * up occasionally, as memory pressure might have eased and the
452 		 * normal allocation in alloc_fn could succeed even if no
453 		 * element was returned.
454 		 */
455 		io_schedule_timeout(5 * HZ);
456 		finish_wait(&pool->wait, &wait);
457 	} else {
458 		/* We must not sleep if __GFP_DIRECT_RECLAIM is not set. */
459 		spin_unlock_irqrestore(&pool->lock, flags);
460 	}
461 
462 	return allocated;
463 }
464 
465 /*
466  * Adjust the gfp flags for mempool allocations, as we never want to dip into
467  * the global emergency reserves or retry in the page allocator.
468  *
469  * The first pass also doesn't want to enter direct reclaim, but later passes
470  * do, so return a separate subset of the flags for that first iteration.
471  */
472 static inline gfp_t mempool_adjust_gfp(gfp_t *gfp_mask)
473 {
474 	*gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
475 	return *gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
476 }
477 
478 /**
479  * mempool_alloc_bulk - allocate multiple elements from a memory pool
480  * @pool:	pointer to the memory pool
481  * @elems:	partially or fully populated elements array
482  * @count:	total number of entries in @elems
483  * @allocated:	number of entries in @elems already allocated
484  *
485  * Allocate an element for each slot in @elems that is %NULL. This is done by
486  * first calling into the alloc_fn supplied at pool initialization time, and
487  * dipping into the reserved pool when alloc_fn fails to allocate an element.
488  *
489  * On return all @count elements in @elems will be populated.
490  *
491  * Return: Always 0.  If it wasn't for %$#^$ alloc tags, it would return void.
492  */
493 int mempool_alloc_bulk_noprof(struct mempool *pool, void **elems,
494 		unsigned int count, unsigned int allocated)
495 {
496 	gfp_t gfp_mask = GFP_KERNEL;
497 	gfp_t gfp_temp = mempool_adjust_gfp(&gfp_mask);
498 	unsigned int i = 0;
499 
500 	VM_WARN_ON_ONCE(count > pool->min_nr);
501 	might_alloc(gfp_mask);
502 
503 	/*
504 	 * If an error is injected, fail all elements in a bulk allocation so
505 	 * that we stress the path where multiple elements are missing.
506 	 */
507 	if (should_fail_ex(&fail_mempool_alloc_bulk, 1, FAULT_NOWARN)) {
508 		pr_info("forcing mempool usage for %pS\n",
509 				(void *)_RET_IP_);
510 		goto use_pool;
511 	}
512 
513 repeat_alloc:
514 	/*
515 	 * Try to allocate the elements using the allocation callback first as
516 	 * that might succeed even when the caller's bulk allocation did not.
517 	 */
518 	for (i = 0; i < count; i++) {
519 		if (elems[i])
520 			continue;
521 		elems[i] = pool->alloc(gfp_temp, pool->pool_data);
522 		if (unlikely(!elems[i]))
523 			goto use_pool;
524 		allocated++;
525 	}
526 
527 	return 0;
528 
529 use_pool:
530 	allocated = mempool_alloc_from_pool(pool, elems, count, allocated,
531 			gfp_temp);
532 	gfp_temp = gfp_mask;
533 	goto repeat_alloc;
534 }
535 EXPORT_SYMBOL_GPL(mempool_alloc_bulk_noprof);
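
/*
 * Illustrative sketch (editor's addition): allocating a whole batch up
 * front.  This assumes the mempool_alloc_bulk() wrapper that pairs with the
 * _noprof variant above; my_dev, MY_BATCH and the helper are hypothetical.
 * Note that the batch size must not exceed the pool's min_nr.
 */
#define MY_BATCH	4

static __maybe_unused int my_dev_start_batch(struct my_dev *dev,
					     void *elems[MY_BATCH])
{
	memset(elems, 0, MY_BATCH * sizeof(void *));

	/* All MY_BATCH slots are populated on return. */
	return mempool_alloc_bulk(&dev->req_pool, elems, MY_BATCH, 0);
}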
536 
537 /**
538  * mempool_alloc - allocate an element from a memory pool
539  * @pool:	pointer to the memory pool
540  * @gfp_mask:	GFP_* flags.  %__GFP_ZERO is not supported.
541  *
542  * Allocate an element from @pool.  This is done by first calling into the
543  * alloc_fn supplied at pool initialization time, and dipping into the reserved
544  * pool when alloc_fn fails to allocate an element.
545  *
546  * This function only sleeps if the alloc_fn callback sleeps, or when waiting
547  * for elements to become available in the pool.
548  *
549  * Return: pointer to the allocated element or %NULL when failing to allocate
550  * an element.  Allocation failure can only happen when @gfp_mask does not
551  * include %__GFP_DIRECT_RECLAIM.
552  */
553 void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask)
554 {
555 	gfp_t gfp_temp = mempool_adjust_gfp(&gfp_mask);
556 	void *element;
557 
558 	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
559 	might_alloc(gfp_mask);
560 
561 repeat_alloc:
562 	if (should_fail_ex(&fail_mempool_alloc, 1, FAULT_NOWARN)) {
563 		pr_info("forcing mempool usage for %pS\n",
564 				(void *)_RET_IP_);
565 		element = NULL;
566 	} else {
567 		element = pool->alloc(gfp_temp, pool->pool_data);
568 	}
569 
570 	if (unlikely(!element)) {
571 		/*
572 		 * Try to allocate an element from the pool.
573 		 *
574 		 * The first pass won't have __GFP_DIRECT_RECLAIM and won't
575 		 * sleep in mempool_alloc_from_pool.  Retry the allocation
576 		 * with all flags set in that case.
577 		 */
578 		if (!mempool_alloc_from_pool(pool, &element, 1, 0, gfp_temp)) {
579 			if (gfp_temp != gfp_mask) {
580 				gfp_temp = gfp_mask;
581 				goto repeat_alloc;
582 			}
583 			if (gfp_mask & __GFP_DIRECT_RECLAIM) {
584 				goto repeat_alloc;
585 			}
586 		}
587 	}
588 
589 	return element;
590 }
591 EXPORT_SYMBOL(mempool_alloc_noprof);
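
/*
 * Illustrative sketch (editor's addition): the classic pattern in an I/O
 * submission path that must guarantee forward progress.  Reuses the
 * hypothetical my_dev/my_req from the sketches above.
 */
static __maybe_unused int my_dev_queue_request(struct my_dev *dev)
{
	struct my_req *req;

	/* GFP_NOIO includes __GFP_DIRECT_RECLAIM, so this cannot fail. */
	req = mempool_alloc(&dev->req_pool, GFP_NOIO);

	/*
	 * ... fill in and submit @req; a real driver would call
	 * mempool_free() from its completion path, done inline here only to
	 * keep the sketch self-contained ...
	 */
	mempool_free(req, &dev->req_pool);
	return 0;
}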
592 
593 /**
594  * mempool_alloc_preallocated - allocate an element from preallocated elements
595  *                              belonging to a memory pool
596  * @pool:	pointer to the memory pool
597  *
598  * This function is similar to mempool_alloc(), but it only attempts allocating
599  * an element from the preallocated elements. It only takes a single spinlock_t
600  * and immediately returns if no preallocated elements are available.
601  *
602  * Return: pointer to the allocated element or %NULL if no elements are
603  * available.
604  */
605 void *mempool_alloc_preallocated(struct mempool *pool)
606 {
607 	void *element = NULL;
608 
609 	mempool_alloc_from_pool(pool, &element, 1, 0, GFP_NOWAIT);
610 	return element;
611 }
612 EXPORT_SYMBOL(mempool_alloc_preallocated);
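
/*
 * Illustrative sketch (editor's addition): opportunistically taking a
 * reserved element in a context that must not sleep, leaving the slow path
 * to the caller when the reserve is empty.  my_dev/my_req are hypothetical.
 */
static __maybe_unused struct my_req *my_dev_try_get_req(struct my_dev *dev)
{
	return mempool_alloc_preallocated(&dev->req_pool);
}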
613 
614 /**
615  * mempool_free_bulk - return elements to a mempool
616  * @pool:	pointer to the memory pool
617  * @elems:	elements to return
618  * @count:	number of elements to return
619  *
620  * Returns a number of elements from the start of @elems to @pool if @pool needs
621  * replenishing and sets their slots in @elems to %NULL.  Other elements are left
622  * in @elems.
623  *
624  * Return: number of elements transferred to @pool.  Elements are always
625  * transferred from the beginning of @elems, so the return value can be used as
626  * an offset into @elems for freeing the remaining elements in the caller.
627  */
628 unsigned int mempool_free_bulk(struct mempool *pool, void **elems,
629 		unsigned int count)
630 {
631 	unsigned long flags;
632 	unsigned int freed = 0;
633 	bool added = false;
634 
635 	/*
636 	 * Paired with the wmb in mempool_alloc_from_pool().  The preceding read is
637 	 * for @element and the following @pool->curr_nr.  This ensures
638 	 * that the visible value of @pool->curr_nr is from after the
639 	 * allocation of @element.  This is necessary for fringe cases
640 	 * where @element was passed to this task without going through
641 	 * barriers.
642 	 *
643 	 * For example, assume @p is %NULL at the beginning and one task
644 	 * performs "p = mempool_alloc(...);" while another task is doing
645 	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
646 	 * may end up using curr_nr value which is from before allocation
647 	 * of @p without the following rmb.
648 	 */
649 	smp_rmb();
650 
651 	/*
652 	 * For correctness, we need a test which is guaranteed to trigger
653 	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
654 	 * without locking achieves that and refilling as soon as possible
655 	 * is desirable.
656 	 *
657 	 * Because curr_nr visible here is always a value after the
658 	 * allocation of @element, any task which decremented curr_nr below
659 	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
660 	 * incremented to min_nr afterwards.  If curr_nr gets incremented
661 	 * to min_nr after the allocation of @element, the elements
662 	 * allocated after that are subject to the same guarantee.
663 	 *
664 	 * Waiters happen iff curr_nr is 0 and the above guarantee also
665 	 * ensures that there will be frees which return elements to the
666 	 * pool waking up the waiters.
667 	 *
668 	 * For zero-minimum pools, curr_nr < min_nr (0 < 0) never succeeds,
669 	 * so waiters sleeping on pool->wait would never be woken by the
670 	 * wake-up path of the previous test. This explicit check adds one
671 	 * element to the pool when both min_nr and curr_nr are 0, so that
672 	 * any active waiters are properly awakened.
673 	 */
674 	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
675 		spin_lock_irqsave(&pool->lock, flags);
676 		while (pool->curr_nr < pool->min_nr && freed < count) {
677 			add_element(pool, elems[freed++]);
678 			added = true;
679 		}
680 		spin_unlock_irqrestore(&pool->lock, flags);
681 	} else if (unlikely(pool->min_nr == 0 &&
682 		     READ_ONCE(pool->curr_nr) == 0)) {
683 		/* Handle the min_nr = 0 edge case: */
684 		spin_lock_irqsave(&pool->lock, flags);
685 		if (likely(pool->curr_nr == 0)) {
686 			add_element(pool, elems[freed++]);
687 			added = true;
688 		}
689 		spin_unlock_irqrestore(&pool->lock, flags);
690 	}
691 
692 	if (unlikely(added) && wq_has_sleeper(&pool->wait))
693 		wake_up(&pool->wait);
694 
695 	return freed;
696 }
697 EXPORT_SYMBOL_GPL(mempool_free_bulk);
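
/*
 * Illustrative sketch (editor's addition): returning a batch and using the
 * return value as the offset of the elements the pool did not keep.  This
 * matches the kmalloc-backed my_dev pool from the sketches above, so the
 * leftovers are released with kfree().
 */
static __maybe_unused void my_dev_end_batch(struct my_dev *dev,
					    void *elems[MY_BATCH])
{
	unsigned int freed = mempool_free_bulk(&dev->req_pool, elems, MY_BATCH);
	unsigned int i;

	/* Elements not taken by the pool still belong to the caller. */
	for (i = freed; i < MY_BATCH; i++)
		kfree(elems[i]);
}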
698 
699 /**
700  * mempool_free - return an element to the pool.
701  * @element:	element to return
702  * @pool:	pointer to the memory pool
703  *
704  * Returns @element to @pool if it needs replenishing, else frees it using
705  * the free_fn callback in @pool.
706  *
707  * This function only sleeps if the free_fn callback sleeps.
708  */
709 void mempool_free(void *element, struct mempool *pool)
710 {
711 	if (likely(element) && !mempool_free_bulk(pool, &element, 1))
712 		pool->free(element, pool->pool_data);
713 }
714 EXPORT_SYMBOL(mempool_free);
715 
716 /*
717  * A commonly used alloc and free fn.
718  */
719 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
720 {
721 	struct kmem_cache *mem = pool_data;
722 	VM_BUG_ON(mem->ctor);
723 	return kmem_cache_alloc_noprof(mem, gfp_mask);
724 }
725 EXPORT_SYMBOL(mempool_alloc_slab);
726 
727 void mempool_free_slab(void *element, void *pool_data)
728 {
729 	struct kmem_cache *mem = pool_data;
730 	kmem_cache_free(mem, element);
731 }
732 EXPORT_SYMBOL(mempool_free_slab);
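
/*
 * Illustrative sketch (editor's addition): these two callbacks are what the
 * mempool_create_slab_pool() convenience helper plugs in.  my_cache is a
 * hypothetical, already-created kmem_cache without a constructor.
 */
static __maybe_unused struct mempool *
my_slab_pool_create(struct kmem_cache *my_cache)
{
	return mempool_create(16, mempool_alloc_slab, mempool_free_slab,
			      my_cache);
}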
733 
734 /*
735  * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
736  * specified by pool_data
737  */
738 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
739 {
740 	size_t size = (size_t)pool_data;
741 	return kmalloc_noprof(size, gfp_mask);
742 }
743 EXPORT_SYMBOL(mempool_kmalloc);
744 
745 void mempool_kfree(void *element, void *pool_data)
746 {
747 	kfree(element);
748 }
749 EXPORT_SYMBOL(mempool_kfree);
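
/*
 * Illustrative sketch (editor's addition): equivalent to what the
 * mempool_create_kmalloc_pool() helper builds with these two callbacks.
 * The 1024-byte element size is arbitrary.
 */
static __maybe_unused struct mempool *my_kmalloc_pool_create(void)
{
	return mempool_create(16, mempool_kmalloc, mempool_kfree,
			      (void *)(unsigned long)1024);
}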
750 
751 /*
752  * A simple mempool-backed page allocator that allocates pages
753  * of the order specified by pool_data.
754  */
755 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
756 {
757 	int order = (int)(long)pool_data;
758 	return alloc_pages_noprof(gfp_mask, order);
759 }
760 EXPORT_SYMBOL(mempool_alloc_pages);
761 
762 void mempool_free_pages(void *element, void *pool_data)
763 {
764 	int order = (int)(long)pool_data;
765 	__free_pages(element, order);
766 }
767 EXPORT_SYMBOL(mempool_free_pages);
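
/*
 * Illustrative sketch (editor's addition): a reserve of eight order-1 page
 * allocations, equivalent to mempool_create_page_pool(8, 1).
 */
static __maybe_unused struct mempool *my_page_pool_create(void)
{
	return mempool_create(8, mempool_alloc_pages, mempool_free_pages,
			      (void *)(long)1);
}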
768