// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include "slab.h"

#ifdef CONFIG_SLUB_DEBUG_ON
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Skip checking: KASAN might save its metadata in the element. */
	if (kasan_enabled())
		return;

	/* Mempools backed by slab allocator */
	if (pool->free == mempool_kfree) {
		__check_element(pool, element, (size_t)pool->pool_data);
	} else if (pool->free == mempool_free_slab) {
		__check_element(pool, element, kmem_cache_size(pool->pool_data));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;

#ifdef CONFIG_HIGHMEM
		for (int i = 0; i < (1 << order); i++) {
			struct page *page = (struct page *)element;
			void *addr = kmap_local_page(page + i);

			__check_element(pool, addr, PAGE_SIZE);
			kunmap_local(addr);
		}
#else
		void *addr = page_address((struct page *)element);

		__check_element(pool, addr, PAGE_SIZE << order);
#endif
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Skip poisoning: KASAN might save its metadata in the element. */
	if (kasan_enabled())
		return;

	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_kmalloc) {
		__poison_element(element, (size_t)pool->pool_data);
	} else if (pool->alloc == mempool_alloc_slab) {
		__poison_element(element, kmem_cache_size(pool->pool_data));
	} else if (pool->alloc == mempool_alloc_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;

#ifdef CONFIG_HIGHMEM
		for (int i = 0; i < (1 << order); i++) {
			struct page *page = (struct page *)element;
			void *addr = kmap_local_page(page + i);

			__poison_element(addr, PAGE_SIZE);
			kunmap_local(addr);
		}
#else
		void *addr = page_address((struct page *)element);

		__poison_element(addr, PAGE_SIZE << order);
#endif
	}
}
#else /* CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_SLUB_DEBUG_ON */

/*
 * Poison the element via KASAN.  Returns false if KASAN rejects the
 * element (e.g. on a detected double-free), in which case the caller
 * must not keep it in the pool.
 */
static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		return kasan_mempool_poison_object(element);
	else if (pool->alloc == mempool_alloc_pages)
		return kasan_mempool_poison_pages(element,
						(unsigned long)pool->pool_data);
	return true;
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_kmalloc)
		kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
	else if (pool->alloc == mempool_alloc_slab)
		kasan_mempool_unpoison_object(element,
					      kmem_cache_size(pool->pool_data));
	else if (pool->alloc == mempool_alloc_pages)
		kasan_mempool_unpoison_pages(element,
					     (unsigned long)pool->pool_data);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->min_nr != 0 && pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	if (kasan_poison_element(pool, element))
		pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element);
	check_element(pool, element);
	return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool.  @pool itself is not freed, so this
 * may be used on a pool embedded in another structure.  This function only
 * sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);
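
/*
 * A minimal usage sketch (editorial addition, not upstream code) of
 * tearing down an embedded pool; "struct foo_dev" is an illustrative
 * name, not a real kernel structure:
 *
 *	struct foo_dev {
 *		mempool_t req_pool;
 *	};
 *
 *	static void foo_dev_release(struct foo_dev *dev)
 *	{
 *		mempool_exit(&dev->req_pool);
 *	}
 *
 * This is safe even if mempool_init() was never called, provided *dev
 * was zero-initialized (e.g. allocated with kzalloc()).
 */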

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		      mempool_free_t *free_fn, void *pool_data,
		      gfp_t gfp_mask, int node_id)
{
	spin_lock_init(&pool->lock);
	pool->min_nr	= min_nr;
	pool->pool_data = pool_data;
	pool->alloc	= alloc_fn;
	pool->free	= free_fn;
	init_waitqueue_head(&pool->wait);
	/*
	 * max() is used here to ensure storage for at least one element,
	 * to support zero-minimum pools.
	 */
	pool->elements = kmalloc_array_node(max(1, min_nr), sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers; also
	 * pre-allocate one element for zero-minimum pools.
	 */
	while (pool->curr_nr < max(1, pool->min_nr)) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
		}
		add_element(pool, element);
	}

	return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in-place (i.e. embedded in
 * another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init_noprof);
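
/*
 * A minimal usage sketch (editorial addition, not upstream code): callers
 * normally go through the mempool_init() wrapper rather than the _noprof
 * variant.  "struct foo_dev" and "foo_req_cache" are illustrative names.
 *
 *	static int foo_dev_setup(struct foo_dev *dev,
 *				 struct kmem_cache *foo_req_cache)
 *	{
 *		return mempool_init(&dev->req_pool, 4, mempool_alloc_slab,
 *				    mempool_free_slab, foo_req_cache);
 *	}
 */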

/**
 * mempool_create_node - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 * @gfp_mask:  memory allocation flags
 * @node_id:   numa node to allocate on
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both alloc_fn() and
 * free_fn() might sleep - as long as mempool_alloc() is not called from
 * IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
				      mempool_free_t *free_fn, void *pool_data,
				      gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node_noprof);
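
/*
 * A minimal usage sketch (editorial addition, not upstream code): most
 * callers use the mempool_create() wrapper, which fills in GFP_KERNEL
 * and NUMA_NO_NODE.  "foo_req_cache" is an illustrative name.
 *
 *	mempool_t *pool;
 *
 *	pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
 *			      foo_req_cache);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	mempool_destroy(pool);
 */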

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note: the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
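
/*
 * A minimal usage sketch (editorial addition, not upstream code): raising
 * a pool's reserve when the worst-case demand grows.  May sleep, and must
 * not race with mempool_destroy() of the same pool.
 *
 *	err = mempool_resize(pool, 32);
 *	if (err)
 *		return err;
 */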

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_alloc(gfp_mask);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc_noprof);
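
/*
 * A minimal usage sketch (editorial addition, not upstream code): the
 * typical pattern in an I/O submission path.  With a gfp_mask that allows
 * direct reclaim, such as GFP_NOIO, the call never returns %NULL; it
 * retries from the reserve until some other user frees an element back.
 *
 *	struct foo_req *req = mempool_alloc(pool, GFP_NOIO);
 *
 * and later, from the completion path:
 *
 *	mempool_free(req, pool);
 */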

/**
 * mempool_alloc_preallocated - allocate an element from preallocated elements
 *                              belonging to a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function is similar to mempool_alloc(), but it only attempts to
 * allocate an element from the preallocated elements. It does not sleep and
 * returns immediately if no preallocated elements are available.
 *
 * Return: pointer to the allocated element or %NULL if no elements are
 * available.
 */
void *mempool_alloc_preallocated(mempool_t *pool)
{
	void *element;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return NULL;
}
EXPORT_SYMBOL(mempool_alloc_preallocated);
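
/*
 * A minimal usage sketch (editorial addition, not upstream code):
 * opportunistic allocation that must not sleep, with an explicit
 * fallback when the reserve is empty.
 *
 *	elem = mempool_alloc_preallocated(pool);
 *	if (!elem)
 *		return -EBUSY;
 *
 * where the caller is expected to retry or defer the work.
 */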

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			if (wq_has_sleeper(&pool->wait))
				wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/*
	 * Handle the min_nr = 0 edge case:
	 *
	 * For zero-minimum pools, curr_nr < min_nr (0 < 0) never succeeds,
	 * so waiters sleeping on pool->wait would never be woken by the
	 * wake-up path of the previous test. This explicit check ensures
	 * the element is added back to the pool when both min_nr and
	 * curr_nr are 0, and that any active waiters are properly awakened.
	 */
	if (unlikely(pool->min_nr == 0 &&
		     READ_ONCE(pool->curr_nr) == 0)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr == 0)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			if (wq_has_sleeper(&pool->wait))
				wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
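
/*
 * A note with a sketch (editorial addition, not upstream code): a
 * zero-minimum pool keeps no guaranteed reserve, but mempool_init()
 * still pre-allocates one element and the check above re-parks an
 * element whenever the pool is empty, so a single spare stays cached:
 *
 *	err = mempool_init_kmalloc_pool(&pool, 0, 256);
 */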

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc_noprof(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
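
/*
 * A minimal usage sketch (editorial addition, not upstream code): rather
 * than passing these helpers by hand, callers can use the
 * mempool_create_slab_pool() convenience wrapper from <linux/mempool.h>.
 * "foo_req_cache" is an illustrative name.
 *
 *	pool = mempool_create_slab_pool(16, foo_req_cache);
 */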

/*
 * A commonly used alloc and free fn that kmallocs/kfrees the amount of
 * memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc_noprof(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
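
/*
 * A minimal usage sketch (editorial addition, not upstream code): the
 * element size travels through @pool_data as a cast integer; the
 * mempool_create_kmalloc_pool() wrapper in <linux/mempool.h> hides the
 * cast.  A pool of eight 512-byte buffers:
 *
 *	pool = mempool_create_kmalloc_pool(8, 512);
 */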

/*
 * kvmalloc/kvfree counterparts of the above, for elements that may fall
 * back to vmalloc.
 */
void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kvmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kvmalloc);

void mempool_kvfree(void *element, void *pool_data)
{
	kvfree(element);
}
EXPORT_SYMBOL(mempool_kvfree);

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages_noprof(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
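
/*
 * A minimal usage sketch (editorial addition, not upstream code): here
 * @pool_data is the page order cast to a pointer; the
 * mempool_create_page_pool() wrapper in <linux/mempool.h> does the cast.
 * A reserve of four order-1 (two-page) allocations:
 *
 *	pool = mempool_create_page_pool(4, 1);
 */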
665