xref: /linux/mm/mempool.c (revision da23ea194db94257123f1534d487f3cdc9b5626d)
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include "slab.h"

#ifdef CONFIG_SLUB_DEBUG_ON
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Skip checking: KASAN might save its metadata in the element. */
	if (kasan_enabled())
		return;

	/* Mempools backed by slab allocator */
	if (pool->free == mempool_kfree) {
		__check_element(pool, element, (size_t)pool->pool_data);
	} else if (pool->free == mempool_free_slab) {
		__check_element(pool, element, kmem_cache_size(pool->pool_data));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_local_page((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_local(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}
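
/*
 * The resulting poison layout (byte values from include/linux/poison.h):
 * a freed element of size 8 is filled as
 *
 *	6b 6b 6b 6b 6b 6b 6b a5
 *
 * i.e. POISON_FREE (0x6b) in every byte but the last, then POISON_END
 * (0xa5).  __check_element() verifies this pattern when the element is
 * removed and then overwrites it with POISON_INUSE (0x5a).
 */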

static void poison_element(mempool_t *pool, void *element)
{
	/* Skip poisoning: KASAN might save its metadata in the element. */
	if (kasan_enabled())
		return;

	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_kmalloc) {
		__poison_element(element, (size_t)pool->pool_data);
	} else if (pool->alloc == mempool_alloc_slab) {
		__poison_element(element, kmem_cache_size(pool->pool_data));
	} else if (pool->alloc == mempool_alloc_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_local_page((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_local(addr);
	}
}
#else /* CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_SLUB_DEBUG_ON */

static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		return kasan_mempool_poison_object(element);
	else if (pool->alloc == mempool_alloc_pages)
		return kasan_mempool_poison_pages(element,
						(unsigned long)pool->pool_data);
	return true;
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_kmalloc)
		kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
	else if (pool->alloc == mempool_alloc_slab)
		kasan_mempool_unpoison_object(element,
					      kmem_cache_size(pool->pool_data));
	else if (pool->alloc == mempool_alloc_pages)
		kasan_mempool_unpoison_pages(element,
					     (unsigned long)pool->pool_data);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->min_nr != 0 && pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	if (kasan_poison_element(pool, element))
		pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element);
	check_element(pool, element);
	return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool along with the internal element
 * array.  The @pool structure itself is not freed (it is typically
 * embedded in another structure).  This function only sleeps if the
 * free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);
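
/*
 * Typical use, sketched (the struct, field names and sizes here are
 * illustrative, not from this file): a pool embedded in a larger object
 * is set up with mempool_init() and torn down with mempool_exit().
 *
 *	struct my_dev {
 *		mempool_t frag_pool;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		return mempool_init(&dev->frag_pool, 16, mempool_kmalloc,
 *				    mempool_kfree, (void *)(unsigned long)256);
 *	}
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		mempool_exit(&dev->frag_pool);
 *	}
 *
 * mempool_init() here guarantees sixteen 256-byte elements; on teardown
 * mempool_exit() frees them and the element array, but not dev itself.
 */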

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		      mempool_free_t *free_fn, void *pool_data,
		      gfp_t gfp_mask, int node_id)
{
	spin_lock_init(&pool->lock);
	pool->min_nr	= min_nr;
	pool->pool_data = pool_data;
	pool->alloc	= alloc_fn;
	pool->free	= free_fn;
	init_waitqueue_head(&pool->wait);
	/*
	 * max() is used here to ensure storage for at least one element,
	 * to support zero-minimum pools.
	 */
	pool->elements = kmalloc_array_node(max(1, min_nr), sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers; a
	 * zero-minimum pool still gets one pre-allocated element.
	 */
	while (pool->curr_nr < max(1, pool->min_nr)) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
		}
		add_element(pool, element);
	}

	return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded
 * in another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init_noprof);
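
/*
 * Callers normally use the mempool_init() wrapper from
 * include/linux/mempool.h rather than calling this _noprof variant
 * directly; the wrapper routes the call through alloc_hooks() so that,
 * when memory allocation profiling is enabled, pool allocations are
 * attributed to the caller.  The same applies to the other _noprof
 * functions below.
 */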

/**
 * mempool_create_node - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 * @gfp_mask:  memory allocation flags
 * @node_id:   numa node to allocate on
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the alloc_fn()
 * and the free_fn() functions might sleep - as long as mempool_alloc()
 * is not called from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
				      mempool_free_t *free_fn, void *pool_data,
				      gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node_noprof);
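
/*
 * A sketch of the common create/destroy pattern (names illustrative):
 * back the pool with a slab cache, so elements come from that cache
 * whenever possible and fall back to the reserve under memory pressure.
 *
 *	struct kmem_cache *my_cache;	(created elsewhere)
 *	mempool_t *pool;
 *
 *	pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
 *			      my_cache);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	mempool_destroy(pool);
 */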

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
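
/*
 * For example (illustrative), a driver that discovers at runtime that it
 * needs a deeper reserve can grow an existing pool:
 *
 *	if (mempool_resize(pool, 32))
 *		pr_warn("pool could not be grown\n");
 *
 * Shrinking takes effect immediately; growing may complete lazily via
 * later mempool_free() calls, as described above.
 */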

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_alloc(gfp_mask);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use the gfp mask w/o direct reclaim or IO for the first round.
	 * If alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc_noprof);
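
/*
 * The canonical call pattern, sketched: allocate with a gfp mask that
 * allows direct reclaim (e.g. GFP_NOIO in the I/O path), so the call
 * cannot fail from process context, do the work, then return the
 * element with mempool_free().
 *
 *	void *buf = mempool_alloc(pool, GFP_NOIO);
 *	...
 *	mempool_free(buf, pool);
 */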

/**
 * mempool_alloc_preallocated - allocate an element from preallocated elements
 *                              belonging to a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function is similar to mempool_alloc(), but it only attempts to
 * allocate an element from the preallocated elements. It does not sleep
 * and returns immediately if no preallocated elements are available.
 *
 * Return: pointer to the allocated element or %NULL if no elements are
 * available.
 */
void *mempool_alloc_preallocated(mempool_t *pool)
{
	void *element;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return NULL;
}
EXPORT_SYMBOL(mempool_alloc_preallocated);
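
/*
 * A sketch of the intended use: callers that must not block can try the
 * reserve first and fall back to a caller-specific slow path.
 *
 *	void *buf = mempool_alloc_preallocated(pool);
 *	if (!buf)
 *		return -EAGAIN;		(caller-specific fallback)
 */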

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  Without the
	 * following rmb, this function may end up using a curr_nr value
	 * which is from before the allocation of @p.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			if (wq_has_sleeper(&pool->wait))
				wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/*
	 * Handle the min_nr == 0 edge case:
	 *
	 * For zero-minimum pools, curr_nr < min_nr (0 < 0) never succeeds,
	 * so waiters sleeping on pool->wait would never be woken by the
	 * wake-up path of the previous test. This explicit check ensures
	 * the element is added back to the pool when both min_nr and
	 * curr_nr are 0, and that any active waiters are properly awakened.
	 */
	if (unlikely(pool->min_nr == 0 &&
		     READ_ONCE(pool->curr_nr) == 0)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr == 0)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			if (wq_has_sleeper(&pool->wait))
				wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
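
/*
 * A zero-minimum pool thus acts as a cache of one spare element: it is
 * created with min_nr == 0 (e.g. mempool_create(0, ...)), still
 * pre-allocates a single element in mempool_init_node(), and the check
 * above keeps one freed element around whenever the pool is empty;
 * further frees go straight back to the underlying allocator.
 */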

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc_noprof(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
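
/*
 * include/linux/mempool.h provides mempool_create_slab_pool(min_nr, kc)
 * as a shorthand that wires up this pair of helpers, e.g.
 *
 *	pool = mempool_create_slab_pool(4, my_cache);
 */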

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc_noprof(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
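
/*
 * The matching shorthand is mempool_create_kmalloc_pool(min_nr, size),
 * which passes this pair of helpers and the element size as pool_data,
 * e.g.
 *
 *	pool = mempool_create_kmalloc_pool(8, 512);
 */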

void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kvmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kvmalloc);

void mempool_kvfree(void *element, void *pool_data)
{
	kvfree(element);
}
EXPORT_SYMBOL(mempool_kvfree);

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages_noprof(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
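
/*
 * Elements of such a pool are struct page pointers.  The shorthand
 * mempool_create_page_pool(min_nr, order) from include/linux/mempool.h
 * sets up this pair of helpers, e.g.
 *
 *	pool = mempool_create_page_pool(32, 0);		(32 single pages)
 */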