// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list that spans all pages.  Used blocks aren't tracked, but we keep a
 * count of how many are currently allocated from the pool as a whole.
 */
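
/*
 * A typical usage sketch (illustrative only -- the "foo" driver, structure
 * and function names below are made up and are not part of this file):
 *
 *	struct foo_cmd {			// small per-command descriptor
 *		__le32 opcode;
 *		__le64 buf_addr;
 *	};
 *
 *	struct foo_dev {
 *		struct dma_pool *cmd_pool;
 *	};
 *
 *	static int foo_setup(struct device *dev, struct foo_dev *foo)
 *	{
 *		foo->cmd_pool = dma_pool_create("foo-cmd", dev,
 *						sizeof(struct foo_cmd), 8, 0);
 *		if (!foo->cmd_pool)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static int foo_issue(struct foo_dev *foo)
 *	{
 *		dma_addr_t dma;
 *		struct foo_cmd *cmd;
 *
 *		cmd = dma_pool_zalloc(foo->cmd_pool, GFP_KERNEL, &dma);
 *		if (!cmd)
 *			return -ENOMEM;
 *		// fill @cmd, hand @dma to the hardware, wait for completion
 *		dma_pool_free(foo->cmd_pool, cmd, dma);
 *		return 0;
 *	}
 *
 *	static void foo_teardown(struct foo_dev *foo)
 *	{
 *		// legal only once every block has been freed
 *		dma_pool_destroy(foo->cmd_pool);
 *	}
 */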

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#ifdef CONFIG_SLUB_DEBUG_ON
#define DMAPOOL_DEBUG 1
#endif

struct dma_block {
	struct dma_block *next_block;
	dma_addr_t dma;
};

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct dma_block *next_block;
	size_t nr_blocks;
	size_t nr_active;
	size_t nr_pages;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	int node;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_pool *pool;
	unsigned size;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
				      pool->name, pool->nr_active,
				      pool->nr_blocks, pool->size,
				      pool->nr_pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);

#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
	u8 *data = (void *)block;
	int i;

	for (i = sizeof(struct dma_block); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
			pool->name, block);

		/*
		 * Dump the whole block, including the leading struct
		 * dma_block bytes, which hold the free-list metadata and
		 * are not expected to contain POOL_POISON_FREED.
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				data, pool->size, 1);
		break;
	}

	if (!want_init_on_alloc(mem_flags))
		memset(block, POOL_POISON_ALLOCATED, pool->size);
}

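/*
 * Debug helper: find the dma_page whose DMA range contains @dma, or NULL
 * if the address was not allocated from this pool.
 */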
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = pool->next_block;
	struct dma_page *page;

	page = pool_find_page(pool, dma);
	if (!page) {
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (block) {
		if (block != vaddr) {
			block = block->next_block;
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}

	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

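/*
 * Pop the first block off the pool-wide free list.  Called with
 * pool->lock held.
 */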
static struct dma_block *pool_block_pop(struct dma_pool *pool)
{
	struct dma_block *block = pool->next_block;

	if (block) {
		pool->next_block = block->next_block;
		pool->nr_active++;
	}
	return block;
}

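/*
 * Push a block back onto the head of the free list.  Called with
 * pool->lock held.
 */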
static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
			    dma_addr_t dma)
{
	block->dma = dma;
	block->next_block = pool->next_block;
	pool->next_block = block;
}


/**
 * dma_pool_create_node - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * @node: optional NUMA node to allocate structs 'dma_pool' and 'dma_page' on
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create_node(const char *name, struct device *dev,
		size_t size, size_t align, size_t boundary, int node)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	if (size < sizeof(struct dma_block))
		size = sizeof(struct dma_block);

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kzalloc_node(sizeof(*retval), GFP_KERNEL, node);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	retval->node = node;
	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is no race between dma_pool_create()
	 * and dma_pool_destroy(), nor between two concurrent dma_pool_create()
	 * calls when the first one fails in device_create_file() and the second
	 * assumes the sysfs file has already been created.  The window is
	 * short, but it exists.
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	empty = list_empty(&dev->dma_pools);
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create_node);
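
/*
 * Example of @boundary (illustrative; the "foo" names are made up): a
 * controller whose 64-byte descriptors must never straddle a 4 KiB page
 * could create its pool as
 *
 *	pool = dma_pool_create_node("foo-desc", dev, 64, 64, 4096,
 *				    dev_to_node(dev));
 *
 * Every block handed out by dma_pool_alloc() is then 64-byte aligned and
 * lies entirely within one 4 KiB window.
 */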
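/*
 * Carve a freshly allocated page into 'size'-byte blocks, skipping forward
 * whenever a block would cross the pool's 'boundary', and splice the new
 * blocks onto the free list.  Called with pool->lock held.
 */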
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int next_boundary = pool->boundary, offset = 0;
	struct dma_block *block, *first = NULL, *last = NULL;

	pool_init_page(pool, page);
	while (offset + pool->size <= pool->allocation) {
		if (offset + pool->size > next_boundary) {
			offset = next_boundary;
			next_boundary += pool->boundary;
			continue;
		}

		block = page->vaddr + offset;
		block->dma = page->dma + offset;
		block->next_block = NULL;

		if (last)
			last->next_block = block;
		else
			first = block;
		last = block;

		offset += pool->size;
		pool->nr_blocks++;
	}

	last->next_block = pool->next_block;
	pool->next_block = first;

	list_add(&page->page_list, &pool->page_list);
	pool->nr_pages++;
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc_node(sizeof(*page), mem_flags, pool->node);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	return page;
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty, busy = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	empty = list_empty(&pool->dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	if (pool->nr_active) {
		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
		busy = true;
	}

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (!busy)
			dma_free_coherent(pool->dev, pool->allocation,
					  page->vaddr, page->dma);
		list_del(&page->page_list);
		kfree(page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
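
/*
 * The "no more memory in use" rule matters.  A sketch of a correct teardown
 * order on a driver's remove path (names are illustrative):
 *
 *	foo_quiesce_hardware(foo);		// stop all DMA first
 *	dma_pool_free(foo->cmd_pool, foo->cmd, foo->cmd_dma);
 *	dma_pool_destroy(foo->cmd_pool);	// only after every block is freed
 *
 * Destroying a pool that still has active blocks triggers the "busy" message
 * in dma_pool_destroy() above, and the underlying DMA memory is deliberately
 * leaked rather than freed out from under the device.
 */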

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	struct dma_block *block;
	struct dma_page *page;
	unsigned long flags;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	block = pool_block_pop(pool);
	if (!block) {
		/*
		 * pool_alloc_page() might sleep, so temporarily drop
		 * &pool->lock
		 */
		spin_unlock_irqrestore(&pool->lock, flags);

		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
		if (!page)
			return NULL;

		spin_lock_irqsave(&pool->lock, flags);
		pool_initialise_page(pool, page);
		block = pool_block_pop(pool);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	*handle = block->dma;
	pool_check_block(pool, block, mem_flags);
	if (want_init_on_alloc(mem_flags))
		memset(block, 0, pool->size);

	return block;
}
EXPORT_SYMBOL(dma_pool_alloc);
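
/*
 * Allocation sketch (illustrative): in process context the caller may sleep,
 * so GFP_KERNEL is fine; in atomic context GFP_ATOMIC must be used instead,
 * and might_alloc() above will warn if a sleeping mask is passed from a
 * context that cannot sleep.
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!buf)
 *		return -ENOMEM;
 *	// the CPU addresses the block through buf, the device through dma
 *
 * dma_pool_zalloc() in <linux/dmapool.h> is the same call with __GFP_ZERO
 * set, so the returned block is zeroed.
 */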

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = vaddr;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool_block_err(pool, vaddr, dma)) {
		pool_block_push(pool, block, dma);
		pool->nr_active--;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
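
/*
 * Note that @vaddr and @dma must be the pair returned by one earlier
 * dma_pool_alloc() call on the same pool.  Drivers typically store the two
 * side by side, e.g. (illustrative)
 *
 *	struct foo_slot {
 *		struct foo_cmd *cmd;	// CPU address from dma_pool_alloc()
 *		dma_addr_t cmd_dma;	// matching DMA address
 *	};
 *
 * and later call dma_pool_free(pool, slot->cmd, slot->cmd_dma).
 */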

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
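
/*
 * Managed usage sketch (illustrative; the "foo" names are made up): in a
 * probe() routine the pool is tied to the device's devres list, so no
 * explicit dma_pool_destroy() is needed on error paths or in remove():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo-desc", &pdev->dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		// pool is destroyed automatically on driver detach
 *		return 0;
 *	}
 */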

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
