/*
 *  Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>
#ifdef CONFIG_SBUS
#include <asm/sbus.h>
#endif


MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@suse.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");


/*
 */

void *snd_malloc_sgbuf_pages(struct device *device,
                             size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size);
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);

/*
 */

static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
	struct snd_dma_buffer buffer;
	unsigned int id;
	struct list_head list;
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1

#ifdef CONFIG_SND_DEBUG
#define __ASTRING__(x) #x
#define snd_assert(expr, args...) do {\
	if (!(expr)) {\
		printk(KERN_ERR "snd-malloc: BUG? (%s) (called from %p)\n", __ASTRING__(expr), __builtin_return_address(0));\
		args;\
	}\
} while (0)
#else
#define snd_assert(expr, args...) /**/
#endif

/*
 *  Hacks
 */

#if defined(__i386__)
/*
 * A hack to allocate large buffers via dma_alloc_coherent()
 *
 * Since dma_alloc_coherent() always tries GFP_DMA when the requested
 * PCI memory region is below 32 bits, it happens quite often that even
 * an order-2 page allocation cannot be satisfied.
 *
 * So, in the following, we allocate at first without the dma_mask, so
 * that the allocation is done without GFP_DMA.  If the resulting area
 * doesn't match the requested region, we reallocate with the original
 * dma_mask again.
 *
 * Really, we want to move this kind of logic into dma_alloc_coherent()
 * itself, so that the dma_mask doesn't have to be messed with here.
 */
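
/*
 * Worked example of the out-of-range check below (illustrative numbers):
 * with dma_mask = 0x00ffffff (a 24-bit mask) and an unmasked allocation
 * landing at *dma_handle = 0x01ff0000 with size = 0x20000, the last byte
 * sits at 0x0200ffff; (0x0200ffff & ~0x00ffffff) != 0, so the buffer is
 * out of reach for the device and is reallocated with the real mask.
 */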

static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle,
					 gfp_t flags)
{
	void *ret;
	u64 dma_mask, coherent_dma_mask;

	if (dev == NULL || !dev->dma_mask)
		return dma_alloc_coherent(dev, size, dma_handle, flags);
	dma_mask = *dev->dma_mask;
	coherent_dma_mask = dev->coherent_dma_mask;
	*dev->dma_mask = 0xffffffff; 	/* do without masking */
	dev->coherent_dma_mask = 0xffffffff; 	/* do without masking */
	ret = dma_alloc_coherent(dev, size, dma_handle, flags);
	*dev->dma_mask = dma_mask;	/* restore */
	dev->coherent_dma_mask = coherent_dma_mask;	/* restore */
	if (ret) {
		/* obtained address is out of range? */
		if (((unsigned long)*dma_handle + size - 1) & ~dma_mask) {
			/* reallocate with the proper mask */
			dma_free_coherent(dev, size, ret, *dma_handle);
			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
		}
	} else {
		/* try again, hoping to succeed with the proper mask... */
		if (dma_mask != 0xffffffffUL) {
			/* allocate with GFP_ATOMIC to avoid a long stall */
			flags &= ~GFP_KERNEL;
			flags |= GFP_ATOMIC;
			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
		}
	}
	return ret;
}

/* redefine dma_alloc_coherent for some architectures */
#undef dma_alloc_coherent
#define dma_alloc_coherent snd_dma_hack_alloc_coherent

#endif /* arch */

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

static inline void inc_snd_pages(int order)
{
	snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
	snd_allocated_pages -= 1 << order;
}
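
/*
 * A quick numeric sketch of the accounting above (assuming 4kB pages):
 * a request of 24576 bytes gives get_order(24576) == 3, i.e. the
 * smallest power-of-two block that fits is 8 pages, so inc_snd_pages(3)
 * bumps snd_allocated_pages by 1 << 3 == 8.
 */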

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates physically contiguous pages covering the given size.
 *
 * Returns the pointer to the buffer, or NULL if there is not enough memory.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(gfp_flags != 0, return NULL);
	gfp_flags |= __GFP_COMP;	/* compound page lets parts be mapped */
	pg = get_order(size);
	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
		inc_snd_pages(pg);
	return res;
}
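
/*
 * Minimal usage sketch (illustrative only; the 8kB size and GFP_KERNEL
 * are arbitrary choices).  The same size must be passed back to
 * snd_free_pages():
 *
 *	void *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	snd_free_pages(buf, 8192);
 */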

/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	free_pages((unsigned long) ptr, pg);
}

/*
 *
 *  Bus-specific memory allocators
 *
 */

/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
	int pg;
	void *res;
	gfp_t gfp_flags;

	snd_assert(size > 0, return NULL);
	snd_assert(dma != NULL, return NULL);
	pg = get_order(size);
	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
	if (res != NULL)
		inc_snd_pages(pg);

	return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
			       dma_addr_t dma)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}

#ifdef CONFIG_SBUS

static void *snd_malloc_sbus_pages(struct device *dev, size_t size,
				   dma_addr_t *dma_addr)
{
	struct sbus_dev *sdev = (struct sbus_dev *)dev;
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(dma_addr != NULL, return NULL);
	pg = get_order(size);
	res = sbus_alloc_consistent(sdev, PAGE_SIZE << pg, dma_addr);
	if (res != NULL)
		inc_snd_pages(pg);
	return res;
}

static void snd_free_sbus_pages(struct device *dev, size_t size,
				void *ptr, dma_addr_t dma_addr)
{
	struct sbus_dev *sdev = (struct sbus_dev *)dev;
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	sbus_free_consistent(sdev, PAGE_SIZE << pg, ptr, dma_addr);
}

#endif /* CONFIG_SBUS */

/*
 *
 *  ALSA generic memory management
 *
 */


/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	snd_assert(size > 0, return -ENXIO);
	snd_assert(dmab != NULL, return -ENXIO);

	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		/* for CONTINUOUS buffers, the "device" argument carries
		 * the GFP flags rather than a real device pointer
		 * (cf. snd_dma_continuous_data() in <sound/memalloc.h>)
		 */
		dmab->area = snd_malloc_pages(size, (unsigned long)device);
		dmab->addr = 0;
		break;
#ifdef CONFIG_SBUS
	case SNDRV_DMA_TYPE_SBUS:
		dmab->area = snd_malloc_sbus_pages(device, size, &dmab->addr);
		break;
#endif
	case SNDRV_DMA_TYPE_DEV:
		dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
		break;
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
		dmab->area = NULL;
		dmab->addr = 0;
		return -ENXIO;
	}
	if (! dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
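
/*
 * Usage sketch (illustrative only): a PCI driver allocating a coherent
 * 64kB DMA buffer.  snd_dma_pci_data() wraps the pci_dev into the
 * struct device pointer expected here.
 *
 *	struct snd_dma_buffer dmab;
 *
 *	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 *				64 * 1024, &dmab) < 0)
 *		return -ENOMEM;
 *	...use dmab.area (CPU address) and dmab.addr (bus address)...
 *	snd_dma_free_pages(&dmab);
 */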

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function halves the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Returns zero if a buffer is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	snd_assert(size > 0, return -ENXIO);
	snd_assert(dmab != NULL, return -ENXIO);

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		size >>= 1;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
	}
	if (! dmab->area)
		return -ENOMEM;
	return 0;
}
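
/*
 * Usage sketch (illustrative only): ask for 512kB but accept whatever
 * smaller chunk the fallback loop manages to obtain; dmab.bytes then
 * holds the size that was actually allocated.
 *
 *	struct snd_dma_buffer dmab;
 *
 *	if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV,
 *					 snd_dma_pci_data(pci),
 *					 512 * 1024, &dmab) < 0)
 *		return -ENOMEM;
 */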


/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	switch (dmab->dev.type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		snd_free_pages(dmab->area, dmab->bytes);
		break;
#ifdef CONFIG_SBUS
	case SNDRV_DMA_TYPE_SBUS:
		snd_free_sbus_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
		break;
#endif
	case SNDRV_DMA_TYPE_DEV:
		snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
		break;
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_free_sgbuf_pages(dmab);
		break;
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
	}
}


/**
 * snd_dma_get_reserved_buf - get the reserved buffer for the given device
 * @dmab: the buffer allocation record to store
 * @id: the buffer id
 *
 * Searches the reserved-buffer list for a buffer with a matching id and
 * device, and re-uses it if found.  The matched buffer is removed from
 * the list.
 *
 * Returns the size of the buffer if it is found, or zero if not found.
 */
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
	struct snd_mem_list *mem;

	snd_assert(dmab, return 0);

	mutex_lock(&list_mutex);
	list_for_each_entry(mem, &mem_list_head, list) {
		if (mem->id == id &&
		    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
		     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
			struct device *dev = dmab->dev.dev;
			list_del(&mem->list);
			*dmab = mem->buffer;
			if (dmab->dev.dev == NULL)
				dmab->dev.dev = dev;
			kfree(mem);
			mutex_unlock(&list_mutex);
			return dmab->bytes;
		}
	}
	mutex_unlock(&list_mutex);
	return 0;
}

/**
 * snd_dma_reserve_buf - reserve the buffer
 * @dmab: the buffer to reserve
 * @id: the buffer id
 *
 * Adds the given buffer to the reserved-buffer list so that it can be
 * re-used later via snd_dma_get_reserved_buf().
 *
 * Returns zero if successful, or a negative code on error.
 */
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
	struct snd_mem_list *mem;

	snd_assert(dmab, return -EINVAL);
	mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	if (! mem)
		return -ENOMEM;
	mutex_lock(&list_mutex);
	mem->buffer = *dmab;
	mem->id = id;
	list_add_tail(&mem->list, &mem_list_head);
	mutex_unlock(&list_mutex);
	return 0;
}
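
/*
 * Typical reserve/re-use pattern (illustrative sketch): a driver first
 * looks for a previously reserved buffer and falls back to a fresh
 * allocation; on teardown it parks the buffer on the list again instead
 * of freeing it.  snd_dma_pci_buf_id() derives a stable id from the PCI
 * vendor/device numbers.
 *
 *	struct snd_dma_buffer dmab;
 *
 *	memset(&dmab, 0, sizeof(dmab));
 *	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
 *	dmab.dev.dev = snd_dma_pci_data(pci);
 *	if (!snd_dma_get_reserved_buf(&dmab, snd_dma_pci_buf_id(pci)) &&
 *	    snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 *				64 * 1024, &dmab) < 0)
 *		return -ENOMEM;
 *	...
 *	snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
 */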

/*
 * purge all reserved buffers
 */
static void free_all_reserved_pages(void)
{
	struct list_head *p;
	struct snd_mem_list *mem;

	mutex_lock(&list_mutex);
	while (! list_empty(&mem_list_head)) {
		p = mem_list_head.next;
		mem = list_entry(p, struct snd_mem_list, list);
		list_del(p);
		snd_dma_free_pages(&mem->buffer);
		kfree(mem);
	}
	mutex_unlock(&list_mutex);
}


#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
#define SND_MEM_PROC_FILE	"driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;

static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
	long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
	struct snd_mem_list *mem;
	int devno;
	static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };

	mutex_lock(&list_mutex);
	seq_printf(seq, "pages  : %li bytes (%li pages per %likB)\n",
		   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
	devno = 0;
	list_for_each_entry(mem, &mem_list_head, list) {
		devno++;
		seq_printf(seq, "buffer %d : ID %08x : type %s\n",
			   devno, mem->id, types[mem->buffer.dev.type]);
		seq_printf(seq, "  addr = 0x%lx, size = %d bytes\n",
			   (unsigned long)mem->buffer.addr,
			   (int)mem->buffer.bytes);
	}
	mutex_unlock(&list_mutex);
	return 0;
}

static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, snd_mem_proc_read, NULL);
}

/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")

static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
				  size_t count, loff_t * ppos)
{
	char buf[128];
	char *token, *p;

	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, buffer, count))
		return -EFAULT;
	buf[count] = '\0';

	p = buf;
	token = gettoken(&p);
	if (! token || *token == '#')
		return count;
	if (strcmp(token, "add") == 0) {
		char *endp;
		int vendor, device, size, buffers;
		long mask;
		int i, alloced;
		struct pci_dev *pci;

		if ((token = gettoken(&p)) == NULL ||
		    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (device = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (mask = simple_strtol(token, NULL, 0)) < 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (size = memparse(token, &endp)) < 64*1024 ||
		    size > 16*1024*1024 /* too big */ ||
		    (token = gettoken(&p)) == NULL ||
		    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
		    buffers > 4) {
			printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
			return count;
		}
		vendor &= 0xffff;
		device &= 0xffff;

		alloced = 0;
		pci = NULL;
		while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
			if (mask > 0 && mask < 0xffffffff) {
				if (pci_set_dma_mask(pci, mask) < 0 ||
				    pci_set_consistent_dma_mask(pci, mask) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
					return count;
				}
			}
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					pci_dev_put(pci);
					return count;
				}
				snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
			}
			alloced++;
		}
		if (! alloced) {
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				/* FIXME: We can allocate only in ZONE_DMA
				 * without a device pointer!
				 */
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					break;
				}
				snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
			}
		}
	} else if (strcmp(token, "erase") == 0)
		/* FIXME: need for releasing each buffer chunk? */
		free_all_reserved_pages();
	else
		printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
	return count;
}
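
/*
 * Example of the proc interface parsed above (values are illustrative):
 * pre-allocate two 512kB buffers for every matching PCI device
 * (here vendor 0x8086, device 0x2668), with no extra DMA-mask limit:
 *
 *	echo "add 0x8086 0x2668 0 512k 2" > /proc/driver/snd-page-alloc
 *
 * To release all reserved buffers again:
 *
 *	echo erase > /proc/driver/snd-page-alloc
 */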
#endif /* CONFIG_PCI */

static const struct file_operations snd_mem_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= snd_mem_proc_open,
	.read		= seq_read,
#ifdef CONFIG_PCI
	.write		= snd_mem_proc_write,
#endif
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PROC_FS */

/*
 * module entry
 */

static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
	snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
	if (snd_mem_proc)
		snd_mem_proc->proc_fops = &snd_mem_proc_fops;
#endif
	return 0;
}

static void __exit snd_mem_exit(void)
{
	remove_proc_entry(SND_MEM_PROC_FILE, NULL);
	free_all_reserved_pages();
	if (snd_allocated_pages > 0)
		printk(KERN_ERR "snd-malloc: Memory leak?  pages not freed = %li\n", snd_allocated_pages);
}


module_init(snd_mem_init)
module_exit(snd_mem_exit)

/*
 * exports
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);

EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);

EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);