// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
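
/*
 * Example (a minimal sketch; "my_dev" and the 64 KiB size are only
 * placeholders): a typical one-shot allocation and release looks like
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, my_dev,
 *				      DMA_TO_DEVICE, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	// buf.area holds the CPU address, buf.addr the DMA address
 *	snd_dma_free_pages(&buf);
 */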

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
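
/*
 * Note for callers: on success the size actually obtained may be smaller
 * than the size originally requested; read it back from dmab->bytes
 * instead of assuming the requested value.
 */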

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically when the device is removed.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with the SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC types.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
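
/*
 * Example (a minimal sketch; "pdev" is a placeholder platform device):
 * a devres-managed allocation needs no explicit free; the buffer is
 * released automatically when the device is unbound.
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_dir_pages(&pdev->dev, SNDRV_DMA_TYPE_DEV,
 *				       DMA_BIDIRECTIONAL, 64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 */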

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
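
/*
 * Example (a hedged sketch; "buf" is a snd_dma_buffer pointer of a
 * non-coherent type such as SNDRV_DMA_TYPE_NONCONTIG): a driver that
 * wrote data through buf->area would hand the buffer back to the
 * device with
 *
 *	snd_dma_buffer_sync(buf, SNDRV_DMA_SYNC_DEVICE);
 *
 * and, conversely, call snd_dma_buffer_sync(buf, SNDRV_DMA_SYNC_CPU)
 * before reading data written by the device.  Both calls are cheap
 * no-ops when buf->dev.need_sync is false.
 */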

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
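
/*
 * Example (a hedged sketch; "write_desc" and "chip" are hypothetical
 * driver helpers): hardware scatter-gather descriptors can be
 * programmed by walking the buffer in physically contiguous chunks.
 *
 *	unsigned int ofs = 0, rest = dmab->bytes, chunk;
 *
 *	while (rest > 0) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
 *		write_desc(chip, snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *		rest -= chunk;
 *	}
 */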

/*
 * Continuous pages allocator
 */
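/*
 * do_alloc_pages() below grabs physically contiguous pages with
 * alloc_pages_exact(); if the result doesn't fit within the device's
 * coherent DMA mask, it retries from ZONE_DMA32 and then ZONE_DMA.
 * On x86 the pages may additionally be remapped as write-combined.
 */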
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
			    bool wc)
{
	void *p;
	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

 again:
	p = alloc_pages_exact(size, gfp);
	if (!p)
		return NULL;
	*addr = page_to_phys(virt_to_page(p));
	if (!dev)
		return p;
	if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
#ifdef CONFIG_X86
	if (wc)
		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	return p;
}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_X86
	if (wc)
		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	free_pages_exact(p, size);
}


static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might be too small or have no space left,
	 * so if the allocation fails, fall back to the regular
	 * SNDRV_DMA_TYPE_DEV allocation.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
/* x86-specific allocations */
#ifdef CONFIG_SND_DMA_SGBUF
#define x86_fallback(dmab)	(!get_dma_ops(dmab->dev.dev))
#else
#define x86_fallback(dmab)	false
#endif
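
/*
 * Note: get_dma_ops() returning NULL means the device uses the direct
 * DMA mapping (no IOMMU or other dma_map_ops attached); in that case
 * the hand-rolled page allocation above is used as the x86 fallback
 * path.
 */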

static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	if (x86_fallback(dmab))
		return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	if (x86_fallback(dmab)) {
		do_free_pages(dmab->area, dmab->bytes, true);
		return;
	}
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
#ifdef CONFIG_SND_DMA_SGBUF
	if (x86_fallback(dmab)) {
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
		return snd_dma_continuous_mmap(dmab, area);
	}
#endif
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
	if (!sgt)
		return NULL;

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p) {
		dmab->private_data = sgt;
		/* store the first page address for convenience */
		dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	} else {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	}
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

#ifdef CONFIG_SND_DMA_SGBUF
/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	bool use_dma_alloc_coherent;
	size_t count;
	struct page **pages;
	/* DMA address array; the first page contains #pages in ~PAGE_MASK */
	dma_addr_t *addrs;
};
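
/*
 * Each physically contiguous chunk is described by consecutive entries
 * of pages[] and addrs[]: the PAGE_MASK bits of an addrs[] entry hold
 * the per-page DMA address, and the low (~PAGE_MASK) bits of the
 * chunk's first entry hold the number of pages in that chunk.
 * __snd_dma_sg_fallback_free() relies on that count to walk and
 * release the chunks.
 */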

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	size_t i, size;

	if (sgbuf->pages && sgbuf->addrs) {
		i = 0;
		while (i < sgbuf->count) {
			if (!sgbuf->pages[i] || !sgbuf->addrs[i])
				break;
			size = sgbuf->addrs[i] & ~PAGE_MASK;
			if (WARN_ON(!size))
				break;
			if (sgbuf->use_dma_alloc_coherent)
				dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
						  page_address(sgbuf->pages[i]),
						  sgbuf->addrs[i] & PAGE_MASK);
			else
				do_free_pages(page_address(sgbuf->pages[i]),
					      size << PAGE_SHIFT, false);
			i += size;
		}
	}
	kvfree(sgbuf->pages);
	kvfree(sgbuf->addrs);
	kfree(sgbuf);
}

/* fallback manual S/G buffer allocations */
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pagep, *curp;
	size_t chunk, npages;
	dma_addr_t *addrp;
	dma_addr_t addr;
	void *p;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
	size = PAGE_ALIGN(size);
	sgbuf->count = size >> PAGE_SHIFT;
	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
	sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->pages || !sgbuf->addrs)
		goto error;

	pagep = sgbuf->pages;
	addrp = sgbuf->addrs;
	chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
	while (size > 0) {
		chunk = min(size, chunk);
		if (sgbuf->use_dma_alloc_coherent)
			p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
		else
			p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				goto error;
			chunk >>= 1;
			chunk = PAGE_SIZE << get_order(chunk);
			continue;
		}

		size -= chunk;
		/* fill pages */
		npages = chunk >> PAGE_SHIFT;
		*addrp = npages; /* store in lower bits */
		curp = virt_to_page(p);
		while (npages--) {
			*pagep++ = curp++;
			*addrp++ |= addr;
			addr += PAGE_SIZE;
		}
	}

	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		set_pages_array_wc(sgbuf->pages, sgbuf->count);

	dmab->private_data = sgbuf;
	/* store the first page address for convenience */
	dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
	return p;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		set_pages_array_wb(sgbuf->pages, sgbuf->count);
	vunmap(dmab->area);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
	size_t index = offset >> PAGE_SHIFT;

	return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	int type = dmab->dev.type;
	void *p;

	if (cpu_feature_enabled(X86_FEATURE_XENPV))
		return snd_dma_sg_fallback_alloc(dmab, size);

	/* try the standard DMA API allocation at first */
	if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
	else
		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	p = __snd_dma_alloc_pages(dmab, size);
	if (p)
		return p;

	dmab->dev.type = type; /* restore the type */
	/* if IOMMU is present but failed, give up */
	if (!x86_fallback(dmab))
		return NULL;
	/* try fallback */
	return snd_dma_sg_fallback_alloc(dmab, size);
}

static const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	.get_addr = snd_dma_sg_fallback_get_addr,
	/* reuse vmalloc helpers */
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
		return NULL;
	return snd_dma_ops[dmab->dev.type];
}
909