// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The "page" arguments of these two macros are Emu pages (4096 bytes),
 * not the (possibly larger) aligned kernel pages used elsewhere.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((__le32 *)(emu)->ptb_pages.area)[page] = \
	 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
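
/*
 * Illustration (not from the original source; assuming address_mode == 1,
 * i.e. 32-bit mode): a physical page address of 0x12345000 stored at PTB
 * index 5 becomes
 *
 *	(0x12345000 << 1) | 5 == 0x2468a005
 *
 * so the entry carries both the page address and its own PTB index.
 */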

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0		(MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1		(MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
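
/*
 * Worked example: on a kernel with 16 KiB pages, UNIT_PAGES ==
 * 16384 / 4096 == 4, so each aligned page covers four Emu pages and
 * MAX_ALIGN_PAGES0/1 shrink by the same factor; with the common 4 KiB
 * PAGE_SIZE the two page sizes coincide and UNIT_PAGES == 1.
 */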

#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
			(unsigned int)__get_ptb_entry(emu, page));
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		/* do not increment the address -- all entries point at the silent page */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
		dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
			page, (unsigned int)__get_ptb_entry(emu, page));
	}
}
#endif /* PAGE_SIZE */


/* prototypes */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize the emu10k1-specific part of the memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
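
/*
 * Example of the arithmetic above (assuming 4 KiB PAGE_SIZE): a block
 * with mem.offset == 0x1800 and mem.size == 0x3000 covers bytes
 * 0x1800..0x47ff, giving first_page == 1, last_page == 4, pages == 4.
 */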

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its start page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 1, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		} else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
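
/*
 * Example (hypothetical layout): with blocks mapped at pages 1..3 and
 * 8..9, a request for 4 pages returns the hole at page 4 immediately as
 * an exact fit; a request for 2 pages records that hole as 'candidate'
 * but prefers the tail region starting at page 10 whenever the tail is
 * at least as large as the best interior hole.
 */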

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	if (page == 0) {
		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
		return -EINVAL;
	}
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region (in pages)
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	p = blk->mapped_link.prev;
	if (p != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else {
		start_page = 1;
	}
	p = blk->mapped_link.next;
	if (p != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else {
		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
	}

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
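
/*
 * Example: if the previous neighbor occupies pages up to 3 and the next
 * one starts at page 10, unmapping any block in between frees the whole
 * region 4..9 and 6 is returned -- possibly more than blk->pages, since
 * adjacent holes coalesce into the result.
 */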

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}


/*
 * check whether the given DMA address is usable as a page address
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err_ratelimited(emu->card->dev,
			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
			emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block onto the PTB
 * if the block is already mapped, just update its order link
 * if no empty region is found, try to release unused memory blocks
 * and retry the mapping
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;

	guard(spinlock_irqsave)(&emu->memblk_lock);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		return 0;
	}
	err = map_memblk(emu, blk);
	if (err < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest one
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* now the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
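
/*
 * A hedged usage sketch (hypothetical caller, not from this file): a
 * voice-setup path would (re)map the block right before use and bail
 * out on failure:
 *
 *	err = snd_emu10k1_memblk_map(emu, blk);
 *	if (err < 0)
 *		return err;
 *
 * On success the block stays mapped until it is evicted by a later
 * mapping attempt (unless map_locked is set) or freed.
 */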

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	guard(mutex)(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes);
	if (blk == NULL)
		return NULL;
	/* fill the buffer addresses; the kernel pointers are left NULL,
	 * so that synth_free() will not release these pages
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			dev_err_ratelimited(emu->card->dev,
				"emu: failure page = %d\n", idx);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		return NULL;
	}
	return (struct snd_util_memblk *)blk;
}
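
/*
 * A minimal sketch of the expected call pattern (hypothetical; the
 * real callers live in the PCM code): allocate in hw_params and keep
 * the returned handle for freeing in hw_free:
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_alloc_pages(emu, substream);
 *	if (!blk)
 *		return -ENOMEM;
 *	...
 *	snd_emu10k1_free_pages(emu, blk);
 */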


/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}

/*
 * allocate DMA pages, widening the allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c
 * for why this might be needed.
 *
 * If you modify this function, check whether __synth_free_pages() also
 * needs changes.
 */
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
					struct snd_dma_buffer *dmab)
{
	if (emu->iommu_workaround) {
		size_t npages = DIV_ROUND_UP(size, PAGE_SIZE);
		size_t size_real = npages * PAGE_SIZE;

		/*
		 * The device has been observed to access up to 256 extra
		 * bytes, but use 1k to be safe.
		 */
		if (size_real < size + 1024)
			size += PAGE_SIZE;
	}

	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
				   &emu->pci->dev, size, dmab);
}
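
/*
 * Worked example (assuming 4 KiB PAGE_SIZE and iommu_workaround set):
 * for size == 4096, size_real == 4096 and 4096 < 4096 + 1024 holds, so
 * the allocation grows by a whole page to 8192 bytes; for size == 3000,
 * size_real == 4096 already leaves 1096 bytes of slack past the buffer
 * end, so the size is left alone.
 */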

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	guard(mutex)(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL)
		return NULL;
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;

	guard(mutex)(&hdr->block_mutex);
	scoped_guard(spinlock_irqsave, &emu->memblk_lock) {
		if (blk->mapped_page >= 0)
			unmap_memblk(emu, blk);
	}
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
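
/*
 * A hedged end-to-end sketch (hypothetical caller, e.g. a wavetable
 * sample loader; 'buf' and 'sample_bytes' are made-up names): allocate,
 * fill from user space, and free again on failure:
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_synth_alloc(emu, sample_bytes);
 *	if (!blk)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf,
 *					     sample_bytes, 0)) {
 *		snd_emu10k1_synth_free(emu, blk);
 *		return -EFAULT;
 *	}
 */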

/* compute the range of pages that actually need allocation,
 * excluding pages shared with the neighboring blocks
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	p = blk->mem.list.prev;
	if (p != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	p = blk->mem.list.next;
	if (p != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
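
/*
 * Example: if the preceding block's last_page is 7 and this block's
 * first_page is also 7, the two blocks share that kernel page; it is
 * already allocated, so the returned range starts at page 8 instead.
 */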

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	struct snd_dma_buffer dmab;
	int page;

	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = &emu->pci->dev;

	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];

		/*
		 * please keep me in sync with logic in
		 * snd_emu10k1_alloc_pages_maybe_wider()
		 */
		dmab.bytes = PAGE_SIZE;
		if (emu->iommu_workaround)
			dmab.bytes *= 2;

		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
							&dmab) < 0)
			goto __fail;
		if (!is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release the previously allocated pages */
	last_page = page - 1;
	__synth_free_pages(emu, first_page, last_page);

	return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (!ptr) {
		dev_err(emu->card->dev,
			"access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}

/*
 * memset(blk + offset, value, size)
 */
int snd_emu10k1_synth_memset(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			     int offset, int size, u8 value)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	if (snd_BUG_ON(offset + size > p->mem.size))
		return -EFAULT;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, value, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_memset);

// Note that the value is assumed to be suitably repetitive.
static void xor_range(void *ptr, int size, u32 value)
{
	if ((long)ptr & 1) {
		*(u8 *)ptr ^= (u8)value;
		ptr++;
		size--;
	}
	if (size > 1 && ((long)ptr & 2)) {
		*(u16 *)ptr ^= (u16)value;
		ptr += 2;
		size -= 2;
	}
	while (size > 3) {
		*(u32 *)ptr ^= value;
		ptr += 4;
		size -= 4;
	}
	if (size > 1) {
		*(u16 *)ptr ^= (u16)value;
		ptr += 2;
		size -= 2;
	}
	if (size > 0)
		*(u8 *)ptr ^= (u8)value;
}
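
/*
 * Illustration of the "suitably repetitive" requirement: a value like
 * 0x80808080 (the same byte in all four positions) XORs identically
 * whether applied in u8, u16, or u32 chunks, so the unaligned head/tail
 * handling above cannot change the result. Flipping the sign bit of
 * 8-bit samples (XOR 0x80) is the classic use of such a pattern.
 */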

/*
 * copy_from_user(blk + offset, data, size) ^ xor
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size, u32 xor)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	if (snd_BUG_ON(offset + size > p->mem.size))
		return -EFAULT;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr) {
			if (copy_from_user(ptr, data, temp))
				return -EFAULT;
			if (xor)
				xor_range(ptr, temp, xor);
		}
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);
650