// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) by Jaroslav Kysela
 * Copyright (c) by Takashi Iwai
 * Copyright (c) by Scott McNab
 *
 * Trident 4DWave-NX memory page allocation (TLB area)
 * The Trident chip can handle only 16 MBytes of memory at a time.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include "trident.h"

/* page arguments of these two macros are Trident pages (4096 bytes),
 * not the aligned pages used elsewhere
 */
#define __set_tlb_bus(trident,page,addr) \
	(trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1))
#define __tlb_to_addr(trident,page) \
	(dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))

#if PAGE_SIZE == 4096
/* page size == SNDRV_TRIDENT_PAGE_SIZE */
#define ALIGN_PAGE_SIZE		PAGE_SIZE	/* minimum page size for allocation */
#define MAX_ALIGN_PAGES		SNDRV_TRIDENT_MAX_PAGES /* maximum aligned pages */
/* fill TLB entry (or entries) corresponding to page with ptr */
#define set_tlb_bus(trident,page,addr)	__set_tlb_bus(trident,page,addr)
/* fill TLB entry (or entries) corresponding to page with silence pointer */
#define set_silent_tlb(trident,page)	__set_tlb_bus(trident, page, trident->tlb.silent_page->addr)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> 12)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << 12)
/* get PCI physical address from aligned page */
#define page_to_addr(trident,page)	__tlb_to_addr(trident, page)

#elif PAGE_SIZE == 8192
/* page size == SNDRV_TRIDENT_PAGE_SIZE x 2 */
#define ALIGN_PAGE_SIZE		PAGE_SIZE
#define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / 2)
#define get_aligned_page(offset)	((offset) >> 13)
#define aligned_page_offset(page)	((page) << 13)
#define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) << 1)

/* fill TLB entries -- we need to fill two entries */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
			       dma_addr_t addr)
{
	page <<= 1;
	__set_tlb_bus(trident, page, addr);
	__set_tlb_bus(trident, page + 1, addr + SNDRV_TRIDENT_PAGE_SIZE);
}

static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
	page <<= 1;
	__set_tlb_bus(trident, page, trident->tlb.silent_page->addr);
	__set_tlb_bus(trident, page + 1, trident->tlb.silent_page->addr);
}

#else
/* arbitrary size */
#define UNIT_PAGES		(PAGE_SIZE / SNDRV_TRIDENT_PAGE_SIZE)
#define ALIGN_PAGE_SIZE		(SNDRV_TRIDENT_PAGE_SIZE * UNIT_PAGES)
#define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / UNIT_PAGES)
/* Note: if the alignment doesn't match the maximum size, the last few blocks
 * become unusable.  To use such blocks, you'll need to check the validity
 * of the accessed page in set_tlb_bus() and set_silent_tlb();
 * search_empty() should check it as well.
 */
#define get_aligned_page(offset)	((offset) / ALIGN_PAGE_SIZE)
#define aligned_page_offset(page)	((page) * ALIGN_PAGE_SIZE)
#define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) * UNIT_PAGES)

/* fill TLB entries -- UNIT_PAGES entries must be filled */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
			       dma_addr_t addr)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_tlb_bus(trident, page, addr);
		addr += SNDRV_TRIDENT_PAGE_SIZE;
	}
}

static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		__set_tlb_bus(trident, page, trident->tlb.silent_page->addr);
}

#endif /* PAGE_SIZE */

/* first and last (aligned) pages of memory block */
#define firstpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->first_page)
#define lastpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->last_page)

/*
 * search for a run of empty pages big enough to hold the given size
 */
static struct snd_util_memblk *
search_empty(struct snd_util_memhdr *hdr, int size)
{
	struct snd_util_memblk *blk;
	int page, psize;
	struct list_head *p;

	psize = get_aligned_page(size + ALIGN_PAGE_SIZE - 1);
	page = 0;
	/* first-fit search over the address-ordered block list */
	list_for_each(p, &hdr->block) {
		blk = list_entry(p, struct snd_util_memblk, list);
		if (page + psize <= firstpg(blk))
			goto __found_pages;
		page = lastpg(blk) + 1;
	}
	if (page + psize > MAX_ALIGN_PAGES)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = __snd_util_memblk_new(hdr, psize * ALIGN_PAGE_SIZE, p->prev);
	if (blk == NULL)
		return NULL;
	blk->offset = aligned_page_offset(page);	/* set aligned offset */
	firstpg(blk) = page;
	lastpg(blk) = page + psize - 1;
	return blk;
}

/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_trident *trident, unsigned long ptr)
{
	if (ptr & ~0x3fffffffUL) {
		dev_err(trident->card->dev, "max memory size is 1GB!!\n");
		return 0;
	}
	if (ptr & (SNDRV_TRIDENT_PAGE_SIZE-1)) {
		dev_err(trident->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * page allocation for DMA (Scatter-Gather version)
 */
static struct snd_util_memblk *
snd_trident_alloc_sg_pages(struct snd_trident *trident,
			   struct snd_pcm_substream *substream)
{
	struct snd_util_memhdr *hdr;
	struct snd_util_memblk *blk;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int idx, page;

	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes > SNDRV_TRIDENT_MAX_PAGES * SNDRV_TRIDENT_PAGE_SIZE))
		return NULL;
	hdr = trident->tlb.memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(hdr, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}

	/* set TLB entries */
	idx = 0;
	for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, ofs);

		if (!is_valid_page(trident, addr)) {
			__snd_util_mem_free(hdr, blk);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		set_tlb_bus(trident, page, addr);
	}
	mutex_unlock(&hdr->block_mutex);
	return blk;
}

/*
 * page allocation for DMA (contiguous version)
 */
static struct snd_util_memblk *
snd_trident_alloc_cont_pages(struct snd_trident *trident,
			     struct snd_pcm_substream *substream)
{
	struct snd_util_memhdr *hdr;
	struct snd_util_memblk *blk;
	int page;
	struct snd_pcm_runtime *runtime = substream->runtime;
	dma_addr_t addr;

	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes > SNDRV_TRIDENT_MAX_PAGES * SNDRV_TRIDENT_PAGE_SIZE))
		return NULL;
	hdr = trident->tlb.memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(hdr, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}

	/* set TLB entries */
	addr = runtime->dma_addr;
	for (page = firstpg(blk); page <= lastpg(blk); page++,
	     addr += SNDRV_TRIDENT_PAGE_SIZE) {
		if (!is_valid_page(trident, addr)) {
			__snd_util_mem_free(hdr, blk);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		set_tlb_bus(trident, page, addr);
	}
	mutex_unlock(&hdr->block_mutex);
	return blk;
}

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_trident_alloc_pages(struct snd_trident *trident,
			struct snd_pcm_substream *substream)
{
	if (snd_BUG_ON(!trident || !substream))
		return NULL;
	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_SG)
		return snd_trident_alloc_sg_pages(trident, substream);
	else
		return snd_trident_alloc_cont_pages(trident, substream);
}

/*
 * release DMA buffer from page table
 */
int snd_trident_free_pages(struct snd_trident *trident,
			   struct snd_util_memblk *blk)
{
	struct snd_util_memhdr *hdr;
	int page;

	if (snd_BUG_ON(!trident || !blk))
		return -EINVAL;

	hdr = trident->tlb.memhdr;
	mutex_lock(&hdr->block_mutex);
	/* reset TLB entries */
	for (page = firstpg(blk); page <= lastpg(blk); page++)
		set_silent_tlb(trident, page);
	/* free memory block */
	__snd_util_mem_free(hdr, blk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}