/*
 *  Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page argument of these two macros is in Emu pages (4096 bytes),
 * not in the aligned kernel pages used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
        (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))

#define UNIT_PAGES              (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES         (MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)        ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)       ((page) << PAGE_SHIFT)

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)        __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                addr += EMUPAGESIZE;
        }
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++)
                /* do not increment the silent page address */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
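/*
 * Each PTB entry packs the bus address of a 4kB audio page together with
 * the index of the entry itself: (addr << 1) | page.  As an illustrative
 * example (hypothetical address), a page at bus address 0x12345000 mapped
 * at PTB index 3 is stored as the little-endian word
 * (0x12345000 << 1) | 3 == 0x2468a003.
 */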
/*
 * prototypes
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize the emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;

        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its start page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
        int page = 0, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each (pos, &emu->mapped_link_head) {
                struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
                snd_assert(blk->mapped_page >= 0, continue);
                size = blk->mapped_page - page;
                if (size == npages) {
                        *nextp = pos;
                        return page;
                }
                else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = MAX_ALIGN_PAGES - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}
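/*
 * Illustrative example (hypothetical layout): with blocks mapped at
 * aligned pages [0..3] and [8..15], a request for npages == 4 returns
 * page 4 immediately (exact fit between the two blocks), whereas a
 * request for npages == 5 skips that hole and lands at page 16, the
 * start of the trailing free area.
 */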
/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        /* insert this block at the proper position in the mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this as the newest block in the order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        struct snd_emu10k1_memblk *q;

        /* calculate the expected size of the empty region */
        if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else
                start_page = 0;
        if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
                end_page = MAX_ALIGN_PAGES;

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to the kernel page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE-1)) {
                snd_printk(KERN_ERR "page is not aligned\n");
                return 0;
        }
        return 1;
}
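/*
 * Illustrative check (hypothetical addresses, assuming a 31-bit dma_mask
 * of 0x7fffffff): 0x80000000 fails the mask test, 0x7ffff800 fails the
 * EMUPAGESIZE (4kB) alignment test, and 0x7ffff000 passes both.
 */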
/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        struct snd_emu10k1_memblk *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_del(&blk->mapped_order_link);
                list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        if ((err = map_memblk(emu, blk)) < 0) {
                /* not enough pages - try to unmap some blocks */
                /* starting from the oldest block */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* ok, the empty region is large enough */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);
        struct snd_util_memhdr *hdr;
        struct snd_emu10k1_memblk *blk;
        int page, err, idx;

        snd_assert(emu, return NULL);
        snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes < MAXPAGES * EMUPAGESIZE, return NULL);
        hdr = emu->memhdr;
        snd_assert(hdr, return NULL);

        mutex_lock(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        /* fill buffer addresses, but pointers are not stored so that
         * snd_free_pci_page() is not called in synth_free()
         */
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                dma_addr_t addr;
#ifdef CONFIG_SND_DEBUG
                if (idx >= sgbuf->pages) {
                        printk(KERN_ERR "emu: pages overflow! (%d-%d) for %d\n",
                               blk->first_page, blk->last_page, sgbuf->pages);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
#endif
                addr = sgbuf->table[idx].addr;
                if (! is_valid_page(emu, addr)) {
                        printk(KERN_ERR "emu: failure page = %d\n", idx);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}


/*
 * release DMA buffer from the page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
        snd_assert(emu && blk, return -EINVAL);
        return snd_emu10k1_synth_free(emu, blk);
}


/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
        struct snd_emu10k1_memblk *blk;
        struct snd_util_memhdr *hdr = hw->memhdr;

        mutex_lock(&hdr->block_mutex);
        blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}


/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
        struct snd_util_memhdr *hdr = emu->memhdr;
        struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
        unsigned long flags;

        mutex_lock(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}


/* check the new allocation range, excluding pages shared with neighbours */
static void get_single_page_range(struct snd_util_memhdr *hdr,
                                  struct snd_emu10k1_memblk *blk,
                                  int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *q;
        int first_page, last_page;
        first_page = blk->first_page;
        if ((p = blk->mem.list.prev) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = blk->last_page;
        if ((p = blk->mem.list.next) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--;  /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}
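/*
 * Example (hypothetical layout): if the previous block in the list ends
 * inside the same kernel page in which this block starts, that page is
 * already backed by an allocated kernel page; first_page is then bumped
 * past it, so only the non-shared pages are allocated or freed below.
 */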
/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, first_page, last_page;
        struct snd_dma_buffer dmab;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci),
                                        PAGE_SIZE, &dmab) < 0)
                        goto __fail;
                if (! is_valid_page(emu, dmab.addr)) {
                        snd_dma_free_pages(&dmab);
                        goto __fail;
                }
                emu->page_addr_table[page] = dmab.addr;
                emu->page_ptr_table[page] = dmab.area;
        }
        return 0;

__fail:
        /* release allocated pages */
        last_page = page - 1;
        for (page = first_page; page <= last_page; page++) {
                dmab.area = emu->page_ptr_table[page];
                dmab.addr = emu->page_addr_table[page];
                dmab.bytes = PAGE_SIZE;
                snd_dma_free_pages(&dmab);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }

        return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, first_page, last_page;
        struct snd_dma_buffer dmab;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        dmab.dev.type = SNDRV_DMA_TYPE_DEV;
        dmab.dev.dev = snd_dma_pci_data(emu->pci);
        for (page = first_page; page <= last_page; page++) {
                if (emu->page_ptr_table[page] == NULL)
                        continue;
                dmab.area = emu->page_ptr_table[page];
                dmab.addr = emu->page_addr_table[page];
                dmab.bytes = PAGE_SIZE;
                snd_dma_free_pages(&dmab);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }

        return 0;
}

/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
        char *ptr;
        snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL);
        ptr = emu->page_ptr_table[page];
        if (! ptr) {
                printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                            int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                                     int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}
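/*
 * Note on the two transfer helpers above: they walk the block one kernel
 * page at a time, clamping each chunk to the page boundary and to the
 * remaining size.  As an illustrative example with PAGE_SIZE == 4096, a
 * 6000-byte copy starting at in-page offset 1000 is split into chunks of
 * 3096 and 2904 bytes over two consecutive pages.
 */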