1 /* -*- linux-c -*- * 2 * 3 * ALSA driver for the digigram lx6464es interface 4 * low-level interface 5 * 6 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; see the file COPYING. If not, write to 20 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, 21 * Boston, MA 02111-1307, USA. 22 * 23 */ 24 25 /* #define RMH_DEBUG 1 */ 26 27 #include <linux/module.h> 28 #include <linux/pci.h> 29 #include <linux/delay.h> 30 31 #include "lx6464es.h" 32 #include "lx_core.h" 33 34 /* low-level register access */ 35 36 static const unsigned long dsp_port_offsets[] = { 37 0, 38 0x400, 39 0x401, 40 0x402, 41 0x403, 42 0x404, 43 0x405, 44 0x406, 45 0x407, 46 0x408, 47 0x409, 48 0x40a, 49 0x40b, 50 0x40c, 51 52 0x410, 53 0x411, 54 0x412, 55 0x413, 56 0x414, 57 0x415, 58 0x416, 59 60 0x420, 61 0x430, 62 0x431, 63 0x432, 64 0x433, 65 0x434, 66 0x440 67 }; 68 69 static void __iomem *lx_dsp_register(struct lx6464es *chip, int port) 70 { 71 void __iomem *base_address = chip->port_dsp_bar; 72 return base_address + dsp_port_offsets[port]*4; 73 } 74 75 unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port) 76 { 77 void __iomem *address = lx_dsp_register(chip, port); 78 return ioread32(address); 79 } 80 81 void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len) 82 { 83 void __iomem *address = lx_dsp_register(chip, port); 84 memcpy_fromio(data, address, len*sizeof(u32)); 85 } 86 87 88 void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data) 89 { 90 void __iomem *address = lx_dsp_register(chip, port); 91 iowrite32(data, address); 92 } 93 94 void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data, 95 u32 len) 96 { 97 void __iomem *address = lx_dsp_register(chip, port); 98 memcpy_toio(address, data, len*sizeof(u32)); 99 } 100 101 102 static const unsigned long plx_port_offsets[] = { 103 0x04, 104 0x40, 105 0x44, 106 0x48, 107 0x4c, 108 0x50, 109 0x54, 110 0x58, 111 0x5c, 112 0x64, 113 0x68, 114 0x6C 115 }; 116 117 static void __iomem *lx_plx_register(struct lx6464es *chip, int port) 118 { 119 void __iomem *base_address = chip->port_plx_remapped; 120 return base_address + plx_port_offsets[port]; 121 } 122 123 unsigned long lx_plx_reg_read(struct lx6464es *chip, int port) 124 { 125 void __iomem *address = lx_plx_register(chip, port); 126 return ioread32(address); 127 } 128 129 void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data) 130 { 131 void __iomem *address = lx_plx_register(chip, port); 132 iowrite32(data, address); 133 } 134 135 u32 lx_plx_mbox_read(struct lx6464es *chip, int mbox_nr) 136 { 137 int index; 138 139 switch (mbox_nr) { 140 case 1: 141 index = ePLX_MBOX1; break; 142 case 2: 143 index = ePLX_MBOX2; break; 144 case 3: 145 index = ePLX_MBOX3; break; 146 case 4: 147 index = ePLX_MBOX4; break; 148 case 5: 149 index = ePLX_MBOX5; break; 150 case 6: 151 index = ePLX_MBOX6; break; 152 case 7: 153 index = ePLX_MBOX7; break; 154 case 0: /* reserved for HF flags */ 155 snd_BUG(); 156 default: 157 return 0xdeadbeef; 158 } 159 160 return lx_plx_reg_read(chip, index); 161 } 162 163 int lx_plx_mbox_write(struct lx6464es *chip, int mbox_nr, u32 value) 164 { 165 int index = -1; 166 167 switch (mbox_nr) { 168 case 1: 169 index = ePLX_MBOX1; break; 170 case 3: 171 index = ePLX_MBOX3; break; 172 case 4: 173 index = ePLX_MBOX4; break; 174 case 5: 175 index = ePLX_MBOX5; break; 176 case 6: 177 index = ePLX_MBOX6; break; 178 case 7: 179 index = ePLX_MBOX7; break; 180 case 0: /* reserved for HF flags */ 181 case 2: /* reserved for Pipe States 182 * the DSP keeps an image of it */ 183 snd_BUG(); 184 return -EBADRQC; 185 } 186 187 lx_plx_reg_write(chip, index, value); 188 return 0; 189 } 190 191 192 /* rmh */ 193 194 #ifdef CONFIG_SND_DEBUG 195 #define CMD_NAME(a) a 196 #else 197 #define CMD_NAME(a) NULL 198 #endif 199 200 #define Reg_CSM_MR 0x00000002 201 #define Reg_CSM_MC 0x00000001 202 203 struct dsp_cmd_info { 204 u32 dcCodeOp; /* Op Code of the command (usually 1st 24-bits 205 * word).*/ 206 u16 dcCmdLength; /* Command length in words of 24 bits.*/ 207 u16 dcStatusType; /* Status type: 0 for fixed length, 1 for 208 * random. */ 209 u16 dcStatusLength; /* Status length (if fixed).*/ 210 char *dcOpName; 211 }; 212 213 /* 214 Initialization and control data for the Microblaze interface 215 - OpCode: 216 the opcode field of the command set at the proper offset 217 - CmdLength 218 the number of command words 219 - StatusType 220 offset in the status registers: 0 means that the return value may be 221 different from 0, and must be read 222 - StatusLength 223 the number of status words (in addition to the return value) 224 */ 225 226 static struct dsp_cmd_info dsp_commands[] = 227 { 228 { (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/ 229 , 1 , 0 /**/ , CMD_NAME("INFO_DEBUG") }, 230 { (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/ 231 , 1 , 2 /**/ , CMD_NAME("GET_SYS_CFG") }, 232 { (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/ 233 , 1 , 0 /**/ , CMD_NAME("SET_GRANULARITY") }, 234 { (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/ 235 , 1 , 0 /**/ , CMD_NAME("SET_TIMER_IRQ") }, 236 { (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/ 237 , 1 , 0 /*up to 10*/ , CMD_NAME("GET_EVENT") }, 238 { (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/ 239 , 1 , 2 /*up to 4*/ , CMD_NAME("GET_PIPES") }, 240 { (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/ 241 , 0 , 0 /**/ , CMD_NAME("ALLOCATE_PIPE") }, 242 { (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/ 243 , 0 , 0 /**/ , CMD_NAME("RELEASE_PIPE") }, 244 { (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/ 245 , 1 , MAX_STREAM_BUFFER , CMD_NAME("ASK_BUFFERS") }, 246 { (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/ 247 , 0 , 0 /*up to 2*/ , CMD_NAME("STOP_PIPE") }, 248 { (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/ 249 , 1 , 1 /*up to 2*/ , CMD_NAME("GET_PIPE_SPL_COUNT") }, 250 { (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/ 251 , 1 , 0 /**/ , CMD_NAME("TOGGLE_PIPE_STATE") }, 252 { (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/ 253 , 1 , 0 /**/ , CMD_NAME("DEF_STREAM") }, 254 { (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/ 255 , 1 , 0 /**/ , CMD_NAME("SET_MUTE") }, 256 { (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1/**/ 257 , 1 , 2 /**/ , CMD_NAME("GET_STREAM_SPL_COUNT") }, 258 { (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/ 259 , 0 , 1 /**/ , CMD_NAME("UPDATE_BUFFER") }, 260 { (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/ 261 , 1 , 4 /**/ , CMD_NAME("GET_BUFFER") }, 262 { (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/ 263 , 1 , 1 /*up to 4*/ , CMD_NAME("CANCEL_BUFFER") }, 264 { (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/ 265 , 1 , 1 /**/ , CMD_NAME("GET_PEAK") }, 266 { (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/ 267 , 1 , 0 /**/ , CMD_NAME("SET_STREAM_STATE") }, 268 }; 269 270 static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd) 271 { 272 snd_BUG_ON(cmd >= CMD_14_INVALID); 273 274 rmh->cmd[0] = dsp_commands[cmd].dcCodeOp; 275 rmh->cmd_len = dsp_commands[cmd].dcCmdLength; 276 rmh->stat_len = dsp_commands[cmd].dcStatusLength; 277 rmh->dsp_stat = dsp_commands[cmd].dcStatusType; 278 rmh->cmd_idx = cmd; 279 memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32)); 280 281 #ifdef CONFIG_SND_DEBUG 282 memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32)); 283 #endif 284 #ifdef RMH_DEBUG 285 rmh->cmd_idx = cmd; 286 #endif 287 } 288 289 #ifdef RMH_DEBUG 290 #define LXRMH "lx6464es rmh: " 291 static void lx_message_dump(struct lx_rmh *rmh) 292 { 293 u8 idx = rmh->cmd_idx; 294 int i; 295 296 snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName); 297 298 for (i = 0; i != rmh->cmd_len; ++i) 299 snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]); 300 301 for (i = 0; i != rmh->stat_len; ++i) 302 snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]); 303 snd_printk("\n"); 304 } 305 #else 306 static inline void lx_message_dump(struct lx_rmh *rmh) 307 {} 308 #endif 309 310 311 312 /* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */ 313 #define XILINX_TIMEOUT_MS 40 314 #define XILINX_POLL_NO_SLEEP 100 315 #define XILINX_POLL_ITERATIONS 150 316 317 318 static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh) 319 { 320 u32 reg = ED_DSP_TIMED_OUT; 321 int dwloop; 322 323 if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) { 324 snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg); 325 return -EBUSY; 326 } 327 328 /* write command */ 329 lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len); 330 331 /* MicoBlaze gogogo */ 332 lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC); 333 334 /* wait for device to answer */ 335 for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) { 336 if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) { 337 if (rmh->dsp_stat == 0) 338 reg = lx_dsp_reg_read(chip, eReg_CRM1); 339 else 340 reg = 0; 341 goto polling_successful; 342 } else 343 udelay(1); 344 } 345 snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send_atomic! " 346 "polling failed\n"); 347 348 polling_successful: 349 if ((reg & ERROR_VALUE) == 0) { 350 /* read response */ 351 if (rmh->stat_len) { 352 snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1)); 353 lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat, 354 rmh->stat_len); 355 } 356 } else 357 snd_printk(LXP "rmh error: %08x\n", reg); 358 359 /* clear Reg_CSM_MR */ 360 lx_dsp_reg_write(chip, eReg_CSM, 0); 361 362 switch (reg) { 363 case ED_DSP_TIMED_OUT: 364 snd_printk(KERN_WARNING LXP "lx_message_send: dsp timeout\n"); 365 return -ETIMEDOUT; 366 367 case ED_DSP_CRASHED: 368 snd_printk(KERN_WARNING LXP "lx_message_send: dsp crashed\n"); 369 return -EAGAIN; 370 } 371 372 lx_message_dump(rmh); 373 374 return reg; 375 } 376 377 378 /* low-level dsp access */ 379 int __devinit lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version) 380 { 381 u16 ret; 382 unsigned long flags; 383 384 spin_lock_irqsave(&chip->msg_lock, flags); 385 386 lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG); 387 ret = lx_message_send_atomic(chip, &chip->rmh); 388 389 *rdsp_version = chip->rmh.stat[1]; 390 spin_unlock_irqrestore(&chip->msg_lock, flags); 391 return ret; 392 } 393 394 int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq) 395 { 396 u16 ret = 0; 397 unsigned long flags; 398 u32 freq_raw = 0; 399 u32 freq = 0; 400 u32 frequency = 0; 401 402 spin_lock_irqsave(&chip->msg_lock, flags); 403 404 lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG); 405 ret = lx_message_send_atomic(chip, &chip->rmh); 406 407 if (ret == 0) { 408 freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET; 409 freq = freq_raw & XES_FREQ_COUNT8_MASK; 410 411 if ((freq < XES_FREQ_COUNT8_48_MAX) || 412 (freq > XES_FREQ_COUNT8_44_MIN)) 413 frequency = 0; /* unknown */ 414 else if (freq >= XES_FREQ_COUNT8_44_MAX) 415 frequency = 44100; 416 else 417 frequency = 48000; 418 } 419 420 spin_unlock_irqrestore(&chip->msg_lock, flags); 421 422 *rfreq = frequency * chip->freq_ratio; 423 424 return ret; 425 } 426 427 int lx_dsp_get_mac(struct lx6464es *chip, u8 *mac_address) 428 { 429 u32 macmsb, maclsb; 430 431 macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF; 432 maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF; 433 434 /* todo: endianess handling */ 435 mac_address[5] = ((u8 *)(&maclsb))[0]; 436 mac_address[4] = ((u8 *)(&maclsb))[1]; 437 mac_address[3] = ((u8 *)(&maclsb))[2]; 438 mac_address[2] = ((u8 *)(&macmsb))[0]; 439 mac_address[1] = ((u8 *)(&macmsb))[1]; 440 mac_address[0] = ((u8 *)(&macmsb))[2]; 441 442 return 0; 443 } 444 445 446 int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran) 447 { 448 unsigned long flags; 449 int ret; 450 451 spin_lock_irqsave(&chip->msg_lock, flags); 452 453 lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY); 454 chip->rmh.cmd[0] |= gran; 455 456 ret = lx_message_send_atomic(chip, &chip->rmh); 457 spin_unlock_irqrestore(&chip->msg_lock, flags); 458 return ret; 459 } 460 461 int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data) 462 { 463 unsigned long flags; 464 int ret; 465 466 spin_lock_irqsave(&chip->msg_lock, flags); 467 468 lx_message_init(&chip->rmh, CMD_04_GET_EVENT); 469 chip->rmh.stat_len = 9; /* we don't necessarily need the full length */ 470 471 ret = lx_message_send_atomic(chip, &chip->rmh); 472 473 if (!ret) 474 memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32)); 475 476 spin_unlock_irqrestore(&chip->msg_lock, flags); 477 return ret; 478 } 479 480 #define CSES_TIMEOUT 100 /* microseconds */ 481 #define CSES_CE 0x0001 482 #define CSES_BROADCAST 0x0002 483 #define CSES_UPDATE_LDSV 0x0004 484 485 int lx_dsp_es_check_pipeline(struct lx6464es *chip) 486 { 487 int i; 488 489 for (i = 0; i != CSES_TIMEOUT; ++i) { 490 /* 491 * le bit CSES_UPDATE_LDSV est à 1 dés que le macprog 492 * est pret. il re-passe à 0 lorsque le premier read a 493 * été fait. pour l'instant on retire le test car ce bit 494 * passe a 1 environ 200 à 400 ms aprés que le registre 495 * confES à été écrit (kick du xilinx ES). 496 * 497 * On ne teste que le bit CE. 498 * */ 499 500 u32 cses = lx_dsp_reg_read(chip, eReg_CSES); 501 502 if ((cses & CSES_CE) == 0) 503 return 0; 504 505 udelay(1); 506 } 507 508 return -ETIMEDOUT; 509 } 510 511 512 #define PIPE_INFO_TO_CMD(capture, pipe) \ 513 ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET) 514 515 516 517 /* low-level pipe handling */ 518 int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture, 519 int channels) 520 { 521 int err; 522 unsigned long flags; 523 524 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 525 526 spin_lock_irqsave(&chip->msg_lock, flags); 527 lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE); 528 529 chip->rmh.cmd[0] |= pipe_cmd; 530 chip->rmh.cmd[0] |= channels; 531 532 err = lx_message_send_atomic(chip, &chip->rmh); 533 spin_unlock_irqrestore(&chip->msg_lock, flags); 534 535 if (err != 0) 536 snd_printk(KERN_ERR "lx6464es: could not allocate pipe\n"); 537 538 return err; 539 } 540 541 int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture) 542 { 543 int err; 544 unsigned long flags; 545 546 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 547 548 spin_lock_irqsave(&chip->msg_lock, flags); 549 lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE); 550 551 chip->rmh.cmd[0] |= pipe_cmd; 552 553 err = lx_message_send_atomic(chip, &chip->rmh); 554 spin_unlock_irqrestore(&chip->msg_lock, flags); 555 556 return err; 557 } 558 559 int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture, 560 u32 *r_needed, u32 *r_freed, u32 *size_array) 561 { 562 int err; 563 unsigned long flags; 564 565 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 566 567 #ifdef CONFIG_SND_DEBUG 568 if (size_array) 569 memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER); 570 #endif 571 572 *r_needed = 0; 573 *r_freed = 0; 574 575 spin_lock_irqsave(&chip->msg_lock, flags); 576 lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS); 577 578 chip->rmh.cmd[0] |= pipe_cmd; 579 580 err = lx_message_send_atomic(chip, &chip->rmh); 581 582 if (!err) { 583 int i; 584 for (i = 0; i < MAX_STREAM_BUFFER; ++i) { 585 u32 stat = chip->rmh.stat[i]; 586 if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) { 587 /* finished */ 588 *r_freed += 1; 589 if (size_array) 590 size_array[i] = stat & MASK_DATA_SIZE; 591 } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET)) 592 == 0) 593 /* free */ 594 *r_needed += 1; 595 } 596 597 #if 0 598 snd_printdd(LXP "CMD_08_ASK_BUFFERS: needed %d, freed %d\n", 599 *r_needed, *r_freed); 600 for (i = 0; i < MAX_STREAM_BUFFER; ++i) { 601 for (i = 0; i != chip->rmh.stat_len; ++i) 602 snd_printdd(" stat[%d]: %x, %x\n", i, 603 chip->rmh.stat[i], 604 chip->rmh.stat[i] & MASK_DATA_SIZE); 605 } 606 #endif 607 } 608 609 spin_unlock_irqrestore(&chip->msg_lock, flags); 610 return err; 611 } 612 613 614 int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture) 615 { 616 int err; 617 unsigned long flags; 618 619 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 620 621 spin_lock_irqsave(&chip->msg_lock, flags); 622 lx_message_init(&chip->rmh, CMD_09_STOP_PIPE); 623 624 chip->rmh.cmd[0] |= pipe_cmd; 625 626 err = lx_message_send_atomic(chip, &chip->rmh); 627 628 spin_unlock_irqrestore(&chip->msg_lock, flags); 629 return err; 630 } 631 632 static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture) 633 { 634 int err; 635 unsigned long flags; 636 637 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 638 639 spin_lock_irqsave(&chip->msg_lock, flags); 640 lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE); 641 642 chip->rmh.cmd[0] |= pipe_cmd; 643 644 err = lx_message_send_atomic(chip, &chip->rmh); 645 646 spin_unlock_irqrestore(&chip->msg_lock, flags); 647 return err; 648 } 649 650 651 int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture) 652 { 653 int err; 654 655 err = lx_pipe_wait_for_idle(chip, pipe, is_capture); 656 if (err < 0) 657 return err; 658 659 err = lx_pipe_toggle_state(chip, pipe, is_capture); 660 661 return err; 662 } 663 664 int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture) 665 { 666 int err = 0; 667 668 err = lx_pipe_wait_for_start(chip, pipe, is_capture); 669 if (err < 0) 670 return err; 671 672 err = lx_pipe_toggle_state(chip, pipe, is_capture); 673 674 return err; 675 } 676 677 678 int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture, 679 u64 *rsample_count) 680 { 681 int err; 682 unsigned long flags; 683 684 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 685 686 spin_lock_irqsave(&chip->msg_lock, flags); 687 lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT); 688 689 chip->rmh.cmd[0] |= pipe_cmd; 690 chip->rmh.stat_len = 2; /* need all words here! */ 691 692 err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */ 693 694 if (err != 0) 695 snd_printk(KERN_ERR 696 "lx6464es: could not query pipe's sample count\n"); 697 else { 698 *rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI) 699 << 24) /* hi part */ 700 + chip->rmh.stat[1]; /* lo part */ 701 } 702 703 spin_unlock_irqrestore(&chip->msg_lock, flags); 704 return err; 705 } 706 707 int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate) 708 { 709 int err; 710 unsigned long flags; 711 712 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 713 714 spin_lock_irqsave(&chip->msg_lock, flags); 715 lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT); 716 717 chip->rmh.cmd[0] |= pipe_cmd; 718 719 err = lx_message_send_atomic(chip, &chip->rmh); 720 721 if (err != 0) 722 snd_printk(KERN_ERR "lx6464es: could not query pipe's state\n"); 723 else 724 *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F; 725 726 spin_unlock_irqrestore(&chip->msg_lock, flags); 727 return err; 728 } 729 730 static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe, 731 int is_capture, u16 state) 732 { 733 int i; 734 735 /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms: 736 * timeout 50 ms */ 737 for (i = 0; i != 50; ++i) { 738 u16 current_state; 739 int err = lx_pipe_state(chip, pipe, is_capture, ¤t_state); 740 741 if (err < 0) 742 return err; 743 744 if (current_state == state) 745 return 0; 746 747 mdelay(1); 748 } 749 750 return -ETIMEDOUT; 751 } 752 753 int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture) 754 { 755 return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN); 756 } 757 758 int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture) 759 { 760 return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE); 761 } 762 763 /* low-level stream handling */ 764 int lx_stream_set_state(struct lx6464es *chip, u32 pipe, 765 int is_capture, enum stream_state_t state) 766 { 767 int err; 768 unsigned long flags; 769 770 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 771 772 spin_lock_irqsave(&chip->msg_lock, flags); 773 lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE); 774 775 chip->rmh.cmd[0] |= pipe_cmd; 776 chip->rmh.cmd[0] |= state; 777 778 err = lx_message_send_atomic(chip, &chip->rmh); 779 spin_unlock_irqrestore(&chip->msg_lock, flags); 780 781 return err; 782 } 783 784 int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime, 785 u32 pipe, int is_capture) 786 { 787 int err; 788 unsigned long flags; 789 790 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 791 792 u32 channels = runtime->channels; 793 794 if (runtime->channels != channels) 795 snd_printk(KERN_ERR LXP "channel count mismatch: %d vs %d", 796 runtime->channels, channels); 797 798 spin_lock_irqsave(&chip->msg_lock, flags); 799 lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM); 800 801 chip->rmh.cmd[0] |= pipe_cmd; 802 803 if (runtime->sample_bits == 16) 804 /* 16 bit format */ 805 chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET); 806 807 if (snd_pcm_format_little_endian(runtime->format)) 808 /* little endian/intel format */ 809 chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET); 810 811 chip->rmh.cmd[0] |= channels-1; 812 813 err = lx_message_send_atomic(chip, &chip->rmh); 814 spin_unlock_irqrestore(&chip->msg_lock, flags); 815 816 return err; 817 } 818 819 int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture, 820 int *rstate) 821 { 822 int err; 823 unsigned long flags; 824 825 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 826 827 spin_lock_irqsave(&chip->msg_lock, flags); 828 lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT); 829 830 chip->rmh.cmd[0] |= pipe_cmd; 831 832 err = lx_message_send_atomic(chip, &chip->rmh); 833 834 *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE; 835 836 spin_unlock_irqrestore(&chip->msg_lock, flags); 837 return err; 838 } 839 840 int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture, 841 u64 *r_bytepos) 842 { 843 int err; 844 unsigned long flags; 845 846 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 847 848 spin_lock_irqsave(&chip->msg_lock, flags); 849 lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT); 850 851 chip->rmh.cmd[0] |= pipe_cmd; 852 853 err = lx_message_send_atomic(chip, &chip->rmh); 854 855 *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI) 856 << 32) /* hi part */ 857 + chip->rmh.stat[1]; /* lo part */ 858 859 spin_unlock_irqrestore(&chip->msg_lock, flags); 860 return err; 861 } 862 863 /* low-level buffer handling */ 864 int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture, 865 u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi, 866 u32 *r_buffer_index) 867 { 868 int err; 869 unsigned long flags; 870 871 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 872 873 spin_lock_irqsave(&chip->msg_lock, flags); 874 lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER); 875 876 chip->rmh.cmd[0] |= pipe_cmd; 877 chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */ 878 879 /* todo: pause request, circular buffer */ 880 881 chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE; 882 chip->rmh.cmd[2] = buf_address_lo; 883 884 if (buf_address_hi) { 885 chip->rmh.cmd_len = 4; 886 chip->rmh.cmd[3] = buf_address_hi; 887 chip->rmh.cmd[0] |= BF_64BITS_ADR; 888 } 889 890 err = lx_message_send_atomic(chip, &chip->rmh); 891 892 if (err == 0) { 893 *r_buffer_index = chip->rmh.stat[0]; 894 goto done; 895 } 896 897 if (err == EB_RBUFFERS_TABLE_OVERFLOW) 898 snd_printk(LXP "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n"); 899 900 if (err == EB_INVALID_STREAM) 901 snd_printk(LXP "lx_buffer_give EB_INVALID_STREAM\n"); 902 903 if (err == EB_CMD_REFUSED) 904 snd_printk(LXP "lx_buffer_give EB_CMD_REFUSED\n"); 905 906 done: 907 spin_unlock_irqrestore(&chip->msg_lock, flags); 908 return err; 909 } 910 911 int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture, 912 u32 *r_buffer_size) 913 { 914 int err; 915 unsigned long flags; 916 917 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 918 919 spin_lock_irqsave(&chip->msg_lock, flags); 920 lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER); 921 922 chip->rmh.cmd[0] |= pipe_cmd; 923 chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the 924 * microblaze will seek for it */ 925 926 err = lx_message_send_atomic(chip, &chip->rmh); 927 928 if (err == 0) 929 *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE; 930 931 spin_unlock_irqrestore(&chip->msg_lock, flags); 932 return err; 933 } 934 935 int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture, 936 u32 buffer_index) 937 { 938 int err; 939 unsigned long flags; 940 941 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 942 943 spin_lock_irqsave(&chip->msg_lock, flags); 944 lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER); 945 946 chip->rmh.cmd[0] |= pipe_cmd; 947 chip->rmh.cmd[0] |= buffer_index; 948 949 err = lx_message_send_atomic(chip, &chip->rmh); 950 951 spin_unlock_irqrestore(&chip->msg_lock, flags); 952 return err; 953 } 954 955 956 /* low-level gain/peak handling 957 * 958 * \todo: can we unmute capture/playback channels independently? 959 * 960 * */ 961 int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute) 962 { 963 int err; 964 unsigned long flags; 965 966 /* bit set to 1: channel muted */ 967 u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU; 968 969 spin_lock_irqsave(&chip->msg_lock, flags); 970 lx_message_init(&chip->rmh, CMD_0D_SET_MUTE); 971 972 chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0); 973 974 chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32); /* hi part */ 975 chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */ 976 977 snd_printk("mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1], 978 chip->rmh.cmd[2]); 979 980 err = lx_message_send_atomic(chip, &chip->rmh); 981 982 spin_unlock_irqrestore(&chip->msg_lock, flags); 983 return err; 984 } 985 986 static u32 peak_map[] = { 987 0x00000109, /* -90.308dB */ 988 0x0000083B, /* -72.247dB */ 989 0x000020C4, /* -60.205dB */ 990 0x00008273, /* -48.030dB */ 991 0x00020756, /* -36.005dB */ 992 0x00040C37, /* -30.001dB */ 993 0x00081385, /* -24.002dB */ 994 0x00101D3F, /* -18.000dB */ 995 0x0016C310, /* -15.000dB */ 996 0x002026F2, /* -12.001dB */ 997 0x002D6A86, /* -9.000dB */ 998 0x004026E6, /* -6.004dB */ 999 0x005A9DF6, /* -3.000dB */ 1000 0x0065AC8B, /* -2.000dB */ 1001 0x00721481, /* -1.000dB */ 1002 0x007FFFFF, /* FS */ 1003 }; 1004 1005 int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels, 1006 u32 *r_levels) 1007 { 1008 int err = 0; 1009 unsigned long flags; 1010 int i; 1011 spin_lock_irqsave(&chip->msg_lock, flags); 1012 1013 for (i = 0; i < channels; i += 4) { 1014 u32 s0, s1, s2, s3; 1015 1016 lx_message_init(&chip->rmh, CMD_12_GET_PEAK); 1017 chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i); 1018 1019 err = lx_message_send_atomic(chip, &chip->rmh); 1020 1021 if (err == 0) { 1022 s0 = peak_map[chip->rmh.stat[0] & 0x0F]; 1023 s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf]; 1024 s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf]; 1025 s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf]; 1026 } else 1027 s0 = s1 = s2 = s3 = 0; 1028 1029 r_levels[0] = s0; 1030 r_levels[1] = s1; 1031 r_levels[2] = s2; 1032 r_levels[3] = s3; 1033 1034 r_levels += 4; 1035 } 1036 1037 spin_unlock_irqrestore(&chip->msg_lock, flags); 1038 return err; 1039 } 1040 1041 /* interrupt handling */ 1042 #define PCX_IRQ_NONE 0 1043 #define IRQCS_ACTIVE_PCIDB 0x00002000L /* Bit nø 13 */ 1044 #define IRQCS_ENABLE_PCIIRQ 0x00000100L /* Bit nø 08 */ 1045 #define IRQCS_ENABLE_PCIDB 0x00000200L /* Bit nø 09 */ 1046 1047 static u32 lx_interrupt_test_ack(struct lx6464es *chip) 1048 { 1049 u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS); 1050 1051 /* Test if PCI Doorbell interrupt is active */ 1052 if (irqcs & IRQCS_ACTIVE_PCIDB) { 1053 u32 temp; 1054 irqcs = PCX_IRQ_NONE; 1055 1056 while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) { 1057 /* RAZ interrupt */ 1058 irqcs |= temp; 1059 lx_plx_reg_write(chip, ePLX_L2PCIDB, temp); 1060 } 1061 1062 return irqcs; 1063 } 1064 return PCX_IRQ_NONE; 1065 } 1066 1067 static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc, 1068 int *r_async_pending, int *r_async_escmd) 1069 { 1070 u32 irq_async; 1071 u32 irqsrc = lx_interrupt_test_ack(chip); 1072 1073 if (irqsrc == PCX_IRQ_NONE) 1074 return 0; 1075 1076 *r_irqsrc = irqsrc; 1077 1078 irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response 1079 * (set by xilinx) + EOB */ 1080 1081 if (irq_async & MASK_SYS_STATUS_ESA) { 1082 irq_async &= ~MASK_SYS_STATUS_ESA; 1083 *r_async_escmd = 1; 1084 } 1085 1086 if (irq_async) { 1087 /* snd_printd("interrupt: async event pending\n"); */ 1088 *r_async_pending = 1; 1089 } 1090 1091 return 1; 1092 } 1093 1094 static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc, 1095 int *r_freq_changed, 1096 u64 *r_notified_in_pipe_mask, 1097 u64 *r_notified_out_pipe_mask) 1098 { 1099 int err; 1100 u32 stat[9]; /* answer from CMD_04_GET_EVENT */ 1101 1102 /* On peut optimiser pour ne pas lire les evenements vides 1103 * les mots de réponse sont dans l'ordre suivant : 1104 * Stat[0] mot de status général 1105 * Stat[1] fin de buffer OUT pF 1106 * Stat[2] fin de buffer OUT pf 1107 * Stat[3] fin de buffer IN pF 1108 * Stat[4] fin de buffer IN pf 1109 * Stat[5] underrun poid fort 1110 * Stat[6] underrun poid faible 1111 * Stat[7] overrun poid fort 1112 * Stat[8] overrun poid faible 1113 * */ 1114 1115 u64 orun_mask; 1116 u64 urun_mask; 1117 #if 0 1118 int has_underrun = (irqsrc & MASK_SYS_STATUS_URUN) ? 1 : 0; 1119 int has_overrun = (irqsrc & MASK_SYS_STATUS_ORUN) ? 1 : 0; 1120 #endif 1121 int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0; 1122 int eb_pending_in = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0; 1123 1124 *r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0; 1125 1126 err = lx_dsp_read_async_events(chip, stat); 1127 if (err < 0) 1128 return err; 1129 1130 if (eb_pending_in) { 1131 *r_notified_in_pipe_mask = ((u64)stat[3] << 32) 1132 + stat[4]; 1133 snd_printdd(LXP "interrupt: EOBI pending %llx\n", 1134 *r_notified_in_pipe_mask); 1135 } 1136 if (eb_pending_out) { 1137 *r_notified_out_pipe_mask = ((u64)stat[1] << 32) 1138 + stat[2]; 1139 snd_printdd(LXP "interrupt: EOBO pending %llx\n", 1140 *r_notified_out_pipe_mask); 1141 } 1142 1143 orun_mask = ((u64)stat[7] << 32) + stat[8]; 1144 urun_mask = ((u64)stat[5] << 32) + stat[6]; 1145 1146 /* todo: handle xrun notification */ 1147 1148 return err; 1149 } 1150 1151 static int lx_interrupt_request_new_buffer(struct lx6464es *chip, 1152 struct lx_stream *lx_stream) 1153 { 1154 struct snd_pcm_substream *substream = lx_stream->stream; 1155 const unsigned int is_capture = lx_stream->is_capture; 1156 int err; 1157 unsigned long flags; 1158 1159 const u32 channels = substream->runtime->channels; 1160 const u32 bytes_per_frame = channels * 3; 1161 const u32 period_size = substream->runtime->period_size; 1162 const u32 period_bytes = period_size * bytes_per_frame; 1163 const u32 pos = lx_stream->frame_pos; 1164 const u32 next_pos = ((pos+1) == substream->runtime->periods) ? 1165 0 : pos + 1; 1166 1167 dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes; 1168 u32 buf_hi = 0; 1169 u32 buf_lo = 0; 1170 u32 buffer_index = 0; 1171 1172 u32 needed, freed; 1173 u32 size_array[MAX_STREAM_BUFFER]; 1174 1175 snd_printdd("->lx_interrupt_request_new_buffer\n"); 1176 1177 spin_lock_irqsave(&chip->lock, flags); 1178 1179 err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array); 1180 snd_printdd(LXP "interrupt: needed %d, freed %d\n", needed, freed); 1181 1182 unpack_pointer(buf, &buf_lo, &buf_hi); 1183 err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi, 1184 &buffer_index); 1185 snd_printdd(LXP "interrupt: gave buffer index %x on %p (%d bytes)\n", 1186 buffer_index, (void *)buf, period_bytes); 1187 1188 lx_stream->frame_pos = next_pos; 1189 spin_unlock_irqrestore(&chip->lock, flags); 1190 1191 return err; 1192 } 1193 1194 void lx_tasklet_playback(unsigned long data) 1195 { 1196 struct lx6464es *chip = (struct lx6464es *)data; 1197 struct lx_stream *lx_stream = &chip->playback_stream; 1198 int err; 1199 1200 snd_printdd("->lx_tasklet_playback\n"); 1201 1202 err = lx_interrupt_request_new_buffer(chip, lx_stream); 1203 if (err < 0) 1204 snd_printk(KERN_ERR LXP 1205 "cannot request new buffer for playback\n"); 1206 1207 snd_pcm_period_elapsed(lx_stream->stream); 1208 } 1209 1210 void lx_tasklet_capture(unsigned long data) 1211 { 1212 struct lx6464es *chip = (struct lx6464es *)data; 1213 struct lx_stream *lx_stream = &chip->capture_stream; 1214 int err; 1215 1216 snd_printdd("->lx_tasklet_capture\n"); 1217 err = lx_interrupt_request_new_buffer(chip, lx_stream); 1218 if (err < 0) 1219 snd_printk(KERN_ERR LXP 1220 "cannot request new buffer for capture\n"); 1221 1222 snd_pcm_period_elapsed(lx_stream->stream); 1223 } 1224 1225 1226 1227 static int lx_interrupt_handle_audio_transfer(struct lx6464es *chip, 1228 u64 notified_in_pipe_mask, 1229 u64 notified_out_pipe_mask) 1230 { 1231 int err = 0; 1232 1233 if (notified_in_pipe_mask) { 1234 snd_printdd(LXP "requesting audio transfer for capture\n"); 1235 tasklet_hi_schedule(&chip->tasklet_capture); 1236 } 1237 1238 if (notified_out_pipe_mask) { 1239 snd_printdd(LXP "requesting audio transfer for playback\n"); 1240 tasklet_hi_schedule(&chip->tasklet_playback); 1241 } 1242 1243 return err; 1244 } 1245 1246 1247 irqreturn_t lx_interrupt(int irq, void *dev_id) 1248 { 1249 struct lx6464es *chip = dev_id; 1250 int async_pending, async_escmd; 1251 u32 irqsrc; 1252 1253 spin_lock(&chip->lock); 1254 1255 snd_printdd("**************************************************\n"); 1256 1257 if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) { 1258 spin_unlock(&chip->lock); 1259 snd_printdd("IRQ_NONE\n"); 1260 return IRQ_NONE; /* this device did not cause the interrupt */ 1261 } 1262 1263 if (irqsrc & MASK_SYS_STATUS_CMD_DONE) 1264 goto exit; 1265 1266 #if 0 1267 if (irqsrc & MASK_SYS_STATUS_EOBI) 1268 snd_printdd(LXP "interrupt: EOBI\n"); 1269 1270 if (irqsrc & MASK_SYS_STATUS_EOBO) 1271 snd_printdd(LXP "interrupt: EOBO\n"); 1272 1273 if (irqsrc & MASK_SYS_STATUS_URUN) 1274 snd_printdd(LXP "interrupt: URUN\n"); 1275 1276 if (irqsrc & MASK_SYS_STATUS_ORUN) 1277 snd_printdd(LXP "interrupt: ORUN\n"); 1278 #endif 1279 1280 if (async_pending) { 1281 u64 notified_in_pipe_mask = 0; 1282 u64 notified_out_pipe_mask = 0; 1283 int freq_changed; 1284 int err; 1285 1286 /* handle async events */ 1287 err = lx_interrupt_handle_async_events(chip, irqsrc, 1288 &freq_changed, 1289 ¬ified_in_pipe_mask, 1290 ¬ified_out_pipe_mask); 1291 if (err) 1292 snd_printk(KERN_ERR LXP 1293 "error handling async events\n"); 1294 1295 err = lx_interrupt_handle_audio_transfer(chip, 1296 notified_in_pipe_mask, 1297 notified_out_pipe_mask 1298 ); 1299 if (err) 1300 snd_printk(KERN_ERR LXP 1301 "error during audio transfer\n"); 1302 } 1303 1304 if (async_escmd) { 1305 #if 0 1306 /* backdoor for ethersound commands 1307 * 1308 * for now, we do not need this 1309 * 1310 * */ 1311 1312 snd_printdd("lx6464es: interrupt requests escmd handling\n"); 1313 #endif 1314 } 1315 1316 exit: 1317 spin_unlock(&chip->lock); 1318 return IRQ_HANDLED; /* this device caused the interrupt */ 1319 } 1320 1321 1322 static void lx_irq_set(struct lx6464es *chip, int enable) 1323 { 1324 u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS); 1325 1326 /* enable/disable interrupts 1327 * 1328 * Set the Doorbell and PCI interrupt enable bits 1329 * 1330 * */ 1331 if (enable) 1332 reg |= (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB); 1333 else 1334 reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB); 1335 lx_plx_reg_write(chip, ePLX_IRQCS, reg); 1336 } 1337 1338 void lx_irq_enable(struct lx6464es *chip) 1339 { 1340 snd_printdd("->lx_irq_enable\n"); 1341 lx_irq_set(chip, 1); 1342 } 1343 1344 void lx_irq_disable(struct lx6464es *chip) 1345 { 1346 snd_printdd("->lx_irq_disable\n"); 1347 lx_irq_set(chip, 0); 1348 } 1349