1 /* -*- linux-c -*- * 2 * 3 * ALSA driver for the digigram lx6464es interface 4 * low-level interface 5 * 6 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; see the file COPYING. If not, write to 20 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, 21 * Boston, MA 02111-1307, USA. 22 * 23 */ 24 25 /* #define RMH_DEBUG 1 */ 26 27 #include <linux/module.h> 28 #include <linux/pci.h> 29 #include <linux/delay.h> 30 31 #include "lx6464es.h" 32 #include "lx_core.h" 33 34 /* low-level register access */ 35 36 static const unsigned long dsp_port_offsets[] = { 37 0, 38 0x400, 39 0x401, 40 0x402, 41 0x403, 42 0x404, 43 0x405, 44 0x406, 45 0x407, 46 0x408, 47 0x409, 48 0x40a, 49 0x40b, 50 0x40c, 51 52 0x410, 53 0x411, 54 0x412, 55 0x413, 56 0x414, 57 0x415, 58 0x416, 59 60 0x420, 61 0x430, 62 0x431, 63 0x432, 64 0x433, 65 0x434, 66 0x440 67 }; 68 69 static void __iomem *lx_dsp_register(struct lx6464es *chip, int port) 70 { 71 void __iomem *base_address = chip->port_dsp_bar; 72 return base_address + dsp_port_offsets[port]*4; 73 } 74 75 unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port) 76 { 77 void __iomem *address = lx_dsp_register(chip, port); 78 return ioread32(address); 79 } 80 81 static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, 82 u32 len) 83 { 84 u32 __iomem *address = lx_dsp_register(chip, port); 85 int i; 86 87 /* we cannot use memcpy_fromio */ 88 for (i = 0; i != len; ++i) 89 data[i] = ioread32(address + i); 90 } 91 92 93 void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data) 94 { 95 void __iomem *address = lx_dsp_register(chip, port); 96 iowrite32(data, address); 97 } 98 99 static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, 100 const u32 *data, u32 len) 101 { 102 u32 __iomem *address = lx_dsp_register(chip, port); 103 int i; 104 105 /* we cannot use memcpy_to */ 106 for (i = 0; i != len; ++i) 107 iowrite32(data[i], address + i); 108 } 109 110 111 static const unsigned long plx_port_offsets[] = { 112 0x04, 113 0x40, 114 0x44, 115 0x48, 116 0x4c, 117 0x50, 118 0x54, 119 0x58, 120 0x5c, 121 0x64, 122 0x68, 123 0x6C 124 }; 125 126 static void __iomem *lx_plx_register(struct lx6464es *chip, int port) 127 { 128 void __iomem *base_address = chip->port_plx_remapped; 129 return base_address + plx_port_offsets[port]; 130 } 131 132 unsigned long lx_plx_reg_read(struct lx6464es *chip, int port) 133 { 134 void __iomem *address = lx_plx_register(chip, port); 135 return ioread32(address); 136 } 137 138 void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data) 139 { 140 void __iomem *address = lx_plx_register(chip, port); 141 iowrite32(data, address); 142 } 143 144 u32 lx_plx_mbox_read(struct lx6464es *chip, int mbox_nr) 145 { 146 int index; 147 148 switch (mbox_nr) { 149 case 1: 150 index = ePLX_MBOX1; break; 151 case 2: 152 index = ePLX_MBOX2; break; 153 case 3: 154 index = ePLX_MBOX3; break; 155 case 4: 156 index = ePLX_MBOX4; break; 157 case 5: 158 index = ePLX_MBOX5; break; 159 case 6: 160 index = ePLX_MBOX6; break; 161 case 7: 162 index = ePLX_MBOX7; break; 163 case 0: /* reserved for HF flags */ 164 snd_BUG(); 165 default: 166 return 0xdeadbeef; 167 } 168 169 return lx_plx_reg_read(chip, index); 170 } 171 172 int lx_plx_mbox_write(struct lx6464es *chip, int mbox_nr, u32 value) 173 { 174 int index = -1; 175 176 switch (mbox_nr) { 177 case 1: 178 index = ePLX_MBOX1; break; 179 case 3: 180 index = ePLX_MBOX3; break; 181 case 4: 182 index = ePLX_MBOX4; break; 183 case 5: 184 index = ePLX_MBOX5; break; 185 case 6: 186 index = ePLX_MBOX6; break; 187 case 7: 188 index = ePLX_MBOX7; break; 189 case 0: /* reserved for HF flags */ 190 case 2: /* reserved for Pipe States 191 * the DSP keeps an image of it */ 192 snd_BUG(); 193 return -EBADRQC; 194 } 195 196 lx_plx_reg_write(chip, index, value); 197 return 0; 198 } 199 200 201 /* rmh */ 202 203 #ifdef CONFIG_SND_DEBUG 204 #define CMD_NAME(a) a 205 #else 206 #define CMD_NAME(a) NULL 207 #endif 208 209 #define Reg_CSM_MR 0x00000002 210 #define Reg_CSM_MC 0x00000001 211 212 struct dsp_cmd_info { 213 u32 dcCodeOp; /* Op Code of the command (usually 1st 24-bits 214 * word).*/ 215 u16 dcCmdLength; /* Command length in words of 24 bits.*/ 216 u16 dcStatusType; /* Status type: 0 for fixed length, 1 for 217 * random. */ 218 u16 dcStatusLength; /* Status length (if fixed).*/ 219 char *dcOpName; 220 }; 221 222 /* 223 Initialization and control data for the Microblaze interface 224 - OpCode: 225 the opcode field of the command set at the proper offset 226 - CmdLength 227 the number of command words 228 - StatusType 229 offset in the status registers: 0 means that the return value may be 230 different from 0, and must be read 231 - StatusLength 232 the number of status words (in addition to the return value) 233 */ 234 235 static struct dsp_cmd_info dsp_commands[] = 236 { 237 { (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/ 238 , 1 , 0 /**/ , CMD_NAME("INFO_DEBUG") }, 239 { (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/ 240 , 1 , 2 /**/ , CMD_NAME("GET_SYS_CFG") }, 241 { (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/ 242 , 1 , 0 /**/ , CMD_NAME("SET_GRANULARITY") }, 243 { (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/ 244 , 1 , 0 /**/ , CMD_NAME("SET_TIMER_IRQ") }, 245 { (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/ 246 , 1 , 0 /*up to 10*/ , CMD_NAME("GET_EVENT") }, 247 { (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/ 248 , 1 , 2 /*up to 4*/ , CMD_NAME("GET_PIPES") }, 249 { (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/ 250 , 0 , 0 /**/ , CMD_NAME("ALLOCATE_PIPE") }, 251 { (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/ 252 , 0 , 0 /**/ , CMD_NAME("RELEASE_PIPE") }, 253 { (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/ 254 , 1 , MAX_STREAM_BUFFER , CMD_NAME("ASK_BUFFERS") }, 255 { (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/ 256 , 0 , 0 /*up to 2*/ , CMD_NAME("STOP_PIPE") }, 257 { (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/ 258 , 1 , 1 /*up to 2*/ , CMD_NAME("GET_PIPE_SPL_COUNT") }, 259 { (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/ 260 , 1 , 0 /**/ , CMD_NAME("TOGGLE_PIPE_STATE") }, 261 { (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/ 262 , 1 , 0 /**/ , CMD_NAME("DEF_STREAM") }, 263 { (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/ 264 , 1 , 0 /**/ , CMD_NAME("SET_MUTE") }, 265 { (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1/**/ 266 , 1 , 2 /**/ , CMD_NAME("GET_STREAM_SPL_COUNT") }, 267 { (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/ 268 , 0 , 1 /**/ , CMD_NAME("UPDATE_BUFFER") }, 269 { (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/ 270 , 1 , 4 /**/ , CMD_NAME("GET_BUFFER") }, 271 { (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/ 272 , 1 , 1 /*up to 4*/ , CMD_NAME("CANCEL_BUFFER") }, 273 { (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/ 274 , 1 , 1 /**/ , CMD_NAME("GET_PEAK") }, 275 { (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/ 276 , 1 , 0 /**/ , CMD_NAME("SET_STREAM_STATE") }, 277 }; 278 279 static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd) 280 { 281 snd_BUG_ON(cmd >= CMD_14_INVALID); 282 283 rmh->cmd[0] = dsp_commands[cmd].dcCodeOp; 284 rmh->cmd_len = dsp_commands[cmd].dcCmdLength; 285 rmh->stat_len = dsp_commands[cmd].dcStatusLength; 286 rmh->dsp_stat = dsp_commands[cmd].dcStatusType; 287 rmh->cmd_idx = cmd; 288 memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32)); 289 290 #ifdef CONFIG_SND_DEBUG 291 memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32)); 292 #endif 293 #ifdef RMH_DEBUG 294 rmh->cmd_idx = cmd; 295 #endif 296 } 297 298 #ifdef RMH_DEBUG 299 #define LXRMH "lx6464es rmh: " 300 static void lx_message_dump(struct lx_rmh *rmh) 301 { 302 u8 idx = rmh->cmd_idx; 303 int i; 304 305 snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName); 306 307 for (i = 0; i != rmh->cmd_len; ++i) 308 snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]); 309 310 for (i = 0; i != rmh->stat_len; ++i) 311 snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]); 312 snd_printk("\n"); 313 } 314 #else 315 static inline void lx_message_dump(struct lx_rmh *rmh) 316 {} 317 #endif 318 319 320 321 /* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */ 322 #define XILINX_TIMEOUT_MS 40 323 #define XILINX_POLL_NO_SLEEP 100 324 #define XILINX_POLL_ITERATIONS 150 325 326 327 static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh) 328 { 329 u32 reg = ED_DSP_TIMED_OUT; 330 int dwloop; 331 332 if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) { 333 snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg); 334 return -EBUSY; 335 } 336 337 /* write command */ 338 lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len); 339 340 /* MicoBlaze gogogo */ 341 lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC); 342 343 /* wait for device to answer */ 344 for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) { 345 if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) { 346 if (rmh->dsp_stat == 0) 347 reg = lx_dsp_reg_read(chip, eReg_CRM1); 348 else 349 reg = 0; 350 goto polling_successful; 351 } else 352 udelay(1); 353 } 354 snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send_atomic! " 355 "polling failed\n"); 356 357 polling_successful: 358 if ((reg & ERROR_VALUE) == 0) { 359 /* read response */ 360 if (rmh->stat_len) { 361 snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1)); 362 lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat, 363 rmh->stat_len); 364 } 365 } else 366 snd_printk(LXP "rmh error: %08x\n", reg); 367 368 /* clear Reg_CSM_MR */ 369 lx_dsp_reg_write(chip, eReg_CSM, 0); 370 371 switch (reg) { 372 case ED_DSP_TIMED_OUT: 373 snd_printk(KERN_WARNING LXP "lx_message_send: dsp timeout\n"); 374 return -ETIMEDOUT; 375 376 case ED_DSP_CRASHED: 377 snd_printk(KERN_WARNING LXP "lx_message_send: dsp crashed\n"); 378 return -EAGAIN; 379 } 380 381 lx_message_dump(rmh); 382 383 return reg; 384 } 385 386 387 /* low-level dsp access */ 388 int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version) 389 { 390 u16 ret; 391 unsigned long flags; 392 393 spin_lock_irqsave(&chip->msg_lock, flags); 394 395 lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG); 396 ret = lx_message_send_atomic(chip, &chip->rmh); 397 398 *rdsp_version = chip->rmh.stat[1]; 399 spin_unlock_irqrestore(&chip->msg_lock, flags); 400 return ret; 401 } 402 403 int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq) 404 { 405 u16 ret = 0; 406 unsigned long flags; 407 u32 freq_raw = 0; 408 u32 freq = 0; 409 u32 frequency = 0; 410 411 spin_lock_irqsave(&chip->msg_lock, flags); 412 413 lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG); 414 ret = lx_message_send_atomic(chip, &chip->rmh); 415 416 if (ret == 0) { 417 freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET; 418 freq = freq_raw & XES_FREQ_COUNT8_MASK; 419 420 if ((freq < XES_FREQ_COUNT8_48_MAX) || 421 (freq > XES_FREQ_COUNT8_44_MIN)) 422 frequency = 0; /* unknown */ 423 else if (freq >= XES_FREQ_COUNT8_44_MAX) 424 frequency = 44100; 425 else 426 frequency = 48000; 427 } 428 429 spin_unlock_irqrestore(&chip->msg_lock, flags); 430 431 *rfreq = frequency * chip->freq_ratio; 432 433 return ret; 434 } 435 436 int lx_dsp_get_mac(struct lx6464es *chip) 437 { 438 u32 macmsb, maclsb; 439 440 macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF; 441 maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF; 442 443 /* todo: endianess handling */ 444 chip->mac_address[5] = ((u8 *)(&maclsb))[0]; 445 chip->mac_address[4] = ((u8 *)(&maclsb))[1]; 446 chip->mac_address[3] = ((u8 *)(&maclsb))[2]; 447 chip->mac_address[2] = ((u8 *)(&macmsb))[0]; 448 chip->mac_address[1] = ((u8 *)(&macmsb))[1]; 449 chip->mac_address[0] = ((u8 *)(&macmsb))[2]; 450 451 return 0; 452 } 453 454 455 int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran) 456 { 457 unsigned long flags; 458 int ret; 459 460 spin_lock_irqsave(&chip->msg_lock, flags); 461 462 lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY); 463 chip->rmh.cmd[0] |= gran; 464 465 ret = lx_message_send_atomic(chip, &chip->rmh); 466 spin_unlock_irqrestore(&chip->msg_lock, flags); 467 return ret; 468 } 469 470 int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data) 471 { 472 unsigned long flags; 473 int ret; 474 475 spin_lock_irqsave(&chip->msg_lock, flags); 476 477 lx_message_init(&chip->rmh, CMD_04_GET_EVENT); 478 chip->rmh.stat_len = 9; /* we don't necessarily need the full length */ 479 480 ret = lx_message_send_atomic(chip, &chip->rmh); 481 482 if (!ret) 483 memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32)); 484 485 spin_unlock_irqrestore(&chip->msg_lock, flags); 486 return ret; 487 } 488 489 #define CSES_TIMEOUT 100 /* microseconds */ 490 #define CSES_CE 0x0001 491 #define CSES_BROADCAST 0x0002 492 #define CSES_UPDATE_LDSV 0x0004 493 494 int lx_dsp_es_check_pipeline(struct lx6464es *chip) 495 { 496 int i; 497 498 for (i = 0; i != CSES_TIMEOUT; ++i) { 499 /* 500 * le bit CSES_UPDATE_LDSV est à 1 dés que le macprog 501 * est pret. il re-passe à 0 lorsque le premier read a 502 * été fait. pour l'instant on retire le test car ce bit 503 * passe a 1 environ 200 à 400 ms aprés que le registre 504 * confES à été écrit (kick du xilinx ES). 505 * 506 * On ne teste que le bit CE. 507 * */ 508 509 u32 cses = lx_dsp_reg_read(chip, eReg_CSES); 510 511 if ((cses & CSES_CE) == 0) 512 return 0; 513 514 udelay(1); 515 } 516 517 return -ETIMEDOUT; 518 } 519 520 521 #define PIPE_INFO_TO_CMD(capture, pipe) \ 522 ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET) 523 524 525 526 /* low-level pipe handling */ 527 int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture, 528 int channels) 529 { 530 int err; 531 unsigned long flags; 532 533 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 534 535 spin_lock_irqsave(&chip->msg_lock, flags); 536 lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE); 537 538 chip->rmh.cmd[0] |= pipe_cmd; 539 chip->rmh.cmd[0] |= channels; 540 541 err = lx_message_send_atomic(chip, &chip->rmh); 542 spin_unlock_irqrestore(&chip->msg_lock, flags); 543 544 if (err != 0) 545 snd_printk(KERN_ERR "lx6464es: could not allocate pipe\n"); 546 547 return err; 548 } 549 550 int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture) 551 { 552 int err; 553 unsigned long flags; 554 555 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 556 557 spin_lock_irqsave(&chip->msg_lock, flags); 558 lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE); 559 560 chip->rmh.cmd[0] |= pipe_cmd; 561 562 err = lx_message_send_atomic(chip, &chip->rmh); 563 spin_unlock_irqrestore(&chip->msg_lock, flags); 564 565 return err; 566 } 567 568 int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture, 569 u32 *r_needed, u32 *r_freed, u32 *size_array) 570 { 571 int err; 572 unsigned long flags; 573 574 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 575 576 #ifdef CONFIG_SND_DEBUG 577 if (size_array) 578 memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER); 579 #endif 580 581 *r_needed = 0; 582 *r_freed = 0; 583 584 spin_lock_irqsave(&chip->msg_lock, flags); 585 lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS); 586 587 chip->rmh.cmd[0] |= pipe_cmd; 588 589 err = lx_message_send_atomic(chip, &chip->rmh); 590 591 if (!err) { 592 int i; 593 for (i = 0; i < MAX_STREAM_BUFFER; ++i) { 594 u32 stat = chip->rmh.stat[i]; 595 if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) { 596 /* finished */ 597 *r_freed += 1; 598 if (size_array) 599 size_array[i] = stat & MASK_DATA_SIZE; 600 } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET)) 601 == 0) 602 /* free */ 603 *r_needed += 1; 604 } 605 606 #if 0 607 snd_printdd(LXP "CMD_08_ASK_BUFFERS: needed %d, freed %d\n", 608 *r_needed, *r_freed); 609 for (i = 0; i < MAX_STREAM_BUFFER; ++i) { 610 for (i = 0; i != chip->rmh.stat_len; ++i) 611 snd_printdd(" stat[%d]: %x, %x\n", i, 612 chip->rmh.stat[i], 613 chip->rmh.stat[i] & MASK_DATA_SIZE); 614 } 615 #endif 616 } 617 618 spin_unlock_irqrestore(&chip->msg_lock, flags); 619 return err; 620 } 621 622 623 int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture) 624 { 625 int err; 626 unsigned long flags; 627 628 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 629 630 spin_lock_irqsave(&chip->msg_lock, flags); 631 lx_message_init(&chip->rmh, CMD_09_STOP_PIPE); 632 633 chip->rmh.cmd[0] |= pipe_cmd; 634 635 err = lx_message_send_atomic(chip, &chip->rmh); 636 637 spin_unlock_irqrestore(&chip->msg_lock, flags); 638 return err; 639 } 640 641 static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture) 642 { 643 int err; 644 unsigned long flags; 645 646 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 647 648 spin_lock_irqsave(&chip->msg_lock, flags); 649 lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE); 650 651 chip->rmh.cmd[0] |= pipe_cmd; 652 653 err = lx_message_send_atomic(chip, &chip->rmh); 654 655 spin_unlock_irqrestore(&chip->msg_lock, flags); 656 return err; 657 } 658 659 660 int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture) 661 { 662 int err; 663 664 err = lx_pipe_wait_for_idle(chip, pipe, is_capture); 665 if (err < 0) 666 return err; 667 668 err = lx_pipe_toggle_state(chip, pipe, is_capture); 669 670 return err; 671 } 672 673 int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture) 674 { 675 int err = 0; 676 677 err = lx_pipe_wait_for_start(chip, pipe, is_capture); 678 if (err < 0) 679 return err; 680 681 err = lx_pipe_toggle_state(chip, pipe, is_capture); 682 683 return err; 684 } 685 686 687 int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture, 688 u64 *rsample_count) 689 { 690 int err; 691 unsigned long flags; 692 693 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 694 695 spin_lock_irqsave(&chip->msg_lock, flags); 696 lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT); 697 698 chip->rmh.cmd[0] |= pipe_cmd; 699 chip->rmh.stat_len = 2; /* need all words here! */ 700 701 err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */ 702 703 if (err != 0) 704 snd_printk(KERN_ERR 705 "lx6464es: could not query pipe's sample count\n"); 706 else { 707 *rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI) 708 << 24) /* hi part */ 709 + chip->rmh.stat[1]; /* lo part */ 710 } 711 712 spin_unlock_irqrestore(&chip->msg_lock, flags); 713 return err; 714 } 715 716 int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate) 717 { 718 int err; 719 unsigned long flags; 720 721 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 722 723 spin_lock_irqsave(&chip->msg_lock, flags); 724 lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT); 725 726 chip->rmh.cmd[0] |= pipe_cmd; 727 728 err = lx_message_send_atomic(chip, &chip->rmh); 729 730 if (err != 0) 731 snd_printk(KERN_ERR "lx6464es: could not query pipe's state\n"); 732 else 733 *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F; 734 735 spin_unlock_irqrestore(&chip->msg_lock, flags); 736 return err; 737 } 738 739 static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe, 740 int is_capture, u16 state) 741 { 742 int i; 743 744 /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms: 745 * timeout 50 ms */ 746 for (i = 0; i != 50; ++i) { 747 u16 current_state; 748 int err = lx_pipe_state(chip, pipe, is_capture, ¤t_state); 749 750 if (err < 0) 751 return err; 752 753 if (current_state == state) 754 return 0; 755 756 mdelay(1); 757 } 758 759 return -ETIMEDOUT; 760 } 761 762 int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture) 763 { 764 return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN); 765 } 766 767 int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture) 768 { 769 return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE); 770 } 771 772 /* low-level stream handling */ 773 int lx_stream_set_state(struct lx6464es *chip, u32 pipe, 774 int is_capture, enum stream_state_t state) 775 { 776 int err; 777 unsigned long flags; 778 779 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 780 781 spin_lock_irqsave(&chip->msg_lock, flags); 782 lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE); 783 784 chip->rmh.cmd[0] |= pipe_cmd; 785 chip->rmh.cmd[0] |= state; 786 787 err = lx_message_send_atomic(chip, &chip->rmh); 788 spin_unlock_irqrestore(&chip->msg_lock, flags); 789 790 return err; 791 } 792 793 int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime, 794 u32 pipe, int is_capture) 795 { 796 int err; 797 unsigned long flags; 798 799 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 800 801 u32 channels = runtime->channels; 802 803 if (runtime->channels != channels) 804 snd_printk(KERN_ERR LXP "channel count mismatch: %d vs %d", 805 runtime->channels, channels); 806 807 spin_lock_irqsave(&chip->msg_lock, flags); 808 lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM); 809 810 chip->rmh.cmd[0] |= pipe_cmd; 811 812 if (runtime->sample_bits == 16) 813 /* 16 bit format */ 814 chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET); 815 816 if (snd_pcm_format_little_endian(runtime->format)) 817 /* little endian/intel format */ 818 chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET); 819 820 chip->rmh.cmd[0] |= channels-1; 821 822 err = lx_message_send_atomic(chip, &chip->rmh); 823 spin_unlock_irqrestore(&chip->msg_lock, flags); 824 825 return err; 826 } 827 828 int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture, 829 int *rstate) 830 { 831 int err; 832 unsigned long flags; 833 834 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 835 836 spin_lock_irqsave(&chip->msg_lock, flags); 837 lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT); 838 839 chip->rmh.cmd[0] |= pipe_cmd; 840 841 err = lx_message_send_atomic(chip, &chip->rmh); 842 843 *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE; 844 845 spin_unlock_irqrestore(&chip->msg_lock, flags); 846 return err; 847 } 848 849 int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture, 850 u64 *r_bytepos) 851 { 852 int err; 853 unsigned long flags; 854 855 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 856 857 spin_lock_irqsave(&chip->msg_lock, flags); 858 lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT); 859 860 chip->rmh.cmd[0] |= pipe_cmd; 861 862 err = lx_message_send_atomic(chip, &chip->rmh); 863 864 *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI) 865 << 32) /* hi part */ 866 + chip->rmh.stat[1]; /* lo part */ 867 868 spin_unlock_irqrestore(&chip->msg_lock, flags); 869 return err; 870 } 871 872 /* low-level buffer handling */ 873 int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture, 874 u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi, 875 u32 *r_buffer_index) 876 { 877 int err; 878 unsigned long flags; 879 880 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 881 882 spin_lock_irqsave(&chip->msg_lock, flags); 883 lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER); 884 885 chip->rmh.cmd[0] |= pipe_cmd; 886 chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */ 887 888 /* todo: pause request, circular buffer */ 889 890 chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE; 891 chip->rmh.cmd[2] = buf_address_lo; 892 893 if (buf_address_hi) { 894 chip->rmh.cmd_len = 4; 895 chip->rmh.cmd[3] = buf_address_hi; 896 chip->rmh.cmd[0] |= BF_64BITS_ADR; 897 } 898 899 err = lx_message_send_atomic(chip, &chip->rmh); 900 901 if (err == 0) { 902 *r_buffer_index = chip->rmh.stat[0]; 903 goto done; 904 } 905 906 if (err == EB_RBUFFERS_TABLE_OVERFLOW) 907 snd_printk(LXP "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n"); 908 909 if (err == EB_INVALID_STREAM) 910 snd_printk(LXP "lx_buffer_give EB_INVALID_STREAM\n"); 911 912 if (err == EB_CMD_REFUSED) 913 snd_printk(LXP "lx_buffer_give EB_CMD_REFUSED\n"); 914 915 done: 916 spin_unlock_irqrestore(&chip->msg_lock, flags); 917 return err; 918 } 919 920 int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture, 921 u32 *r_buffer_size) 922 { 923 int err; 924 unsigned long flags; 925 926 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 927 928 spin_lock_irqsave(&chip->msg_lock, flags); 929 lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER); 930 931 chip->rmh.cmd[0] |= pipe_cmd; 932 chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the 933 * microblaze will seek for it */ 934 935 err = lx_message_send_atomic(chip, &chip->rmh); 936 937 if (err == 0) 938 *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE; 939 940 spin_unlock_irqrestore(&chip->msg_lock, flags); 941 return err; 942 } 943 944 int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture, 945 u32 buffer_index) 946 { 947 int err; 948 unsigned long flags; 949 950 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); 951 952 spin_lock_irqsave(&chip->msg_lock, flags); 953 lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER); 954 955 chip->rmh.cmd[0] |= pipe_cmd; 956 chip->rmh.cmd[0] |= buffer_index; 957 958 err = lx_message_send_atomic(chip, &chip->rmh); 959 960 spin_unlock_irqrestore(&chip->msg_lock, flags); 961 return err; 962 } 963 964 965 /* low-level gain/peak handling 966 * 967 * \todo: can we unmute capture/playback channels independently? 968 * 969 * */ 970 int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute) 971 { 972 int err; 973 unsigned long flags; 974 975 /* bit set to 1: channel muted */ 976 u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU; 977 978 spin_lock_irqsave(&chip->msg_lock, flags); 979 lx_message_init(&chip->rmh, CMD_0D_SET_MUTE); 980 981 chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0); 982 983 chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32); /* hi part */ 984 chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */ 985 986 snd_printk("mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1], 987 chip->rmh.cmd[2]); 988 989 err = lx_message_send_atomic(chip, &chip->rmh); 990 991 spin_unlock_irqrestore(&chip->msg_lock, flags); 992 return err; 993 } 994 995 static u32 peak_map[] = { 996 0x00000109, /* -90.308dB */ 997 0x0000083B, /* -72.247dB */ 998 0x000020C4, /* -60.205dB */ 999 0x00008273, /* -48.030dB */ 1000 0x00020756, /* -36.005dB */ 1001 0x00040C37, /* -30.001dB */ 1002 0x00081385, /* -24.002dB */ 1003 0x00101D3F, /* -18.000dB */ 1004 0x0016C310, /* -15.000dB */ 1005 0x002026F2, /* -12.001dB */ 1006 0x002D6A86, /* -9.000dB */ 1007 0x004026E6, /* -6.004dB */ 1008 0x005A9DF6, /* -3.000dB */ 1009 0x0065AC8B, /* -2.000dB */ 1010 0x00721481, /* -1.000dB */ 1011 0x007FFFFF, /* FS */ 1012 }; 1013 1014 int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels, 1015 u32 *r_levels) 1016 { 1017 int err = 0; 1018 unsigned long flags; 1019 int i; 1020 spin_lock_irqsave(&chip->msg_lock, flags); 1021 1022 for (i = 0; i < channels; i += 4) { 1023 u32 s0, s1, s2, s3; 1024 1025 lx_message_init(&chip->rmh, CMD_12_GET_PEAK); 1026 chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i); 1027 1028 err = lx_message_send_atomic(chip, &chip->rmh); 1029 1030 if (err == 0) { 1031 s0 = peak_map[chip->rmh.stat[0] & 0x0F]; 1032 s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf]; 1033 s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf]; 1034 s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf]; 1035 } else 1036 s0 = s1 = s2 = s3 = 0; 1037 1038 r_levels[0] = s0; 1039 r_levels[1] = s1; 1040 r_levels[2] = s2; 1041 r_levels[3] = s3; 1042 1043 r_levels += 4; 1044 } 1045 1046 spin_unlock_irqrestore(&chip->msg_lock, flags); 1047 return err; 1048 } 1049 1050 /* interrupt handling */ 1051 #define PCX_IRQ_NONE 0 1052 #define IRQCS_ACTIVE_PCIDB 0x00002000L /* Bit nø 13 */ 1053 #define IRQCS_ENABLE_PCIIRQ 0x00000100L /* Bit nø 08 */ 1054 #define IRQCS_ENABLE_PCIDB 0x00000200L /* Bit nø 09 */ 1055 1056 static u32 lx_interrupt_test_ack(struct lx6464es *chip) 1057 { 1058 u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS); 1059 1060 /* Test if PCI Doorbell interrupt is active */ 1061 if (irqcs & IRQCS_ACTIVE_PCIDB) { 1062 u32 temp; 1063 irqcs = PCX_IRQ_NONE; 1064 1065 while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) { 1066 /* RAZ interrupt */ 1067 irqcs |= temp; 1068 lx_plx_reg_write(chip, ePLX_L2PCIDB, temp); 1069 } 1070 1071 return irqcs; 1072 } 1073 return PCX_IRQ_NONE; 1074 } 1075 1076 static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc, 1077 int *r_async_pending, int *r_async_escmd) 1078 { 1079 u32 irq_async; 1080 u32 irqsrc = lx_interrupt_test_ack(chip); 1081 1082 if (irqsrc == PCX_IRQ_NONE) 1083 return 0; 1084 1085 *r_irqsrc = irqsrc; 1086 1087 irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response 1088 * (set by xilinx) + EOB */ 1089 1090 if (irq_async & MASK_SYS_STATUS_ESA) { 1091 irq_async &= ~MASK_SYS_STATUS_ESA; 1092 *r_async_escmd = 1; 1093 } 1094 1095 if (irq_async) { 1096 /* snd_printd("interrupt: async event pending\n"); */ 1097 *r_async_pending = 1; 1098 } 1099 1100 return 1; 1101 } 1102 1103 static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc, 1104 int *r_freq_changed, 1105 u64 *r_notified_in_pipe_mask, 1106 u64 *r_notified_out_pipe_mask) 1107 { 1108 int err; 1109 u32 stat[9]; /* answer from CMD_04_GET_EVENT */ 1110 1111 /* On peut optimiser pour ne pas lire les evenements vides 1112 * les mots de réponse sont dans l'ordre suivant : 1113 * Stat[0] mot de status général 1114 * Stat[1] fin de buffer OUT pF 1115 * Stat[2] fin de buffer OUT pf 1116 * Stat[3] fin de buffer IN pF 1117 * Stat[4] fin de buffer IN pf 1118 * Stat[5] underrun poid fort 1119 * Stat[6] underrun poid faible 1120 * Stat[7] overrun poid fort 1121 * Stat[8] overrun poid faible 1122 * */ 1123 1124 u64 orun_mask; 1125 u64 urun_mask; 1126 #if 0 1127 int has_underrun = (irqsrc & MASK_SYS_STATUS_URUN) ? 1 : 0; 1128 int has_overrun = (irqsrc & MASK_SYS_STATUS_ORUN) ? 1 : 0; 1129 #endif 1130 int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0; 1131 int eb_pending_in = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0; 1132 1133 *r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0; 1134 1135 err = lx_dsp_read_async_events(chip, stat); 1136 if (err < 0) 1137 return err; 1138 1139 if (eb_pending_in) { 1140 *r_notified_in_pipe_mask = ((u64)stat[3] << 32) 1141 + stat[4]; 1142 snd_printdd(LXP "interrupt: EOBI pending %llx\n", 1143 *r_notified_in_pipe_mask); 1144 } 1145 if (eb_pending_out) { 1146 *r_notified_out_pipe_mask = ((u64)stat[1] << 32) 1147 + stat[2]; 1148 snd_printdd(LXP "interrupt: EOBO pending %llx\n", 1149 *r_notified_out_pipe_mask); 1150 } 1151 1152 orun_mask = ((u64)stat[7] << 32) + stat[8]; 1153 urun_mask = ((u64)stat[5] << 32) + stat[6]; 1154 1155 /* todo: handle xrun notification */ 1156 1157 return err; 1158 } 1159 1160 static int lx_interrupt_request_new_buffer(struct lx6464es *chip, 1161 struct lx_stream *lx_stream) 1162 { 1163 struct snd_pcm_substream *substream = lx_stream->stream; 1164 const unsigned int is_capture = lx_stream->is_capture; 1165 int err; 1166 unsigned long flags; 1167 1168 const u32 channels = substream->runtime->channels; 1169 const u32 bytes_per_frame = channels * 3; 1170 const u32 period_size = substream->runtime->period_size; 1171 const u32 period_bytes = period_size * bytes_per_frame; 1172 const u32 pos = lx_stream->frame_pos; 1173 const u32 next_pos = ((pos+1) == substream->runtime->periods) ? 1174 0 : pos + 1; 1175 1176 dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes; 1177 u32 buf_hi = 0; 1178 u32 buf_lo = 0; 1179 u32 buffer_index = 0; 1180 1181 u32 needed, freed; 1182 u32 size_array[MAX_STREAM_BUFFER]; 1183 1184 snd_printdd("->lx_interrupt_request_new_buffer\n"); 1185 1186 spin_lock_irqsave(&chip->lock, flags); 1187 1188 err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array); 1189 snd_printdd(LXP "interrupt: needed %d, freed %d\n", needed, freed); 1190 1191 unpack_pointer(buf, &buf_lo, &buf_hi); 1192 err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi, 1193 &buffer_index); 1194 snd_printdd(LXP "interrupt: gave buffer index %x on 0x%lx (%d bytes)\n", 1195 buffer_index, (unsigned long)buf, period_bytes); 1196 1197 lx_stream->frame_pos = next_pos; 1198 spin_unlock_irqrestore(&chip->lock, flags); 1199 1200 return err; 1201 } 1202 1203 void lx_tasklet_playback(unsigned long data) 1204 { 1205 struct lx6464es *chip = (struct lx6464es *)data; 1206 struct lx_stream *lx_stream = &chip->playback_stream; 1207 int err; 1208 1209 snd_printdd("->lx_tasklet_playback\n"); 1210 1211 err = lx_interrupt_request_new_buffer(chip, lx_stream); 1212 if (err < 0) 1213 snd_printk(KERN_ERR LXP 1214 "cannot request new buffer for playback\n"); 1215 1216 snd_pcm_period_elapsed(lx_stream->stream); 1217 } 1218 1219 void lx_tasklet_capture(unsigned long data) 1220 { 1221 struct lx6464es *chip = (struct lx6464es *)data; 1222 struct lx_stream *lx_stream = &chip->capture_stream; 1223 int err; 1224 1225 snd_printdd("->lx_tasklet_capture\n"); 1226 err = lx_interrupt_request_new_buffer(chip, lx_stream); 1227 if (err < 0) 1228 snd_printk(KERN_ERR LXP 1229 "cannot request new buffer for capture\n"); 1230 1231 snd_pcm_period_elapsed(lx_stream->stream); 1232 } 1233 1234 1235 1236 static int lx_interrupt_handle_audio_transfer(struct lx6464es *chip, 1237 u64 notified_in_pipe_mask, 1238 u64 notified_out_pipe_mask) 1239 { 1240 int err = 0; 1241 1242 if (notified_in_pipe_mask) { 1243 snd_printdd(LXP "requesting audio transfer for capture\n"); 1244 tasklet_hi_schedule(&chip->tasklet_capture); 1245 } 1246 1247 if (notified_out_pipe_mask) { 1248 snd_printdd(LXP "requesting audio transfer for playback\n"); 1249 tasklet_hi_schedule(&chip->tasklet_playback); 1250 } 1251 1252 return err; 1253 } 1254 1255 1256 irqreturn_t lx_interrupt(int irq, void *dev_id) 1257 { 1258 struct lx6464es *chip = dev_id; 1259 int async_pending, async_escmd; 1260 u32 irqsrc; 1261 1262 spin_lock(&chip->lock); 1263 1264 snd_printdd("**************************************************\n"); 1265 1266 if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) { 1267 spin_unlock(&chip->lock); 1268 snd_printdd("IRQ_NONE\n"); 1269 return IRQ_NONE; /* this device did not cause the interrupt */ 1270 } 1271 1272 if (irqsrc & MASK_SYS_STATUS_CMD_DONE) 1273 goto exit; 1274 1275 #if 0 1276 if (irqsrc & MASK_SYS_STATUS_EOBI) 1277 snd_printdd(LXP "interrupt: EOBI\n"); 1278 1279 if (irqsrc & MASK_SYS_STATUS_EOBO) 1280 snd_printdd(LXP "interrupt: EOBO\n"); 1281 1282 if (irqsrc & MASK_SYS_STATUS_URUN) 1283 snd_printdd(LXP "interrupt: URUN\n"); 1284 1285 if (irqsrc & MASK_SYS_STATUS_ORUN) 1286 snd_printdd(LXP "interrupt: ORUN\n"); 1287 #endif 1288 1289 if (async_pending) { 1290 u64 notified_in_pipe_mask = 0; 1291 u64 notified_out_pipe_mask = 0; 1292 int freq_changed; 1293 int err; 1294 1295 /* handle async events */ 1296 err = lx_interrupt_handle_async_events(chip, irqsrc, 1297 &freq_changed, 1298 ¬ified_in_pipe_mask, 1299 ¬ified_out_pipe_mask); 1300 if (err) 1301 snd_printk(KERN_ERR LXP 1302 "error handling async events\n"); 1303 1304 err = lx_interrupt_handle_audio_transfer(chip, 1305 notified_in_pipe_mask, 1306 notified_out_pipe_mask 1307 ); 1308 if (err) 1309 snd_printk(KERN_ERR LXP 1310 "error during audio transfer\n"); 1311 } 1312 1313 if (async_escmd) { 1314 #if 0 1315 /* backdoor for ethersound commands 1316 * 1317 * for now, we do not need this 1318 * 1319 * */ 1320 1321 snd_printdd("lx6464es: interrupt requests escmd handling\n"); 1322 #endif 1323 } 1324 1325 exit: 1326 spin_unlock(&chip->lock); 1327 return IRQ_HANDLED; /* this device caused the interrupt */ 1328 } 1329 1330 1331 static void lx_irq_set(struct lx6464es *chip, int enable) 1332 { 1333 u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS); 1334 1335 /* enable/disable interrupts 1336 * 1337 * Set the Doorbell and PCI interrupt enable bits 1338 * 1339 * */ 1340 if (enable) 1341 reg |= (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB); 1342 else 1343 reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB); 1344 lx_plx_reg_write(chip, ePLX_IRQCS, reg); 1345 } 1346 1347 void lx_irq_enable(struct lx6464es *chip) 1348 { 1349 snd_printdd("->lx_irq_enable\n"); 1350 lx_irq_set(chip, 1); 1351 } 1352 1353 void lx_irq_disable(struct lx6464es *chip) 1354 { 1355 snd_printdd("->lx_irq_disable\n"); 1356 lx_irq_set(chip, 0); 1357 } 1358