// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- *
 *
 * ALSA driver for the digigram lx6464es interface
 * low-level interface
 *
 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
 */

/* #define RMH_DEBUG 1 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include "lx6464es.h"
#include "lx_core.h"

/* low-level register access */

static const unsigned long dsp_port_offsets[] = {
        0,
        0x400,
        0x401,
        0x402,
        0x403,
        0x404,
        0x405,
        0x406,
        0x407,
        0x408,
        0x409,
        0x40a,
        0x40b,
        0x40c,

        0x410,
        0x411,
        0x412,
        0x413,
        0x414,
        0x415,
        0x416,

        0x420,
        0x430,
        0x431,
        0x432,
        0x433,
        0x434,
        0x440
};
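
/*
 * The offsets above index 32-bit DSP registers; lx_dsp_register() below
 * scales them by 4 to obtain a byte offset into the DSP BAR.
 */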

static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_dsp_bar;
        return base_address + dsp_port_offsets[port]*4;
}

unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_dsp_register(chip, port);
        return ioread32(address);
}

static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
                               u32 len)
{
        u32 __iomem *address = lx_dsp_register(chip, port);
        int i;

        /* we cannot use memcpy_fromio */
        for (i = 0; i != len; ++i)
                data[i] = ioread32(address + i);
}


void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
{
        void __iomem *address = lx_dsp_register(chip, port);
        iowrite32(data, address);
}

static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
                                const u32 *data, u32 len)
{
        u32 __iomem *address = lx_dsp_register(chip, port);
        int i;

        /* we cannot use memcpy_toio */
        for (i = 0; i != len; ++i)
                iowrite32(data[i], address + i);
}


static const unsigned long plx_port_offsets[] = {
        0x04,
        0x40,
        0x44,
        0x48,
        0x4c,
        0x50,
        0x54,
        0x58,
        0x5c,
        0x64,
        0x68,
        0x6C
};

static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_plx_remapped;
        return base_address + plx_port_offsets[port];
}

unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_plx_register(chip, port);
        return ioread32(address);
}

void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
{
        void __iomem *address = lx_plx_register(chip, port);
        iowrite32(data, address);
}

/* rmh */

#ifdef CONFIG_SND_DEBUG
#define CMD_NAME(a) a
#else
#define CMD_NAME(a) NULL
#endif

#define Reg_CSM_MR      0x00000002
#define Reg_CSM_MC      0x00000001

struct dsp_cmd_info {
        u32   dcCodeOp;         /* Op Code of the command (usually 1st 24-bits
                                 * word). */
        u16   dcCmdLength;      /* Command length in words of 24 bits. */
        u16   dcStatusType;     /* Status type: 0 for fixed length, 1 for
                                 * random. */
        u16   dcStatusLength;   /* Status length (if fixed). */
        char *dcOpName;
};

/*
  Initialization and control data for the Microblaze interface
  - OpCode:
    the opcode field of the command set at the proper offset
  - CmdLength
    the number of command words
  - StatusType
    offset in the status registers: 0 means that the return value may be
    different from 0, and must be read
  - StatusLength
    the number of status words (in addition to the return value)
*/

static const struct dsp_cmd_info dsp_commands[] =
{
        { (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/
          , 1 , 0 /**/                  , CMD_NAME("INFO_DEBUG") },
        { (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /**/                  , CMD_NAME("GET_SYS_CFG") },
        { (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/                  , CMD_NAME("SET_GRANULARITY") },
        { (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/                  , CMD_NAME("SET_TIMER_IRQ") },
        { (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /*up to 10*/          , CMD_NAME("GET_EVENT") },
        { (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /*up to 4*/           , CMD_NAME("GET_PIPES") },
        { (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /**/                  , CMD_NAME("ALLOCATE_PIPE") },
        { (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /**/                  , CMD_NAME("RELEASE_PIPE") },
        { (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/
          , 1 , MAX_STREAM_BUFFER       , CMD_NAME("ASK_BUFFERS") },
        { (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /*up to 2*/           , CMD_NAME("STOP_PIPE") },
        { (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /*up to 2*/           , CMD_NAME("GET_PIPE_SPL_COUNT") },
        { (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/
          , 1 , 0 /**/                  , CMD_NAME("TOGGLE_PIPE_STATE") },
        { (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/
          , 1 , 0 /**/                  , CMD_NAME("DEF_STREAM") },
        { (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/
          , 1 , 0 /**/                  , CMD_NAME("SET_MUTE") },
        { (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /**/                  , CMD_NAME("GET_STREAM_SPL_COUNT") },
        { (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/
          , 0 , 1 /**/                  , CMD_NAME("UPDATE_BUFFER") },
        { (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/
          , 1 , 4 /**/                  , CMD_NAME("GET_BUFFER") },
        { (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /*up to 4*/           , CMD_NAME("CANCEL_BUFFER") },
        { (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /**/                  , CMD_NAME("GET_PEAK") },
        { (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/                  , CMD_NAME("SET_STREAM_STATE") },
};

static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
{
        snd_BUG_ON(cmd >= CMD_14_INVALID);

        rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
        rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
        rmh->stat_len = dsp_commands[cmd].dcStatusLength;
        rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
        rmh->cmd_idx = cmd;
        memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));

#ifdef CONFIG_SND_DEBUG
        memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
#endif
#ifdef RMH_DEBUG
        rmh->cmd_idx = cmd;
#endif
}
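
/*
 * All host-to-DSP commands below follow the same pattern: take the message
 * lock, initialize chip->rmh from dsp_commands[], OR the command-specific
 * arguments into cmd[0] (and fill cmd[1..] where needed), send the message
 * and read back the status words.  A minimal sketch (illustrative only, it
 * mirrors lx_dsp_set_granularity() further down):
 *
 *      mutex_lock(&chip->msg_lock);
 *      lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
 *      chip->rmh.cmd[0] |= granularity;
 *      err = lx_message_send_atomic(chip, &chip->rmh);
 *      mutex_unlock(&chip->msg_lock);
 */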

#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
static void lx_message_dump(struct lx_rmh *rmh)
{
        u8 idx = rmh->cmd_idx;
        int i;

        pr_debug(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

        for (i = 0; i != rmh->cmd_len; ++i)
                pr_debug(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

        for (i = 0; i != rmh->stat_len; ++i)
                pr_debug(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
        pr_debug("\n");
}
#else
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif



/* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */
#define XILINX_TIMEOUT_MS       40
#define XILINX_POLL_NO_SLEEP    100
#define XILINX_POLL_ITERATIONS  150

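
/*
 * lx_message_send_atomic() implements the command/status handshake with the
 * embedded MicroBlaze: the command words are written to the eReg_CRM*
 * window, Reg_CSM_MC is raised to start execution, eReg_CSM is then polled
 * (with udelay()) until the DSP raises Reg_CSM_MR, the return value and any
 * status words are read back from eReg_CRM1/eReg_CRM2.., and eReg_CSM is
 * finally cleared.  Callers must hold chip->msg_lock.
 */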

static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
{
        u32 reg = ED_DSP_TIMED_OUT;
        int dwloop;

        if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
                dev_err(chip->card->dev, "PIOSendMessage eReg_CSM %x\n", reg);
                return -EBUSY;
        }

        /* write command */
        lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

        /* MicroBlaze gogogo */
        lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

        /* wait for device to answer */
        for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
                if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
                        if (rmh->dsp_stat == 0)
                                reg = lx_dsp_reg_read(chip, eReg_CRM1);
                        else
                                reg = 0;
                        goto polling_successful;
                } else
                        udelay(1);
        }
        dev_warn(chip->card->dev,
                 "TIMEOUT lx_message_send_atomic! polling failed\n");

polling_successful:
        if ((reg & ERROR_VALUE) == 0) {
                /* read response */
                if (rmh->stat_len) {
                        snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
                        lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
                                           rmh->stat_len);
                }
        } else
                dev_err(chip->card->dev, "rmh error: %08x\n", reg);

        /* clear Reg_CSM_MR */
        lx_dsp_reg_write(chip, eReg_CSM, 0);

        switch (reg) {
        case ED_DSP_TIMED_OUT:
                dev_warn(chip->card->dev, "lx_message_send: dsp timeout\n");
                return -ETIMEDOUT;

        case ED_DSP_CRASHED:
                dev_warn(chip->card->dev, "lx_message_send: dsp crashed\n");
                return -EAGAIN;
        }

        lx_message_dump(rmh);

        return reg;
}


/* low-level dsp access */
int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
        int ret;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        *rdsp_version = chip->rmh.stat[1];
        mutex_unlock(&chip->msg_lock);
        return ret;
}
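
/*
 * Usage sketch (illustrative only, not an additional caller in this file):
 *
 *      u32 dsp_version;
 *
 *      if (lx_dsp_get_version(chip, &dsp_version) == 0)
 *              dev_info(chip->card->dev, "DSP version: %x\n", dsp_version);
 */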

int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
{
        int ret = 0;
        u32 freq_raw = 0;
        u32 freq = 0;
        u32 frequency = 0;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (ret == 0) {
                freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
                freq = freq_raw & XES_FREQ_COUNT8_MASK;

                if ((freq < XES_FREQ_COUNT8_48_MAX) ||
                    (freq > XES_FREQ_COUNT8_44_MIN))
                        frequency = 0; /* unknown */
                else if (freq >= XES_FREQ_COUNT8_44_MAX)
                        frequency = 44100;
                else
                        frequency = 48000;
        }

        mutex_unlock(&chip->msg_lock);

        *rfreq = frequency * chip->freq_ratio;

        return ret;
}

int lx_dsp_get_mac(struct lx6464es *chip)
{
        u32 macmsb, maclsb;

        macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
        maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;

        /* todo: endianness handling */
        chip->mac_address[5] = ((u8 *)(&maclsb))[0];
        chip->mac_address[4] = ((u8 *)(&maclsb))[1];
        chip->mac_address[3] = ((u8 *)(&maclsb))[2];
        chip->mac_address[2] = ((u8 *)(&macmsb))[0];
        chip->mac_address[1] = ((u8 *)(&macmsb))[1];
        chip->mac_address[0] = ((u8 *)(&macmsb))[2];

        return 0;
}


int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
{
        int ret;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
        chip->rmh.cmd[0] |= gran;

        ret = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);
        return ret;
}

int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
{
        int ret;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
        chip->rmh.stat_len = 9; /* we don't necessarily need the full length */

        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (!ret)
                memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));

        mutex_unlock(&chip->msg_lock);
        return ret;
}

#define PIPE_INFO_TO_CMD(capture, pipe) \
        ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)
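
/*
 * Worked example: PIPE_INFO_TO_CMD(1, 2) expands to
 * ((u32)((u32)2 | ID_IS_CAPTURE) << ID_OFFSET), i.e. a command word that
 * selects capture pipe 2.  With capture == 0 only the pipe number is
 * shifted into place.
 */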

/* low-level pipe handling */
int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
                     int channels)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= channels;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        if (err != 0)
                dev_err(chip->card->dev, "could not allocate pipe\n");

        return err;
}
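
/*
 * lx_pipe_allocate() above and lx_pipe_release() below are meant to be used
 * as a pair.  A minimal sketch (illustrative only; pipe 0 with 2 channels
 * assumed):
 *
 *      err = lx_pipe_allocate(chip, 0, is_capture, 2);
 *      if (err == 0)
 *              ...
 *      err = lx_pipe_release(chip, 0, is_capture);
 */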

int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}

int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
                  u32 *r_needed, u32 *r_freed, u32 *size_array)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

#ifdef CONFIG_SND_DEBUG
        if (size_array)
                memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
#endif

        *r_needed = 0;
        *r_freed = 0;

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (!err) {
                int i;
                for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
                        u32 stat = chip->rmh.stat[i];
                        if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
                                /* finished */
                                *r_freed += 1;
                                if (size_array)
                                        size_array[i] = stat & MASK_DATA_SIZE;
                        } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
                                   == 0)
                                /* free */
                                *r_needed += 1;
                }

                dev_dbg(chip->card->dev,
                        "CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
                        *r_needed, *r_freed);
                for (i = 0; i < MAX_STREAM_BUFFER && i < chip->rmh.stat_len;
                     ++i) {
                        dev_dbg(chip->card->dev, " stat[%d]: %x, %x\n", i,
                                chip->rmh.stat[i],
                                chip->rmh.stat[i] & MASK_DATA_SIZE);
                }
        }

        mutex_unlock(&chip->msg_lock);
        return err;
}


int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        mutex_unlock(&chip->msg_lock);
        return err;
}

static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        mutex_unlock(&chip->msg_lock);
        return err;
}


int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;

        err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}

int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err = 0;

        err = lx_pipe_wait_for_start(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}
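
/*
 * Both lx_pipe_start() and lx_pipe_pause() are built on
 * CMD_0B_TOGGLE_PIPE_STATE: start waits for PSTATE_IDLE and pause waits for
 * PSTATE_RUN (see lx_pipe_wait_for_idle() and lx_pipe_wait_for_start()
 * below) before toggling the pipe state.
 */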

int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
                         u64 *rsample_count)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.stat_len = 2; /* need all words here! */

        err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */

        if (err != 0)
                dev_err(chip->card->dev,
                        "could not query pipe's sample count\n");
        else {
                *rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                                  << 24)        /* hi part */
                        + chip->rmh.stat[1];    /* lo part */
        }

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err != 0)
                dev_err(chip->card->dev, "could not query pipe's state\n");
        else
                *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;

        mutex_unlock(&chip->msg_lock);
        return err;
}

static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
                                  int is_capture, u16 state)
{
        int i;

        /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms:
         * timeout 50 ms */
        for (i = 0; i != 50; ++i) {
                u16 current_state;
                int err = lx_pipe_state(chip, pipe, is_capture, &current_state);

                if (err < 0)
                        return err;

                if (!err && current_state == state)
                        return 0;

                mdelay(1);
        }

        return -ETIMEDOUT;
}

int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}

int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}

/* low-level stream handling */
int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
                        int is_capture, enum stream_state_t state)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= state;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}

int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
                         u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
        u32 channels = runtime->channels;

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);

        chip->rmh.cmd[0] |= pipe_cmd;

        if (runtime->sample_bits == 16)
                /* 16 bit format */
                chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);

        if (snd_pcm_format_little_endian(runtime->format))
                /* little endian/intel format */
                chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);

        chip->rmh.cmd[0] |= channels-1;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}

int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
                    int *rstate)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
                              u64 *r_bytepos)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                      << 32)            /* hi part */
                + chip->rmh.stat[1];    /* lo part */

        mutex_unlock(&chip->msg_lock);
        return err;
}

/* low-level buffer handling */
int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
                   u32 *r_buffer_index)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */

        /* todo: pause request, circular buffer */

        chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
        chip->rmh.cmd[2] = buf_address_lo;

        if (buf_address_hi) {
                chip->rmh.cmd_len = 4;
                chip->rmh.cmd[3] = buf_address_hi;
                chip->rmh.cmd[0] |= BF_64BITS_ADR;
        }

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0) {
                *r_buffer_index = chip->rmh.stat[0];
                goto done;
        }

        if (err == EB_RBUFFERS_TABLE_OVERFLOW)
                dev_err(chip->card->dev,
                        "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");

        if (err == EB_INVALID_STREAM)
                dev_err(chip->card->dev,
                        "lx_buffer_give EB_INVALID_STREAM\n");

        if (err == EB_CMD_REFUSED)
                dev_err(chip->card->dev,
                        "lx_buffer_give EB_CMD_REFUSED\n");

done:
        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 *r_buffer_size)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the
                                             * microblaze will seek for it */

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0)
                *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
                     u32 buffer_index)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= buffer_index;

        err = lx_message_send_atomic(chip, &chip->rmh);

        mutex_unlock(&chip->msg_lock);
        return err;
}


/* low-level gain/peak handling
 *
 * \todo: can we unmute capture/playback channels independently?
 *
 * */
int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
{
        int err;
        /* bit set to 1: channel muted */
        u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);

        chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);

        chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32);        /* hi part */
        chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */

        dev_dbg(chip->card->dev,
                "mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
                chip->rmh.cmd[2]);

        err = lx_message_send_atomic(chip, &chip->rmh);

        mutex_unlock(&chip->msg_lock);
        return err;
}
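
/*
 * Usage sketch (illustrative only): mute or unmute every channel of one
 * direction at once, e.g.
 *
 *      err = lx_level_unmute(chip, 0, 1);  // unmute all playback channels
 *      err = lx_level_unmute(chip, 1, 0);  // mute all capture channels
 */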

static const u32 peak_map[] = {
        0x00000109, /* -90.308dB */
        0x0000083B, /* -72.247dB */
        0x000020C4, /* -60.205dB */
        0x00008273, /* -48.030dB */
        0x00020756, /* -36.005dB */
        0x00040C37, /* -30.001dB */
        0x00081385, /* -24.002dB */
        0x00101D3F, /* -18.000dB */
        0x0016C310, /* -15.000dB */
        0x002026F2, /* -12.001dB */
        0x002D6A86, /* -9.000dB */
        0x004026E6, /* -6.004dB */
        0x005A9DF6, /* -3.000dB */
        0x0065AC8B, /* -2.000dB */
        0x00721481, /* -1.000dB */
        0x007FFFFF, /* FS */
};
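
/*
 * peak_map[] translates the 4-bit peak codes packed into the
 * CMD_12_GET_PEAK status word (one nibble per channel, see lx_level_peaks()
 * below) into 24-bit linear amplitudes, 0x007FFFFF being digital full scale.
 */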

int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
                   u32 *r_levels)
{
        int err = 0;
        int i;

        mutex_lock(&chip->msg_lock);
        for (i = 0; i < channels; i += 4) {
                u32 s0, s1, s2, s3;

                lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
                chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);

                err = lx_message_send_atomic(chip, &chip->rmh);

                if (err == 0) {
                        s0 = peak_map[chip->rmh.stat[0] & 0x0F];
                        s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf];
                        s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf];
                        s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
                } else
                        s0 = s1 = s2 = s3 = 0;

                r_levels[0] = s0;
                r_levels[1] = s1;
                r_levels[2] = s2;
                r_levels[3] = s3;

                r_levels += 4;
        }

        mutex_unlock(&chip->msg_lock);
        return err;
}

/* interrupt handling */
#define PCX_IRQ_NONE            0
#define IRQCS_ACTIVE_PCIDB      BIT(13)
#define IRQCS_ENABLE_PCIIRQ     BIT(8)
#define IRQCS_ENABLE_PCIDB      BIT(9)

static u32 lx_interrupt_test_ack(struct lx6464es *chip)
{
        u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);

        /* Test if PCI Doorbell interrupt is active */
        if (irqcs & IRQCS_ACTIVE_PCIDB) {
                u32 temp;
                irqcs = PCX_IRQ_NONE;

                while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
                        /* RAZ interrupt */
                        irqcs |= temp;
                        lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
                }

                return irqcs;
        }
        return PCX_IRQ_NONE;
}

static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
                            int *r_async_pending, int *r_async_escmd)
{
        u32 irq_async;
        u32 irqsrc = lx_interrupt_test_ack(chip);

        if (irqsrc == PCX_IRQ_NONE)
                return 0;

        *r_irqsrc = irqsrc;

        irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response
                                                     * (set by xilinx) + EOB */

        if (irq_async & MASK_SYS_STATUS_ESA) {
                irq_async &= ~MASK_SYS_STATUS_ESA;
                *r_async_escmd = 1;
        }

        if (irq_async) {
                /* dev_dbg(chip->card->dev, "interrupt: async event pending\n"); */
                *r_async_pending = 1;
        }

        return 1;
}

static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
                                            int *r_freq_changed,
                                            u64 *r_notified_in_pipe_mask,
                                            u64 *r_notified_out_pipe_mask)
{
        int err;
        u32 stat[9];    /* answer from CMD_04_GET_EVENT */

        /* We can optimize this to not read dumb events.
         * Answer words are in the following order:
         * Stat[0]      general status
         * Stat[1]      end of buffer OUT pF
         * Stat[2]      end of buffer OUT pf
         * Stat[3]      end of buffer IN pF
         * Stat[4]      end of buffer IN pf
         * Stat[5]      MSB underrun
         * Stat[6]      LSB underrun
         * Stat[7]      MSB overrun
         * Stat[8]      LSB overrun
         * */

        int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
        int eb_pending_in  = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

        *r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

        err = lx_dsp_read_async_events(chip, stat);
        if (err < 0)
                return err;

        if (eb_pending_in) {
                *r_notified_in_pipe_mask = ((u64)stat[3] << 32)
                        + stat[4];
                dev_dbg(chip->card->dev, "interrupt: EOBI pending %llx\n",
                        *r_notified_in_pipe_mask);
        }
        if (eb_pending_out) {
                *r_notified_out_pipe_mask = ((u64)stat[1] << 32)
                        + stat[2];
                dev_dbg(chip->card->dev, "interrupt: EOBO pending %llx\n",
                        *r_notified_out_pipe_mask);
        }

        /* todo: handle xrun notification */

        return err;
}

static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
                                           struct lx_stream *lx_stream)
{
        struct snd_pcm_substream *substream = lx_stream->stream;
        const unsigned int is_capture = lx_stream->is_capture;
        int err;

        const u32 channels = substream->runtime->channels;
        const u32 bytes_per_frame = channels * 3;
        const u32 period_size = substream->runtime->period_size;
        const u32 period_bytes = period_size * bytes_per_frame;
        const u32 pos = lx_stream->frame_pos;
        const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
                0 : pos + 1;

        dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
        u32 buf_hi = 0;
        u32 buf_lo = 0;
        u32 buffer_index = 0;

        u32 needed, freed;
        u32 size_array[MAX_STREAM_BUFFER];

        dev_dbg(chip->card->dev, "->lx_interrupt_request_new_buffer\n");

        mutex_lock(&chip->lock);

        err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
        dev_dbg(chip->card->dev,
                "interrupt: needed %d, freed %d\n", needed, freed);

        unpack_pointer(buf, &buf_lo, &buf_hi);
        err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
                             &buffer_index);
        dev_dbg(chip->card->dev,
                "interrupt: gave buffer index %x on 0x%lx (%d bytes)\n",
                buffer_index, (unsigned long)buf, period_bytes);

        lx_stream->frame_pos = next_pos;
        mutex_unlock(&chip->lock);

        return err;
}

irqreturn_t lx_interrupt(int irq, void *dev_id)
{
        struct lx6464es *chip = dev_id;
        int async_pending, async_escmd;
        u32 irqsrc;
        bool wake_thread = false;

        dev_dbg(chip->card->dev,
                "**************************************************\n");

        if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
                dev_dbg(chip->card->dev, "IRQ_NONE\n");
                return IRQ_NONE; /* this device did not cause the interrupt */
        }

        if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
                return IRQ_HANDLED;

        if (irqsrc & MASK_SYS_STATUS_EOBI)
                dev_dbg(chip->card->dev, "interrupt: EOBI\n");

        if (irqsrc & MASK_SYS_STATUS_EOBO)
                dev_dbg(chip->card->dev, "interrupt: EOBO\n");

        if (irqsrc & MASK_SYS_STATUS_URUN)
                dev_dbg(chip->card->dev, "interrupt: URUN\n");

        if (irqsrc & MASK_SYS_STATUS_ORUN)
                dev_dbg(chip->card->dev, "interrupt: ORUN\n");

        if (async_pending) {
                wake_thread = true;
                chip->irqsrc = irqsrc;
        }

        if (async_escmd) {
                /* backdoor for ethersound commands
                 *
                 * for now, we do not need this
                 *
                 * */

                dev_dbg(chip->card->dev, "interrupt requests escmd handling\n");
        }

        return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

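/*
 * lx_interrupt() above is the hard-IRQ half: it only acknowledges the PLX
 * doorbell and, when an asynchronous event is pending, stores the IRQ source
 * in chip->irqsrc and returns IRQ_WAKE_THREAD.  The threaded half below then
 * fetches the event words from the DSP and queues new period buffers.
 */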
irqreturn_t lx_threaded_irq(int irq, void *dev_id)
{
        struct lx6464es *chip = dev_id;
        u64 notified_in_pipe_mask = 0;
        u64 notified_out_pipe_mask = 0;
        int freq_changed;
        int err;

        /* handle async events */
        err = lx_interrupt_handle_async_events(chip, chip->irqsrc,
                                               &freq_changed,
                                               &notified_in_pipe_mask,
                                               &notified_out_pipe_mask);
        if (err)
                dev_err(chip->card->dev, "error handling async events\n");

        if (notified_in_pipe_mask) {
                struct lx_stream *lx_stream = &chip->capture_stream;

                dev_dbg(chip->card->dev,
                        "requesting audio transfer for capture\n");
                err = lx_interrupt_request_new_buffer(chip, lx_stream);
                if (err < 0)
                        dev_err(chip->card->dev,
                                "cannot request new buffer for capture\n");
                snd_pcm_period_elapsed(lx_stream->stream);
        }

        if (notified_out_pipe_mask) {
                struct lx_stream *lx_stream = &chip->playback_stream;

                dev_dbg(chip->card->dev,
                        "requesting audio transfer for playback\n");
                err = lx_interrupt_request_new_buffer(chip, lx_stream);
                if (err < 0)
                        dev_err(chip->card->dev,
                                "cannot request new buffer for playback\n");
                snd_pcm_period_elapsed(lx_stream->stream);
        }

        return IRQ_HANDLED;
}


static void lx_irq_set(struct lx6464es *chip, int enable)
{
        u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);

        /* enable/disable interrupts
         *
         * Set the Doorbell and PCI interrupt enable bits
         *
         * */
        if (enable)
                reg |= (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
        else
                reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
        lx_plx_reg_write(chip, ePLX_IRQCS, reg);
}

void lx_irq_enable(struct lx6464es *chip)
{
        dev_dbg(chip->card->dev, "->lx_irq_enable\n");
        lx_irq_set(chip, 1);
}

void lx_irq_disable(struct lx6464es *chip)
{
        dev_dbg(chip->card->dev, "->lx_irq_disable\n");
        lx_irq_set(chip, 0);
}