// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
 *
 * lpass-platform.c -- ALSA SoC platform driver for QTi LPASS
 */

#include <dt-bindings/sound/qcom,lpass.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
#include <linux/regmap.h>
#include <sound/soc.h>
#include "lpass-lpaif-reg.h"
#include "lpass.h"

#define DRV_NAME "lpass-platform"

#define LPASS_PLATFORM_BUFFER_SIZE		(24 * 2 * 1024)
#define LPASS_PLATFORM_PERIODS			2
#define LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE	(8 * 1024)
#define LPASS_VA_CDC_DMA_LPM_BUFF_SIZE		(12 * 1024)
#define LPASS_CDC_DMA_REGISTER_FIELDS_MAX	15

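/*
 * Fixed ALSA hardware constraints for the three DMA domains (MI2S/HDMI,
 * RX/TX codec DMA and VA codec DMA). Every stream runs from a two-period
 * ping-pong buffer, so the period size is pinned to half of the
 * per-domain buffer size.
 */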
static const struct snd_pcm_hardware lpass_platform_pcm_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16 |
		SNDRV_PCM_FMTBIT_S24 |
		SNDRV_PCM_FMTBIT_S32,
	.rates = SNDRV_PCM_RATE_8000_192000,
	.rate_min = 8000,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = LPASS_PLATFORM_BUFFER_SIZE,
	.period_bytes_max = LPASS_PLATFORM_BUFFER_SIZE /
		LPASS_PLATFORM_PERIODS,
	.period_bytes_min = LPASS_PLATFORM_BUFFER_SIZE /
		LPASS_PLATFORM_PERIODS,
	.periods_min = LPASS_PLATFORM_PERIODS,
	.periods_max = LPASS_PLATFORM_PERIODS,
	.fifo_size = 0,
};

static const struct snd_pcm_hardware lpass_platform_rxtx_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16 |
		SNDRV_PCM_FMTBIT_S24 |
		SNDRV_PCM_FMTBIT_S32,
	.rates = SNDRV_PCM_RATE_8000_192000,
	.rate_min = 8000,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE,
	.period_bytes_max = LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE /
		LPASS_PLATFORM_PERIODS,
	.period_bytes_min = LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE /
		LPASS_PLATFORM_PERIODS,
	.periods_min = LPASS_PLATFORM_PERIODS,
	.periods_max = LPASS_PLATFORM_PERIODS,
	.fifo_size = 0,
};

static const struct snd_pcm_hardware lpass_platform_va_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16 |
		SNDRV_PCM_FMTBIT_S24 |
		SNDRV_PCM_FMTBIT_S32,
	.rates = SNDRV_PCM_RATE_8000_192000,
	.rate_min = 8000,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = LPASS_VA_CDC_DMA_LPM_BUFF_SIZE,
	.period_bytes_max = LPASS_VA_CDC_DMA_LPM_BUFF_SIZE /
		LPASS_PLATFORM_PERIODS,
	.period_bytes_min = LPASS_VA_CDC_DMA_LPM_BUFF_SIZE /
		LPASS_PLATFORM_PERIODS,
	.periods_min = LPASS_PLATFORM_PERIODS,
	.periods_max = LPASS_PLATFORM_PERIODS,
	.fifo_size = 0,
};

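/*
 * The alloc_*_dmactl_fields() helpers split the per-channel DMA control
 * registers of each interface into regmap fields, so individual bitfields
 * (enable, burst, WPSCNT, ...) can be updated without hand-written
 * shift/mask code.
 */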
static int lpass_platform_alloc_rxtx_dmactl_fields(struct device *dev,
						   struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *rd_dmactl, *wr_dmactl;
	int rval;

	rd_dmactl = devm_kzalloc(dev, sizeof(*rd_dmactl), GFP_KERNEL);
	if (!rd_dmactl)
		return -ENOMEM;

	wr_dmactl = devm_kzalloc(dev, sizeof(*wr_dmactl), GFP_KERNEL);
	if (!wr_dmactl)
		return -ENOMEM;

	drvdata->rxtx_rd_dmactl = rd_dmactl;
	drvdata->rxtx_wr_dmactl = wr_dmactl;

	rval = devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->intf,
					    &v->rxtx_rdma_intf, LPASS_CDC_DMA_REGISTER_FIELDS_MAX);
	if (rval)
		return rval;

	return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
					    &v->rxtx_wrdma_intf, LPASS_CDC_DMA_REGISTER_FIELDS_MAX);
}

static int lpass_platform_alloc_va_dmactl_fields(struct device *dev,
						 struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *wr_dmactl;

	wr_dmactl = devm_kzalloc(dev, sizeof(*wr_dmactl), GFP_KERNEL);
	if (!wr_dmactl)
		return -ENOMEM;

	drvdata->va_wr_dmactl = wr_dmactl;
	return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
					    &v->va_wrdma_intf, LPASS_CDC_DMA_REGISTER_FIELDS_MAX);
}

static int lpass_platform_alloc_dmactl_fields(struct device *dev,
					      struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *rd_dmactl, *wr_dmactl;
	int rval;

	drvdata->rd_dmactl = devm_kzalloc(dev, sizeof(struct lpaif_dmactl),
					  GFP_KERNEL);
	if (drvdata->rd_dmactl == NULL)
		return -ENOMEM;

	drvdata->wr_dmactl = devm_kzalloc(dev, sizeof(struct lpaif_dmactl),
					  GFP_KERNEL);
	if (drvdata->wr_dmactl == NULL)
		return -ENOMEM;

	rd_dmactl = drvdata->rd_dmactl;
	wr_dmactl = drvdata->wr_dmactl;

	rval = devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->intf,
					    &v->rdma_intf, 6);
	if (rval)
		return rval;

	return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
					    &v->wrdma_intf, 6);
}

static int lpass_platform_alloc_hdmidmactl_fields(struct device *dev,
						  struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *rd_dmactl;

	rd_dmactl = devm_kzalloc(dev, sizeof(struct lpaif_dmactl), GFP_KERNEL);
	if (rd_dmactl == NULL)
		return -ENOMEM;

	drvdata->hdmi_rd_dmactl = rd_dmactl;

	return devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->bursten,
					    &v->hdmi_rdma_bursten, 8);
}

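/*
 * Allocate the per-stream bookkeeping, reserve a DMA channel for the
 * stream direction and pick the register map and hardware constraints
 * that match the DAI type (MI2S/HDMI, RX/TX codec DMA or VA codec DMA).
 */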
static int lpass_platform_pcmops_open(struct snd_soc_component *component,
				      struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	const struct lpass_variant *v = drvdata->variant;
	int ret, dma_ch, dir = substream->stream;
	struct lpass_pcm_data *data;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	component->id = dai_id;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->i2s_port = cpu_dai->driver->id;
	runtime->private_data = data;

	if (v->alloc_dma_channel)
		dma_ch = v->alloc_dma_channel(drvdata, dir, dai_id);
	else
		dma_ch = 0;

	if (dma_ch < 0) {
		kfree(data);
		return dma_ch;
	}

	switch (dai_id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		map = drvdata->lpaif_map;
		drvdata->substream[dma_ch] = substream;
		break;
	case LPASS_DP_RX:
		map = drvdata->hdmiif_map;
		drvdata->hdmi_substream[dma_ch] = substream;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		map = drvdata->rxtx_lpaif_map;
		drvdata->rxtx_substream[dma_ch] = substream;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		map = drvdata->va_lpaif_map;
		drvdata->va_substream[dma_ch] = substream;
		break;
	default:
		break;
	}

	data->dma_ch = dma_ch;
	switch (dai_id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
	case LPASS_DP_RX:
		ret = regmap_write(map, LPAIF_DMACTL_REG(v, dma_ch, dir, data->i2s_port), 0);
		if (ret) {
			kfree(data);
			dev_err(soc_runtime->dev, "error writing to rdmactl reg: %d\n", ret);
			return ret;
		}
		snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware);
		runtime->dma_bytes = lpass_platform_pcm_hardware.buffer_bytes_max;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		snd_soc_set_runtime_hwparams(substream, &lpass_platform_rxtx_hardware);
		runtime->dma_bytes = lpass_platform_rxtx_hardware.buffer_bytes_max;
		snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		snd_soc_set_runtime_hwparams(substream, &lpass_platform_va_hardware);
		runtime->dma_bytes = lpass_platform_va_hardware.buffer_bytes_max;
		snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
		break;
	default:
		break;
	}
	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0) {
		kfree(data);
		dev_err(soc_runtime->dev, "setting constraints failed: %d\n",
			ret);
		return -EINVAL;
	}

	return 0;
}

static int lpass_platform_pcmops_close(struct snd_soc_component *component,
				       struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	const struct lpass_variant *v = drvdata->variant;
	struct lpass_pcm_data *data;
	unsigned int dai_id = cpu_dai->driver->id;

	data = runtime->private_data;

	switch (dai_id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		drvdata->substream[data->dma_ch] = NULL;
		break;
	case LPASS_DP_RX:
		drvdata->hdmi_substream[data->dma_ch] = NULL;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		drvdata->rxtx_substream[data->dma_ch] = NULL;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		drvdata->va_substream[data->dma_ch] = NULL;
		break;
	default:
		break;
	}

	if (v->free_dma_channel)
		v->free_dma_channel(drvdata, data->dma_ch, dai_id);

	kfree(data);
	return 0;
}

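/*
 * The __lpass_get_*() helpers below map a substream to the DMA control
 * fields, the channel index relative to its register block and the
 * regmap of the interface that owns it, keyed on the CPU DAI id.
 */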
static struct lpaif_dmactl *__lpass_get_dmactl_handle(const struct snd_pcm_substream *substream,
						      struct snd_soc_component *component)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct lpaif_dmactl *dmactl = NULL;

	switch (cpu_dai->driver->id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			dmactl = drvdata->rd_dmactl;
		else
			dmactl = drvdata->wr_dmactl;
		break;
	case LPASS_DP_RX:
		dmactl = drvdata->hdmi_rd_dmactl;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		dmactl = drvdata->rxtx_rd_dmactl;
		break;
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		dmactl = drvdata->rxtx_wr_dmactl;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		dmactl = drvdata->va_wr_dmactl;
		break;
	}

	return dmactl;
}

static int __lpass_get_id(const struct snd_pcm_substream *substream,
			  struct snd_soc_component *component)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	int id;

	switch (cpu_dai->driver->id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			id = pcm_data->dma_ch;
		else
			id = pcm_data->dma_ch - v->wrdma_channel_start;
		break;
	case LPASS_DP_RX:
		id = pcm_data->dma_ch;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		id = pcm_data->dma_ch;
		break;
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		id = pcm_data->dma_ch - v->rxtx_wrdma_channel_start;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		id = pcm_data->dma_ch - v->va_wrdma_channel_start;
		break;
	}

	return id;
}

static struct regmap *__lpass_get_regmap_handle(const struct snd_pcm_substream *substream,
						struct snd_soc_component *component)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct regmap *map = NULL;

	switch (cpu_dai->driver->id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		map = drvdata->lpaif_map;
		break;
	case LPASS_DP_RX:
		map = drvdata->hdmiif_map;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		map = drvdata->rxtx_lpaif_map;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		map = drvdata->va_lpaif_map;
		break;
	}

	return map;
}

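/*
 * Program the DMA control fields from the negotiated hw_params: burst and
 * FIFO watermark settings, the audio interface mux for MI2S ports and the
 * WPSCNT value derived from bit width and channel count.
 */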
static int lpass_platform_pcmops_hw_params(struct snd_soc_component *component,
					   struct snd_pcm_substream *substream,
					   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	snd_pcm_format_t format = params_format(params);
	unsigned int channels = params_channels(params);
	unsigned int regval;
	struct lpaif_dmactl *dmactl;
	int id;
	int bitwidth;
	int ret, dma_port = pcm_data->i2s_port + v->dmactl_audif_start;
	unsigned int dai_id = cpu_dai->driver->id;

	dmactl = __lpass_get_dmactl_handle(substream, component);
	id = __lpass_get_id(substream, component);

	bitwidth = snd_pcm_format_width(format);
	if (bitwidth < 0) {
		dev_err(soc_runtime->dev, "invalid bit width given: %d\n",
			bitwidth);
		return bitwidth;
	}

	ret = regmap_fields_write(dmactl->bursten, id, LPAIF_DMACTL_BURSTEN_INCR4);
	if (ret) {
		dev_err(soc_runtime->dev, "error updating bursten field: %d\n", ret);
		return ret;
	}

	ret = regmap_fields_write(dmactl->fifowm, id, LPAIF_DMACTL_FIFOWM_8);
	if (ret) {
		dev_err(soc_runtime->dev, "error updating fifowm field: %d\n", ret);
		return ret;
	}

	switch (dai_id) {
	case LPASS_DP_RX:
		ret = regmap_fields_write(dmactl->burst8, id,
					  LPAIF_DMACTL_BURSTEN_INCR4);
		if (ret) {
			dev_err(soc_runtime->dev, "error updating burst8en field: %d\n", ret);
			return ret;
		}
		ret = regmap_fields_write(dmactl->burst16, id,
					  LPAIF_DMACTL_BURSTEN_INCR4);
		if (ret) {
			dev_err(soc_runtime->dev, "error updating burst16en field: %d\n", ret);
			return ret;
		}
		ret = regmap_fields_write(dmactl->dynburst, id,
					  LPAIF_DMACTL_BURSTEN_INCR4);
		if (ret) {
			dev_err(soc_runtime->dev, "error updating dynbursten field: %d\n", ret);
			return ret;
		}
		break;
	case MI2S_PRIMARY:
	case MI2S_SECONDARY:
	case MI2S_TERTIARY:
	case MI2S_QUATERNARY:
	case MI2S_QUINARY:
		ret = regmap_fields_write(dmactl->intf, id,
					  LPAIF_DMACTL_AUDINTF(dma_port));
		if (ret) {
			dev_err(soc_runtime->dev, "error updating audio interface field: %d\n",
				ret);
			return ret;
		}

		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		break;
	default:
		dev_err(soc_runtime->dev, "%s: invalid interface: %d\n", __func__, dai_id);
		break;
	}
	switch (bitwidth) {
	case 16:
		switch (channels) {
		case 1:
		case 2:
			regval = LPAIF_DMACTL_WPSCNT_ONE;
			break;
		case 4:
			regval = LPAIF_DMACTL_WPSCNT_TWO;
			break;
		case 6:
			regval = LPAIF_DMACTL_WPSCNT_THREE;
			break;
		case 8:
			regval = LPAIF_DMACTL_WPSCNT_FOUR;
			break;
		default:
			dev_err(soc_runtime->dev, "invalid PCM config given: bw=%d, ch=%u\n",
				bitwidth, channels);
			return -EINVAL;
		}
		break;
	case 24:
	case 32:
		switch (channels) {
		case 1:
			regval = LPAIF_DMACTL_WPSCNT_ONE;
			break;
		case 2:
			regval = (dai_id == LPASS_DP_RX ?
				  LPAIF_DMACTL_WPSCNT_ONE :
				  LPAIF_DMACTL_WPSCNT_TWO);
			break;
		case 4:
			regval = (dai_id == LPASS_DP_RX ?
				  LPAIF_DMACTL_WPSCNT_TWO :
				  LPAIF_DMACTL_WPSCNT_FOUR);
			break;
		case 6:
			regval = (dai_id == LPASS_DP_RX ?
				  LPAIF_DMACTL_WPSCNT_THREE :
				  LPAIF_DMACTL_WPSCNT_SIX);
			break;
		case 8:
			regval = (dai_id == LPASS_DP_RX ?
				  LPAIF_DMACTL_WPSCNT_FOUR :
				  LPAIF_DMACTL_WPSCNT_EIGHT);
			break;
		default:
			dev_err(soc_runtime->dev, "invalid PCM config given: bw=%d, ch=%u\n",
				bitwidth, channels);
			return -EINVAL;
		}
		break;
	default:
		dev_err(soc_runtime->dev, "invalid PCM config given: bw=%d, ch=%u\n",
			bitwidth, channels);
		return -EINVAL;
	}

	ret = regmap_fields_write(dmactl->wpscnt, id, regval);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to dmactl reg: %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int lpass_platform_pcmops_hw_free(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int reg;
	int ret;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	if (is_cdc_dma_port(dai_id))
		return 0;
	map = __lpass_get_regmap_handle(substream, component);

	reg = LPAIF_DMACTL_REG(v, pcm_data->dma_ch, substream->stream, dai_id);
	ret = regmap_write(map, reg, 0);
	if (ret)
		dev_err(soc_runtime->dev, "error writing to rdmactl reg: %d\n",
			ret);

	return ret;
}

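/*
 * Program the DMA descriptor registers: buffer base address, buffer and
 * period lengths (in 32-bit words, minus one as the hardware expects),
 * then enable the channel.
 */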
static int lpass_platform_pcmops_prepare(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *dmactl;
	struct regmap *map;
	int ret, id, ch, dir = substream->stream;
	unsigned int dai_id = cpu_dai->driver->id;

	ch = pcm_data->dma_ch;

	dmactl = __lpass_get_dmactl_handle(substream, component);
	id = __lpass_get_id(substream, component);
	map = __lpass_get_regmap_handle(substream, component);

	ret = regmap_write(map, LPAIF_DMABASE_REG(v, ch, dir, dai_id),
			   runtime->dma_addr);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmabase reg: %d\n",
			ret);
		return ret;
	}

	ret = regmap_write(map, LPAIF_DMABUFF_REG(v, ch, dir, dai_id),
			   (snd_pcm_lib_buffer_bytes(substream) >> 2) - 1);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmabuff reg: %d\n",
			ret);
		return ret;
	}

	ret = regmap_write(map, LPAIF_DMAPER_REG(v, ch, dir, dai_id),
			   (snd_pcm_lib_period_bytes(substream) >> 2) - 1);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmaper reg: %d\n",
			ret);
		return ret;
	}

	if (is_cdc_dma_port(dai_id)) {
		ret = regmap_fields_write(dmactl->fifowm, id, LPAIF_DMACTL_FIFOWM_8);
		if (ret) {
			dev_err(soc_runtime->dev, "error writing fifowm field to dmactl reg: %d, id: %d\n",
				ret, id);
			return ret;
		}
	}
	ret = regmap_fields_write(dmactl->enable, id, LPAIF_DMACTL_ENABLE_ON);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmactl reg: %d\n",
			ret);
		return ret;
	}

	return 0;
}

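/*
 * Start/stop the DMA channel and enable/disable the per-channel
 * interrupts in the interrupt block that belongs to the DAI (HDMI,
 * MI2S host, RX/TX codec DMA or VA codec DMA).
 */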
static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream,
					 int cmd)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *dmactl;
	struct regmap *map;
	int ret, ch, id;
	unsigned int reg_irqclr = 0, val_irqclr = 0;
	unsigned int reg_irqen = 0, val_irqen = 0, val_mask = 0;
	unsigned int dai_id = cpu_dai->driver->id;

	ch = pcm_data->dma_ch;
	dmactl = __lpass_get_dmactl_handle(substream, component);
	id = __lpass_get_id(substream, component);
	map = __lpass_get_regmap_handle(substream, component);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ret = regmap_fields_write(dmactl->enable, id,
					  LPAIF_DMACTL_ENABLE_ON);
		if (ret) {
			dev_err(soc_runtime->dev,
				"error writing to rdmactl reg: %d\n", ret);
			return ret;
		}
		switch (dai_id) {
		case LPASS_DP_RX:
			ret = regmap_fields_write(dmactl->dyncclk, id,
						  LPAIF_DMACTL_DYNCLK_ON);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPASS_HDMITX_APP_IRQCLEAR_REG(v);
			val_irqclr = (LPAIF_IRQ_ALL(ch) |
				      LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				      LPAIF_IRQ_HDMI_METADONE |
				      LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));

			reg_irqen = LPASS_HDMITX_APP_IRQEN_REG(v);
			val_mask = (LPAIF_IRQ_ALL(ch) |
				    LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				    LPAIF_IRQ_HDMI_METADONE |
				    LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));
			val_irqen = (LPAIF_IRQ_ALL(ch) |
				     LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				     LPAIF_IRQ_HDMI_METADONE |
				     LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));
			break;
		case MI2S_PRIMARY:
		case MI2S_SECONDARY:
		case MI2S_TERTIARY:
		case MI2S_QUATERNARY:
		case MI2S_QUINARY:
			reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_ON);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPAIF_RXTX_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_RXTX_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_ON);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPAIF_VA_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_VA_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		default:
			dev_err(soc_runtime->dev, "%s: invalid %d interface\n", __func__, dai_id);
			return -EINVAL;
		}

		ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr);
		if (ret) {
			dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret);
			return ret;
		}
		ret = regmap_update_bits(map, reg_irqen, val_mask, val_irqen);
		if (ret) {
			dev_err(soc_runtime->dev, "error writing to irqen reg: %d\n", ret);
			return ret;
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ret = regmap_fields_write(dmactl->enable, id,
					  LPAIF_DMACTL_ENABLE_OFF);
		if (ret) {
			dev_err(soc_runtime->dev,
				"error writing to rdmactl reg: %d\n", ret);
			return ret;
		}
		switch (dai_id) {
		case LPASS_DP_RX:
			ret = regmap_fields_write(dmactl->dyncclk, id,
						  LPAIF_DMACTL_DYNCLK_OFF);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg: %d\n", ret);
				return ret;
			}
			reg_irqen = LPASS_HDMITX_APP_IRQEN_REG(v);
			val_mask = (LPAIF_IRQ_ALL(ch) |
				    LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				    LPAIF_IRQ_HDMI_METADONE |
				    LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));
			val_irqen = 0;
			break;
		case MI2S_PRIMARY:
		case MI2S_SECONDARY:
		case MI2S_TERTIARY:
		case MI2S_QUATERNARY:
		case MI2S_QUINARY:
			reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = 0;
			break;
		case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_OFF);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}

			reg_irqclr = LPAIF_RXTX_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_RXTX_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_OFF);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}

			reg_irqclr = LPAIF_VA_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_VA_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		default:
			dev_err(soc_runtime->dev, "%s: invalid %d interface\n", __func__, dai_id);
			return -EINVAL;
		}

		ret = regmap_update_bits(map, reg_irqen, val_mask, val_irqen);
		if (ret) {
			dev_err(soc_runtime->dev,
				"error writing to irqen reg: %d\n", ret);
			return ret;
		}
		break;
	}

	return 0;
}

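/*
 * The hardware read pointer is derived from the difference between the
 * current-address and base-address DMA registers.
 */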
static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
		struct snd_soc_component *component,
		struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int base_addr, curr_addr;
	int ret, ch, dir = substream->stream;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	map = __lpass_get_regmap_handle(substream, component);
	ch = pcm_data->dma_ch;

	ret = regmap_read(map,
			  LPAIF_DMABASE_REG(v, ch, dir, dai_id), &base_addr);
	if (ret) {
		dev_err(soc_runtime->dev,
			"error reading from rdmabase reg: %d\n", ret);
		return ret;
	}

	ret = regmap_read(map,
			  LPAIF_DMACURR_REG(v, ch, dir, dai_id), &curr_addr);
	if (ret) {
		dev_err(soc_runtime->dev,
			"error reading from rdmacurr reg: %d\n", ret);
		return ret;
	}

	return bytes_to_frames(substream->runtime, curr_addr - base_addr);
}

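/*
 * Codec DMA buffers live in LPASS LPM memory, so mmap() has to remap the
 * physical region with write-combine attributes instead of using the
 * default DMA-area mmap.
 */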
static int lpass_platform_cdc_dma_mmap(struct snd_pcm_substream *substream,
				       struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	size = vma->vm_end - vma->vm_start;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	return io_remap_pfn_range(vma, vma->vm_start,
				  (runtime->dma_addr + offset) >> PAGE_SHIFT,
				  size, vma->vm_page_prot);
}

static int lpass_platform_pcmops_mmap(struct snd_soc_component *component,
				      struct snd_pcm_substream *substream,
				      struct vm_area_struct *vma)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	unsigned int dai_id = cpu_dai->driver->id;

	if (is_cdc_dma_port(dai_id))
		return lpass_platform_cdc_dma_mmap(substream, vma);

	return snd_pcm_lib_default_mmap(substream, vma);
}

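/*
 * Common per-channel interrupt handling: acknowledge the interrupt in the
 * interface's clear register, then signal period completion, xrun or bus
 * error to the PCM core as appropriate.
 */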
static irqreturn_t lpass_dma_interrupt_handler(
			struct snd_pcm_substream *substream,
			struct lpass_data *drvdata,
			int chan, u32 interrupts)
{
	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	const struct lpass_variant *v = drvdata->variant;
	irqreturn_t ret = IRQ_NONE;
	int rv;
	unsigned int reg, val, mask;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	mask = LPAIF_IRQ_ALL(chan);
	switch (dai_id) {
	case LPASS_DP_RX:
		map = drvdata->hdmiif_map;
		reg = LPASS_HDMITX_APP_IRQCLEAR_REG(v);
		val = (LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(chan) |
		       LPAIF_IRQ_HDMI_METADONE |
		       LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(chan));
		break;
	case MI2S_PRIMARY:
	case MI2S_SECONDARY:
	case MI2S_TERTIARY:
	case MI2S_QUATERNARY:
	case MI2S_QUINARY:
		map = drvdata->lpaif_map;
		reg = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
		val = 0;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		map = drvdata->rxtx_lpaif_map;
		reg = LPAIF_RXTX_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
		val = 0;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		map = drvdata->va_lpaif_map;
		reg = LPAIF_VA_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
		val = 0;
		break;
	default:
		dev_err(soc_runtime->dev, "%s: invalid %d interface\n", __func__, dai_id);
		return -EINVAL;
	}
	if (interrupts & LPAIF_IRQ_PER(chan)) {
		rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		snd_pcm_period_elapsed(substream);
		ret = IRQ_HANDLED;
	}

	if (interrupts & LPAIF_IRQ_XRUN(chan)) {
		rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		dev_warn_ratelimited(soc_runtime->dev, "xrun warning\n");

		snd_pcm_stop_xrun(substream);
		ret = IRQ_HANDLED;
	}

	if (interrupts & LPAIF_IRQ_ERR(chan)) {
		rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		dev_err(soc_runtime->dev, "bus access error\n");
		snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
		ret = IRQ_HANDLED;
	}

	if (interrupts & val) {
		rv = regmap_write(map, reg, val);
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		ret = IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t lpass_platform_lpaif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	int rv, chan;

	rv = regmap_read(drvdata->lpaif_map,
			 LPAIF_IRQSTAT_REG(v, LPAIF_IRQ_PORT_HOST), &irqs);
	if (rv) {
		pr_err("error reading from irqstat reg: %d\n", rv);
		return IRQ_NONE;
	}

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_DMA_CHANNELS; chan++) {
		if (irqs & LPAIF_IRQ_ALL(chan) && drvdata->substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t lpass_platform_hdmiif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	int rv, chan;

	rv = regmap_read(drvdata->hdmiif_map,
			 LPASS_HDMITX_APP_IRQSTAT_REG(v), &irqs);
	if (rv) {
		pr_err("error reading from irqstat reg: %d\n", rv);
		return IRQ_NONE;
	}

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_HDMI_DMA_CHANNELS; chan++) {
		if (irqs & (LPAIF_IRQ_ALL(chan) | LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(chan) |
			    LPAIF_IRQ_HDMI_METADONE |
			    LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(chan))
			&& drvdata->hdmi_substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->hdmi_substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t lpass_platform_rxtxif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	int rv, chan;

	rv = regmap_read(drvdata->rxtx_lpaif_map,
			 LPAIF_RXTX_IRQSTAT_REG(v, LPAIF_IRQ_PORT_HOST), &irqs);
	if (rv) {
		pr_err("error reading from rxtx irqstat reg: %d\n", rv);
		return IRQ_NONE;
	}

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_CDC_DMA_CHANNELS; chan++) {
		if (irqs & LPAIF_IRQ_ALL(chan) && drvdata->rxtx_substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->rxtx_substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t lpass_platform_vaif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	const struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	int rv, chan;

	rv = regmap_read(drvdata->va_lpaif_map,
			 LPAIF_VA_IRQSTAT_REG(v, LPAIF_IRQ_PORT_HOST), &irqs);
	if (rv) {
		pr_err("error reading from va irqstat reg: %d\n", rv);
		return IRQ_NONE;
	}

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_VA_CDC_DMA_CHANNELS; chan++) {
		if (irqs & LPAIF_IRQ_ALL(chan) && drvdata->va_substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->va_substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}
	return IRQ_HANDLED;
}

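/*
 * Point the substream's DMA buffer at the statically reserved LPM region
 * for the codec DMA interface; the area is mapped write-combined so the
 * CPU can fill or drain it.
 */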
static int lpass_platform_prealloc_cdc_dma_buffer(struct snd_soc_component *component,
						  struct snd_pcm *pcm, int dai_id)
{
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream)
		substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	else
		substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;

	buf = &substream->dma_buffer;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;

	/* Assign Codec DMA buffer pointers */
	buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;

	switch (dai_id) {
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		buf->bytes = lpass_platform_rxtx_hardware.buffer_bytes_max;
		buf->addr = drvdata->rxtx_cdc_dma_lpm_buf;
		break;
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		buf->bytes = lpass_platform_rxtx_hardware.buffer_bytes_max;
		buf->addr = drvdata->rxtx_cdc_dma_lpm_buf + LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		buf->bytes = lpass_platform_va_hardware.buffer_bytes_max;
		buf->addr = drvdata->va_cdc_dma_lpm_buf;
		break;
	default:
		break;
	}

	buf->area = (unsigned char * __force)memremap(buf->addr, buf->bytes, MEMREMAP_WC);

	return 0;
}

static int lpass_platform_pcm_new(struct snd_soc_component *component,
				  struct snd_soc_pcm_runtime *soc_runtime)
{
	struct snd_pcm *pcm = soc_runtime->pcm;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
	unsigned int dai_id = cpu_dai->driver->id;

	size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;

	/*
	 * The LPASS codec DMA can only access LPASS LPM hardware memory;
	 * remapping that memory in the prealloc path is what makes it
	 * accessible to the HLOS.
	 */
	if (is_cdc_dma_port(dai_id))
		return lpass_platform_prealloc_cdc_dma_buffer(component, pcm, dai_id);

	return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_NONCOHERENT,
					    component->dev, size);
}

static int lpass_platform_pcmops_suspend(struct snd_soc_component *component)
{
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct regmap *map;
	unsigned int dai_id = component->id;

	if (dai_id == LPASS_DP_RX)
		map = drvdata->hdmiif_map;
	else
		map = drvdata->lpaif_map;

	regcache_cache_only(map, true);
	regcache_mark_dirty(map);

	return 0;
}

static int lpass_platform_pcmops_resume(struct snd_soc_component *component)
{
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct regmap *map;
	unsigned int dai_id = component->id;

	if (dai_id == LPASS_DP_RX)
		map = drvdata->hdmiif_map;
	else
		map = drvdata->lpaif_map;

	regcache_cache_only(map, false);
	return regcache_sync(map);
}

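/*
 * Copy between user space and the DMA area. Codec DMA buffers are
 * memremapped LPM memory and are treated as I/O memory; ordinary
 * system-memory buffers use the plain iov_iter copies.
 */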
static int lpass_platform_copy(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream, int channel,
			       unsigned long pos, struct iov_iter *buf,
			       unsigned long bytes)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	unsigned int dai_id = component->id;
	int ret = 0;

	void __iomem *dma_buf = (void __iomem *)(rt->dma_area + pos +
				channel * (rt->dma_bytes / rt->channels));

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_cdc_dma_port(dai_id)) {
			ret = copy_from_iter_toio(dma_buf, buf, bytes);
		} else {
			if (copy_from_iter((void __force *)dma_buf, bytes, buf) != bytes)
				ret = -EFAULT;
		}
	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		if (is_cdc_dma_port(dai_id)) {
			ret = copy_to_iter_fromio(buf, dma_buf, bytes);
		} else {
			if (copy_to_iter((void __force *)dma_buf, bytes, buf) != bytes)
				ret = -EFAULT;
		}
	}

	return ret;
}

static const struct snd_soc_component_driver lpass_component_driver = {
	.name		= DRV_NAME,
	.open		= lpass_platform_pcmops_open,
	.close		= lpass_platform_pcmops_close,
	.hw_params	= lpass_platform_pcmops_hw_params,
	.hw_free	= lpass_platform_pcmops_hw_free,
	.prepare	= lpass_platform_pcmops_prepare,
	.trigger	= lpass_platform_pcmops_trigger,
	.pointer	= lpass_platform_pcmops_pointer,
	.mmap		= lpass_platform_pcmops_mmap,
	.pcm_construct	= lpass_platform_pcm_new,
	.suspend	= lpass_platform_pcmops_suspend,
	.resume		= lpass_platform_pcmops_resume,
	.copy		= lpass_platform_copy,
};

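/*
 * Probe-time setup: mask all interrupts, request the per-interface IRQs,
 * allocate the DMA control regmap fields and register the platform
 * component.
 */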
int asoc_qcom_lpass_platform_register(struct platform_device *pdev)
{
	struct lpass_data *drvdata = platform_get_drvdata(pdev);
	const struct lpass_variant *v = drvdata->variant;
	int ret;

	drvdata->lpaif_irq = platform_get_irq_byname(pdev, "lpass-irq-lpaif");
	if (drvdata->lpaif_irq < 0)
		return -ENODEV;

	/* ensure audio hardware is disabled */
	ret = regmap_write(drvdata->lpaif_map,
			   LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST), 0);
	if (ret) {
		dev_err(&pdev->dev, "error writing to irqen reg: %d\n", ret);
		return ret;
	}

	ret = devm_request_irq(&pdev->dev, drvdata->lpaif_irq,
			       lpass_platform_lpaif_irq, IRQF_TRIGGER_RISING,
			       "lpass-irq-lpaif", drvdata);
	if (ret) {
		dev_err(&pdev->dev, "irq request failed: %d\n", ret);
		return ret;
	}

	ret = lpass_platform_alloc_dmactl_fields(&pdev->dev,
						 drvdata->lpaif_map);
	if (ret) {
		dev_err(&pdev->dev,
			"error initializing dmactl fields: %d\n", ret);
		return ret;
	}

	if (drvdata->codec_dma_enable) {
		ret = regmap_write(drvdata->rxtx_lpaif_map,
				   LPAIF_RXTX_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST), 0x0);
		if (ret) {
			dev_err(&pdev->dev, "error writing to rxtx irqen reg: %d\n", ret);
			return ret;
		}
		ret = regmap_write(drvdata->va_lpaif_map,
				   LPAIF_VA_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST), 0x0);
		if (ret) {
			dev_err(&pdev->dev, "error writing to va irqen reg: %d\n", ret);
			return ret;
		}
		drvdata->rxtxif_irq = platform_get_irq_byname(pdev, "lpass-irq-rxtxif");
		if (drvdata->rxtxif_irq < 0)
			return -ENODEV;

		ret = devm_request_irq(&pdev->dev, drvdata->rxtxif_irq,
				       lpass_platform_rxtxif_irq, 0, "lpass-irq-rxtxif", drvdata);
		if (ret) {
			dev_err(&pdev->dev, "rxtx irq request failed: %d\n", ret);
			return ret;
		}

		ret = lpass_platform_alloc_rxtx_dmactl_fields(&pdev->dev,
							      drvdata->rxtx_lpaif_map);
		if (ret) {
			dev_err(&pdev->dev,
				"error initializing rxtx dmactl fields: %d\n", ret);
			return ret;
		}

		drvdata->vaif_irq = platform_get_irq_byname(pdev, "lpass-irq-vaif");
		if (drvdata->vaif_irq < 0)
			return -ENODEV;

		ret = devm_request_irq(&pdev->dev, drvdata->vaif_irq,
				       lpass_platform_vaif_irq, 0, "lpass-irq-vaif", drvdata);
		if (ret) {
			dev_err(&pdev->dev, "va irq request failed: %d\n", ret);
			return ret;
		}

		ret = lpass_platform_alloc_va_dmactl_fields(&pdev->dev,
							    drvdata->va_lpaif_map);
		if (ret) {
			dev_err(&pdev->dev,
				"error initializing va dmactl fields: %d\n", ret);
			return ret;
		}
	}

	if (drvdata->hdmi_port_enable) {
		drvdata->hdmiif_irq = platform_get_irq_byname(pdev, "lpass-irq-hdmi");
		if (drvdata->hdmiif_irq < 0)
			return -ENODEV;

		ret = devm_request_irq(&pdev->dev, drvdata->hdmiif_irq,
				       lpass_platform_hdmiif_irq, 0, "lpass-irq-hdmi", drvdata);
		if (ret) {
			dev_err(&pdev->dev, "irq hdmi request failed: %d\n", ret);
			return ret;
		}
		ret = regmap_write(drvdata->hdmiif_map,
				   LPASS_HDMITX_APP_IRQEN_REG(v), 0);
		if (ret) {
			dev_err(&pdev->dev, "error writing to hdmi irqen reg: %d\n", ret);
			return ret;
		}

		ret = lpass_platform_alloc_hdmidmactl_fields(&pdev->dev,
							     drvdata->hdmiif_map);
		if (ret) {
			dev_err(&pdev->dev,
				"error initializing hdmidmactl fields: %d\n", ret);
			return ret;
		}
	}
	return devm_snd_soc_register_component(&pdev->dev,
					       &lpass_component_driver, NULL, 0);
}
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_platform_register);

MODULE_DESCRIPTION("QTi LPASS Platform Driver");
MODULE_LICENSE("GPL");