// SPDX-License-Identifier: GPL-2.0
/*
 * mtk-afe-fe-dais.c  --  Mediatek afe fe dai operator
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>

#include "mtk-afe-platform-driver.h"
#include "mtk-afe-fe-dai.h"
#include "mtk-base-afe.h"

#define AFE_BASE_END_OFFSET 8
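
/*
 * DAI startup: apply the hardware constraints for the FE PCM, enable the
 * memif agent and, if needed, reserve a dynamic AFE IRQ for this memif.
 */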
int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int memif_num = snd_soc_rtd_to_cpu(rtd, 0)->id;
	struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
	const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
	int ret;

	memif->substream = substream;

	snd_pcm_hw_constraint_step(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
	/* enable the memif agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1, 0, memif->data->agent_disable_shift);

	snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);

	/*
	 * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
	 * smaller than period_size; keep one extra period to hold samples
	 * that user space has not read yet.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		int periods_max = mtk_afe_hardware->periods_max;

		ret = snd_pcm_hw_constraint_minmax(runtime,
						   SNDRV_PCM_HW_PARAM_PERIODS,
						   3, periods_max);
		if (ret < 0) {
			dev_err(afe->dev, "hw_constraint_minmax failed\n");
			return ret;
		}
	}

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");

	/* dynamically assign an AFE IRQ to this memif */
	if (memif->irq_usage < 0) {
		int irq_id = mtk_dynamic_irq_acquire(afe);

		if (irq_id != afe->irqs_size) {
			/* link */
			memif->irq_usage = irq_id;
		} else {
			dev_err(afe->dev, "%s() error: no more asys irq\n",
				__func__);
			ret = -EBUSY;
		}
	}

	return ret;
}
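
/*
 * DAI shutdown: disable the memif agent again and return the dynamically
 * acquired IRQ unless this memif uses a fixed one.
 */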
void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[snd_soc_rtd_to_cpu(rtd, 0)->id];
	int irq_id;

	irq_id = memif->irq_usage;

	/* disable the memif agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1, 1, memif->data->agent_disable_shift);

	if (!memif->const_irq) {
		mtk_dynamic_irq_release(afe, irq_id);
		memif->irq_usage = -1;
		memif->substream = NULL;
	}
}
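
/*
 * hw_params: claim DRAM bandwidth if the platform requires it, clear the
 * DMA buffer and program the buffer address, channel count, rate and
 * format into the memif registers.
 */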
int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	int id = snd_soc_rtd_to_cpu(rtd, 0)->id;
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	unsigned int channels = params_channels(params);
	unsigned int rate = params_rate(params);
	snd_pcm_format_t format = params_format(params);
	int ret;

	if (afe->request_dram_resource)
		afe->request_dram_resource(afe->dev);

	dev_dbg(afe->dev, "%s(), %s, ch %d, rate %d, fmt %d, dma_addr %pad, dma_area %p, dma_bytes 0x%zx\n",
		__func__, memif->data->name,
		channels, rate, format,
		&substream->runtime->dma_addr,
		substream->runtime->dma_area,
		substream->runtime->dma_bytes);

	memset_io((void __force __iomem *)substream->runtime->dma_area, 0,
		  substream->runtime->dma_bytes);

	/* set addr */
	ret = mtk_memif_set_addr(afe, id,
				 substream->runtime->dma_area,
				 substream->runtime->dma_addr,
				 substream->runtime->dma_bytes);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set addr, ret %d\n",
			__func__, id, ret);
		return ret;
	}

	/* set channel */
	ret = mtk_memif_set_channel(afe, id, channels);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set channel %d, ret %d\n",
			__func__, id, channels, ret);
		return ret;
	}

	/* set rate */
	ret = mtk_memif_set_rate_substream(substream, id, rate);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set rate %d, ret %d\n",
			__func__, id, rate, ret);
		return ret;
	}

	/* set format */
	ret = mtk_memif_set_format(afe, id, format);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set format %d, ret %d\n",
			__func__, id, format, ret);
		return ret;
	}

	return 0;
}
int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);

	if (afe->release_dram_resource)
		afe->release_dram_resource(afe->dev);

	return 0;
}
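
/*
 * trigger: on START/RESUME enable the memif, program the IRQ period
 * counter and sample-rate field, then unmask the IRQ; on STOP/SUSPEND
 * disable the memif, mask the IRQ and clear any pending status.
 */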
int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	int id = snd_soc_rtd_to_cpu(rtd, 0)->id;
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
	const struct mtk_base_irq_data *irq_data = irqs->irq_data;
	unsigned int counter = runtime->period_size;
	int fs;
	int ret;

	dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		ret = mtk_memif_set_enable(afe, id);
		if (ret) {
			dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
				__func__, id, ret);
			return ret;
		}

		/* set irq counter */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
				       irq_data->irq_cnt_maskbit, counter,
				       irq_data->irq_cnt_shift);

		/* set irq fs */
		fs = afe->irq_fs(substream, runtime->rate);
		if (fs < 0)
			return -EINVAL;

		mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
				       irq_data->irq_fs_maskbit, fs,
				       irq_data->irq_fs_shift);

		/* enable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1, 1, irq_data->irq_en_shift);
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		ret = mtk_memif_set_disable(afe, id);
		if (ret)
			dev_err(afe->dev, "%s(), error, id %d, memif disable, ret %d\n",
				__func__, id, ret);

		/* disable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1, 0, irq_data->irq_en_shift);
		/* and clear pending IRQ */
		mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
				 1 << irq_data->irq_clr_shift);
		return ret;
	default:
		return -EINVAL;
	}
}
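
/*
 * prepare: for playback streams, configure the memif pre-buffer size if
 * the platform provides a callback for it.
 */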
int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	int id = snd_soc_rtd_to_cpu(rtd, 0)->id;
	int pbuf_size;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (afe->get_memif_pbuf_size) {
			pbuf_size = afe->get_memif_pbuf_size(substream);
			mtk_memif_set_pbuf_size(afe, id, pbuf_size);
		}
	}
	return 0;
}
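
/*
 * Dynamic IRQ bookkeeping: scan the irqs[] table for a free slot under
 * irq_alloc_lock. Returns the slot index, or irqs_size when none is free.
 */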
int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
{
	int i;

	mutex_lock(&afe->irq_alloc_lock);
	for (i = 0; i < afe->irqs_size; ++i) {
		if (afe->irqs[i].irq_occupyed == 0) {
			afe->irqs[i].irq_occupyed = 1;
			mutex_unlock(&afe->irq_alloc_lock);
			return i;
		}
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return afe->irqs_size;
}
int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
{
	mutex_lock(&afe->irq_alloc_lock);
	if (irq_id >= 0 && irq_id < afe->irqs_size) {
		afe->irqs[irq_id].irq_occupyed = 0;
		mutex_unlock(&afe->irq_alloc_lock);
		return 0;
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return -EINVAL;
}
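
/*
 * System suspend: back up the registers listed in reg_back_up_list before
 * powering the AFE down through the runtime-suspend callback.
 */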
int mtk_afe_suspend(struct snd_soc_component *component)
{
	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || afe->suspended)
		return 0;

	if (!afe->reg_back_up)
		afe->reg_back_up =
			devm_kcalloc(dev, afe->reg_back_up_list_num,
				     sizeof(unsigned int), GFP_KERNEL);

	if (afe->reg_back_up) {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			regmap_read(regmap, afe->reg_back_up_list[i],
				    &afe->reg_back_up[i]);
	}

	afe->suspended = true;
	afe->runtime_suspend(dev);
	return 0;
}
int mtk_afe_resume(struct snd_soc_component *component)
{
	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || !afe->suspended)
		return 0;

	afe->runtime_resume(dev);

	if (!afe->reg_back_up) {
		dev_dbg(dev, "%s no reg_backup\n", __func__);
	} else {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			mtk_regmap_write(regmap, afe->reg_back_up_list[i],
					 afe->reg_back_up[i]);
	}

	afe->suspended = false;
	return 0;
}
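
/* Helpers to start/stop a memif by toggling its enable bit. */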
int mtk_memif_set_enable(struct mtk_base_afe *afe, int id)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];

	if (memif->data->enable_shift < 0) {
		dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
			 __func__, id);
		return 0;
	}
	return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				      1, 1, memif->data->enable_shift);
}
int mtk_memif_set_disable(struct mtk_base_afe *afe, int id)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];

	if (memif->data->enable_shift < 0) {
		dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
			 __func__, id);
		return 0;
	}
	return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				      1, 0, memif->data->enable_shift);
}
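
/*
 * Program the DMA buffer start/end addresses, including the upper address
 * bits on platforms whose memifs can address more than 32 bits.
 */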
int mtk_memif_set_addr(struct mtk_base_afe *afe, int id,
		       unsigned char *dma_area,
		       dma_addr_t dma_addr,
		       size_t dma_bytes)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	int msb_at_bit33 = upper_32_bits(dma_addr) ? 1 : 0;
	unsigned int phys_buf_addr = lower_32_bits(dma_addr);
	unsigned int phys_buf_addr_upper_32 = upper_32_bits(dma_addr);

	memif->dma_area = dma_area;
	memif->dma_addr = dma_addr;
	memif->dma_bytes = dma_bytes;

	/* start */
	mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
			 phys_buf_addr);
	/* end */
	if (memif->data->reg_ofs_end)
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_end,
				 phys_buf_addr + dma_bytes - 1);
	else
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_base +
				 AFE_BASE_END_OFFSET,
				 phys_buf_addr + dma_bytes - 1);

	/* set start, end, upper 32 bits */
	if (memif->data->reg_ofs_base_msb) {
		mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base_msb,
				 phys_buf_addr_upper_32);
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_end_msb,
				 phys_buf_addr_upper_32);
	}

	/* set MSB to 33-bit, for memif address */
	if (memif->data->msb_reg)
		mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
				       1, msb_at_bit33, memif->data->msb_shift);

	/* set MSB to 33-bit, for memif end address */
	if (memif->data->msb_end_reg)
		mtk_regmap_update_bits(afe->regmap, memif->data->msb_end_reg,
				       1, msb_at_bit33,
				       memif->data->msb_end_shift);

	return 0;
}
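
/*
 * Configure mono/stereo (and 4-channel, where supported) handling for the
 * memif according to the stream's channel count.
 */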
int mtk_memif_set_channel(struct mtk_base_afe *afe,
			  int id, unsigned int channel)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	unsigned int mono;

	if (memif->data->mono_shift < 0)
		return 0;

	if (memif->data->quad_ch_mask) {
		unsigned int quad_ch = (channel == 4) ? 1 : 0;

		mtk_regmap_update_bits(afe->regmap, memif->data->quad_ch_reg,
				       memif->data->quad_ch_mask,
				       quad_ch, memif->data->quad_ch_shift);
	}

	if (memif->data->mono_invert)
		mono = (channel == 1) ? 0 : 1;
	else
		mono = (channel == 1) ? 1 : 0;

	if (memif->data->int_odd_flag_reg)
		mtk_regmap_update_bits(afe->regmap,
				       memif->data->int_odd_flag_reg,
				       1, mono,
				       memif->data->int_odd_flag_shift);

	return mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
				      1, mono, memif->data->mono_shift);
}
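
/*
 * Rate helpers: translate a sample rate into the platform's fs register
 * code, either via get_dai_fs (by DAI id) or memif_fs (by substream).
 */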
static int mtk_memif_set_rate_fs(struct mtk_base_afe *afe,
				 int id, int fs)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];

	if (memif->data->fs_shift >= 0)
		mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
				       memif->data->fs_maskbit,
				       fs, memif->data->fs_shift);

	return 0;
}
int mtk_memif_set_rate(struct mtk_base_afe *afe,
		       int id, unsigned int rate)
{
	int fs;

	if (!afe->get_dai_fs) {
		dev_err(afe->dev, "%s(), error, afe->get_dai_fs == NULL\n",
			__func__);
		return -EINVAL;
	}

	fs = afe->get_dai_fs(afe, id, rate);
	if (fs < 0)
		return -EINVAL;

	return mtk_memif_set_rate_fs(afe, id, fs);
}
int mtk_memif_set_rate_substream(struct snd_pcm_substream *substream,
				 int id, unsigned int rate)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct snd_soc_component *component =
		snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
	int fs;

	if (!afe->memif_fs) {
		dev_err(afe->dev, "%s(), error, afe->memif_fs == NULL\n",
			__func__);
		return -EINVAL;
	}

	fs = afe->memif_fs(substream, rate);
	if (fs < 0)
		return -EINVAL;

	return mtk_memif_set_rate_fs(afe, id, fs);
}
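
/*
 * Select the HD-audio mode and alignment bits that match the PCM sample
 * format.
 */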
int mtk_memif_set_format(struct mtk_base_afe *afe,
			 int id, snd_pcm_format_t format)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	int hd_audio = 0;
	int hd_align = 0;

	/* set hd mode */
	switch (format) {
	case SNDRV_PCM_FORMAT_S16_LE:
	case SNDRV_PCM_FORMAT_U16_LE:
		hd_audio = 0;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
	case SNDRV_PCM_FORMAT_U32_LE:
		if (afe->memif_32bit_supported) {
			hd_audio = 2;
			hd_align = 0;
		} else {
			hd_audio = 1;
			hd_align = 1;
		}
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
	case SNDRV_PCM_FORMAT_U24_LE:
		hd_audio = 1;
		break;
	default:
		dev_err(afe->dev, "%s() error: unsupported format %d\n",
			__func__, format);
		break;
	}

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
			       0x3, hd_audio, memif->data->hd_shift);

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_align_reg,
			       0x1, hd_align, memif->data->hd_align_mshift);

	return 0;
}
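
/* Program the playback pre-buffer and minimum-length fields, if present. */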
int mtk_memif_set_pbuf_size(struct mtk_base_afe *afe,
			    int id, int pbuf_size)
{
	const struct mtk_base_memif_data *memif_data = afe->memif[id].data;

	if (memif_data->pbuf_mask == 0 || memif_data->minlen_mask == 0)
		return 0;

	mtk_regmap_update_bits(afe->regmap, memif_data->pbuf_reg,
			       memif_data->pbuf_mask,
			       pbuf_size, memif_data->pbuf_shift);

	mtk_regmap_update_bits(afe->regmap, memif_data->minlen_reg,
			       memif_data->minlen_mask,
			       pbuf_size, memif_data->minlen_shift);

	return 0;
}

MODULE_DESCRIPTION("Mediatek simple fe dai operator");