// SPDX-License-Identifier: GPL-2.0
/*
 * mtk-afe-fe-dai.c  --  MediaTek AFE FE DAI operator
 *
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Garlic Tseng <garlic.tseng@mediatek.com>
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/soc.h>
#include "mtk-afe-platform-driver.h"
#include "mtk-afe-fe-dai.h"
#include "mtk-base-afe.h"

#define AFE_BASE_END_OFFSET 8

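/*
 * Thin wrappers around the regmap accessors: a negative register offset
 * means the field does not exist on this SoC, so the access is silently
 * skipped.
 */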
static int mtk_regmap_update_bits(struct regmap *map, int reg,
			   unsigned int mask,
			   unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_update_bits(map, reg, mask, val);
}

static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_write(map, reg, val);
}

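/*
 * DAI startup: attach the memif to the substream, enable its hardware
 * agent, apply the platform hw constraints (plus an extra-period minimum
 * for capture) and, if the memif has no IRQ yet, acquire one dynamically.
 */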
int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int memif_num = rtd->cpu_dai->id;
	struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
	const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
	int ret;

	memif->substream = substream;

	snd_pcm_hw_constraint_step(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
	/* enable the memif's hardware agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1 << memif->data->agent_disable_shift,
			       0 << memif->data->agent_disable_shift);

	snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);

	/*
	 * Capture cannot use a ping-pong buffer since hw_ptr at IRQ time may
	 * be smaller than period_size due to the AFE's internal buffer.
	 * This easily leads to overrun when avail_min is period_size.
	 * One more period can hold the possibly unread data.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		int periods_max = mtk_afe_hardware->periods_max;

		ret = snd_pcm_hw_constraint_minmax(runtime,
						   SNDRV_PCM_HW_PARAM_PERIODS,
						   3, periods_max);
		if (ret < 0) {
			dev_err(afe->dev, "hw_constraint_minmax failed\n");
			return ret;
		}
	}

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");

	/* dynamically allocate an IRQ to the memif */
	if (memif->irq_usage < 0) {
		int irq_id = mtk_dynamic_irq_acquire(afe);

		if (irq_id != afe->irqs_size) {
			/* link the acquired IRQ to this memif */
			memif->irq_usage = irq_id;
		} else {
			dev_err(afe->dev, "%s() error: no more asys irq\n",
				__func__);
			ret = -EBUSY;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);

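/*
 * DAI shutdown: disable the memif's hardware agent and, for memifs without
 * a fixed IRQ, release the dynamically acquired IRQ and detach the
 * substream.
 */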
void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int irq_id;

	irq_id = memif->irq_usage;

	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1 << memif->data->agent_disable_shift,
			       1 << memif->data->agent_disable_shift);

	if (!memif->const_irq) {
		mtk_dynamic_irq_release(afe, irq_id);
		memif->irq_usage = -1;
		memif->substream = NULL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);

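/*
 * hw_params: allocate the DMA buffer and program its physical start/end
 * addresses, the bit-33 address flag, the mono/stereo mode and the memif
 * sampling rate into the AFE registers.
 */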
int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int msb_at_bit33 = 0;
	int ret, fs = 0;

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
	memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
	memif->buffer_size = substream->runtime->dma_bytes;

	/* buffer start address */
	mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
			 memif->phys_buf_addr);
	/* buffer end address */
	mtk_regmap_write(afe->regmap,
			 memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
			 memif->phys_buf_addr + memif->buffer_size - 1);

	/* set the bit-33 MSB flag when the DMA buffer lies above 4 GiB */
	mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
			       1 << memif->data->msb_shift,
			       msb_at_bit33 << memif->data->msb_shift);

	/* set mono/stereo mode */
	if (memif->data->mono_shift >= 0) {
		unsigned int mono = (params_channels(params) == 1) ? 1 : 0;

		mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
				       1 << memif->data->mono_shift,
				       mono << memif->data->mono_shift);
	}

	/* set sampling rate */
	if (memif->data->fs_shift < 0)
		return 0;

	fs = afe->memif_fs(substream, params_rate(params));

	if (fs < 0)
		return -EINVAL;

	mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
			       memif->data->fs_maskbit << memif->data->fs_shift,
			       fs << memif->data->fs_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);

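/* hw_free: release the DMA buffer allocated in hw_params */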
int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	return snd_pcm_lib_free_pages(substream);
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);

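/*
 * trigger: on START/RESUME enable the memif and program the IRQ period
 * counter, IRQ rate and IRQ enable bit; on STOP/SUSPEND disable the memif,
 * mask the IRQ and clear any pending status.
 */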
int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
	const struct mtk_base_irq_data *irq_data = irqs->irq_data;
	unsigned int counter = runtime->period_size;
	int fs;

	dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (memif->data->enable_shift >= 0)
			mtk_regmap_update_bits(afe->regmap,
					       memif->data->enable_reg,
					       1 << memif->data->enable_shift,
					       1 << memif->data->enable_shift);

		/* set irq counter */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
				       irq_data->irq_cnt_maskbit
				       << irq_data->irq_cnt_shift,
				       counter << irq_data->irq_cnt_shift);

		/* set irq fs */
		fs = afe->irq_fs(substream, runtime->rate);

		if (fs < 0)
			return -EINVAL;

		mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
				       irq_data->irq_fs_maskbit
				       << irq_data->irq_fs_shift,
				       fs << irq_data->irq_fs_shift);

		/* enable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1 << irq_data->irq_en_shift,
				       1 << irq_data->irq_en_shift);

		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				       1 << memif->data->enable_shift, 0);
		/* disable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1 << irq_data->irq_en_shift,
				       0 << irq_data->irq_en_shift);
		/* and clear pending IRQ */
		mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
				 1 << irq_data->irq_clr_shift);
		return 0;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);

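/*
 * prepare: select normal (16-bit) or HD mode for the memif according to
 * the negotiated sample format.
 */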
int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int hd_audio = 0;

	/* select HD (more than 16-bit) mode based on the sample format */
	switch (substream->runtime->format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		hd_audio = 0;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
	case SNDRV_PCM_FORMAT_S24_LE:
		hd_audio = 1;
		break;
	default:
		dev_err(afe->dev, "%s() error: unsupported format %d\n",
			__func__, substream->runtime->format);
		break;
	}

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
			       1 << memif->data->hd_shift,
			       hd_audio << memif->data->hd_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);

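/* Common FE DAI ops exported for the SoC-specific MediaTek AFE drivers */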
const struct snd_soc_dai_ops mtk_afe_fe_ops = {
	.startup	= mtk_afe_fe_startup,
	.shutdown	= mtk_afe_fe_shutdown,
	.hw_params	= mtk_afe_fe_hw_params,
	.hw_free	= mtk_afe_fe_hw_free,
	.prepare	= mtk_afe_fe_prepare,
	.trigger	= mtk_afe_fe_trigger,
};
EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);

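/*
 * Dynamic IRQ bookkeeping: acquire returns the index of a free AFE IRQ,
 * or afe->irqs_size when none is available.
 */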
static DEFINE_MUTEX(irqs_lock);
int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
{
	int i;

	mutex_lock(&afe->irq_alloc_lock);
	for (i = 0; i < afe->irqs_size; ++i) {
		if (afe->irqs[i].irq_occupyed == 0) {
			afe->irqs[i].irq_occupyed = 1;
			mutex_unlock(&afe->irq_alloc_lock);
			return i;
		}
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return afe->irqs_size;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);

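/* Mark a previously acquired AFE IRQ as free again */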
int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
{
	mutex_lock(&afe->irq_alloc_lock);
	if (irq_id >= 0 && irq_id < afe->irqs_size) {
		afe->irqs[irq_id].irq_occupyed = 0;
		mutex_unlock(&afe->irq_alloc_lock);
		return 0;
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);

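/*
 * System-suspend helper: back up the registers listed in
 * afe->reg_back_up_list and let the SoC driver's runtime_suspend callback
 * power down the AFE. Skipped when the device is already runtime-suspended.
 */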
int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || afe->suspended)
		return 0;

	if (!afe->reg_back_up)
		afe->reg_back_up =
			devm_kcalloc(dev, afe->reg_back_up_list_num,
				     sizeof(unsigned int), GFP_KERNEL);

	/* only back up registers if the backup buffer could be allocated */
	if (afe->reg_back_up) {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			regmap_read(regmap, afe->reg_back_up_list[i],
				    &afe->reg_back_up[i]);
	}

	afe->suspended = true;
	afe->runtime_suspend(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_suspend);

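/*
 * System-resume helper: power the AFE back up via runtime_resume and
 * restore the backed-up registers.
 */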
int mtk_afe_dai_resume(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i = 0;

	if (pm_runtime_status_suspended(dev) || !afe->suspended)
		return 0;

	afe->runtime_resume(dev);

	/* restore registers only if a backup exists */
	if (!afe->reg_back_up) {
		dev_dbg(dev, "%s no reg_backup\n", __func__);
	} else {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			mtk_regmap_write(regmap, afe->reg_back_up_list[i],
					 afe->reg_back_up[i]);
	}

	afe->suspended = false;
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_resume);

MODULE_DESCRIPTION("Mediatek simple fe dai operator");
MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
MODULE_LICENSE("GPL v2");