/*
 * mtk-afe-fe-dais.c  --  Mediatek afe fe dai operator
 *
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Garlic Tseng <garlic.tseng@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/soc.h>
#include "mtk-afe-fe-dai.h"
#include "mtk-base-afe.h"

#define AFE_BASE_END_OFFSET 8

static int mtk_regmap_update_bits(struct regmap *map, int reg,
				  unsigned int mask,
				  unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_update_bits(map, reg, mask, val);
}

static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_write(map, reg, val);
}

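/*
 * FE DAI startup: enable the memif agent, apply the platform's hardware
 * constraints (16-byte aligned buffer size, a whole number of periods and
 * at least three periods for capture) and reserve a dynamic IRQ for this
 * memif when it does not use a fixed one.
 */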
int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int memif_num = rtd->cpu_dai->id;
	struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
	const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
	int ret;

	memif->substream = substream;

	snd_pcm_hw_constraint_step(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
	/* enable agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1 << memif->data->agent_disable_shift,
			       0 << memif->data->agent_disable_shift);

	snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);

	/*
	 * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
	 * smaller than period_size due to AFE's internal buffer.
	 * This easily leads to overrun when avail_min is period_size.
	 * One more period can hold the possible unread buffer.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		int periods_max = mtk_afe_hardware->periods_max;

		ret = snd_pcm_hw_constraint_minmax(runtime,
						   SNDRV_PCM_HW_PARAM_PERIODS,
						   3, periods_max);
		if (ret < 0) {
			dev_err(afe->dev, "hw_constraint_minmax failed\n");
			return ret;
		}
	}

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");

	/* dynamic allocate irq to memif */
	if (memif->irq_usage < 0) {
		int irq_id = mtk_dynamic_irq_acquire(afe);

		if (irq_id != afe->irqs_size) {
			/* link */
			memif->irq_usage = irq_id;
		} else {
			dev_err(afe->dev, "%s() error: no more asys irq\n",
				__func__);
			ret = -EBUSY;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);

void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int irq_id;

	irq_id = memif->irq_usage;

	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1 << memif->data->agent_disable_shift,
			       1 << memif->data->agent_disable_shift);

	if (!memif->const_irq) {
		mtk_dynamic_irq_release(afe, irq_id);
		memif->irq_usage = -1;
		memif->substream = NULL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);

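/*
 * FE DAI hw_params: allocate the DMA buffer and program it into the AFE:
 * buffer start/end addresses, the bit-33 MSB flag for buffers placed above
 * the 4 GiB boundary, mono/stereo selection and the memif sampling-rate
 * field.
 */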
int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int msb_at_bit33 = 0;
	int ret, fs = 0;

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
	memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
	memif->buffer_size = substream->runtime->dma_bytes;

	/* start */
	mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
			 memif->phys_buf_addr);
	/* end */
	mtk_regmap_write(afe->regmap,
			 memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
			 memif->phys_buf_addr + memif->buffer_size - 1);

	/* set MSB to 33-bit */
	mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
			       1 << memif->data->msb_shift,
			       msb_at_bit33 << memif->data->msb_shift);

	/* set channel */
	if (memif->data->mono_shift >= 0) {
		unsigned int mono = (params_channels(params) == 1) ? 1 : 0;

		mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
				       1 << memif->data->mono_shift,
				       mono << memif->data->mono_shift);
	}

	/* set rate */
	if (memif->data->fs_shift < 0)
		return 0;

	fs = afe->memif_fs(substream, params_rate(params));

	if (fs < 0)
		return -EINVAL;

	mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
			       memif->data->fs_maskbit << memif->data->fs_shift,
			       fs << memif->data->fs_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);

int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	return snd_pcm_lib_free_pages(substream);
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);

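/*
 * FE DAI trigger: on start/resume, enable the memif, program the IRQ period
 * counter and IRQ rate and unmask the interrupt; on stop/suspend, disable
 * the memif, mask the interrupt and clear any pending IRQ status.
 */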
int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
	const struct mtk_base_irq_data *irq_data = irqs->irq_data;
	unsigned int counter = runtime->period_size;
	int fs;

	dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (memif->data->enable_shift >= 0)
			mtk_regmap_update_bits(afe->regmap,
					       memif->data->enable_reg,
					       1 << memif->data->enable_shift,
					       1 << memif->data->enable_shift);

		/* set irq counter */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
				       irq_data->irq_cnt_maskbit
				       << irq_data->irq_cnt_shift,
				       counter << irq_data->irq_cnt_shift);

		/* set irq fs */
		fs = afe->irq_fs(substream, runtime->rate);

		if (fs < 0)
			return -EINVAL;

		mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
				       irq_data->irq_fs_maskbit
				       << irq_data->irq_fs_shift,
				       fs << irq_data->irq_fs_shift);

		/* enable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1 << irq_data->irq_en_shift,
				       1 << irq_data->irq_en_shift);

		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				       1 << memif->data->enable_shift, 0);
		/* disable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1 << irq_data->irq_en_shift,
				       0 << irq_data->irq_en_shift);
		/* and clear pending IRQ */
		mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
				 1 << irq_data->irq_clr_shift);
		return 0;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);

int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int hd_audio = 0;

	/* set hd mode */
	switch (substream->runtime->format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		hd_audio = 0;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		hd_audio = 1;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		hd_audio = 1;
		break;
	default:
		dev_err(afe->dev, "%s() error: unsupported format %d\n",
			__func__, substream->runtime->format);
		break;
	}

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
			       1 << memif->data->hd_shift,
			       hd_audio << memif->data->hd_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);

const struct snd_soc_dai_ops mtk_afe_fe_ops = {
	.startup	= mtk_afe_fe_startup,
	.shutdown	= mtk_afe_fe_shutdown,
	.hw_params	= mtk_afe_fe_hw_params,
	.hw_free	= mtk_afe_fe_hw_free,
	.prepare	= mtk_afe_fe_prepare,
	.trigger	= mtk_afe_fe_trigger,
};
EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);

static DEFINE_MUTEX(irqs_lock);

/* Grab a free dynamic IRQ slot; returns afe->irqs_size when none is left. */
int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
{
	int i;

	mutex_lock(&afe->irq_alloc_lock);
	for (i = 0; i < afe->irqs_size; ++i) {
		if (afe->irqs[i].irq_occupyed == 0) {
			afe->irqs[i].irq_occupyed = 1;
			mutex_unlock(&afe->irq_alloc_lock);
			return i;
		}
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return afe->irqs_size;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);

/* Return a dynamically acquired IRQ slot to the pool. */
int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
{
	mutex_lock(&afe->irq_alloc_lock);
	if (irq_id >= 0 && irq_id < afe->irqs_size) {
		afe->irqs[irq_id].irq_occupyed = 0;
		mutex_unlock(&afe->irq_alloc_lock);
		return 0;
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);

/* Back up the listed AFE registers, then let the platform power the AFE down. */
int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || afe->suspended)
		return 0;

	if (!afe->reg_back_up)
		afe->reg_back_up =
			devm_kcalloc(dev, afe->reg_back_up_list_num,
				     sizeof(unsigned int), GFP_KERNEL);

	/* skip the backup if the allocation failed */
	if (afe->reg_back_up)
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			regmap_read(regmap, afe->reg_back_up_list[i],
				    &afe->reg_back_up[i]);

	afe->suspended = true;
	afe->runtime_suspend(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_suspend);

/* Power the AFE back up and restore the registers saved at suspend time. */
int mtk_afe_dai_resume(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i = 0;

	if (pm_runtime_status_suspended(dev) || !afe->suspended)
		return 0;

	afe->runtime_resume(dev);

	/* nothing to restore if no backup was taken */
	if (!afe->reg_back_up) {
		dev_dbg(dev, "%s no reg_backup\n", __func__);
	} else {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			mtk_regmap_write(regmap, afe->reg_back_up_list[i],
					 afe->reg_back_up[i]);
	}

	afe->suspended = false;
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_resume);

MODULE_DESCRIPTION("Mediatek simple fe dai operator");
MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
MODULE_LICENSE("GPL v2");
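
/*
 * Usage sketch (illustrative only, not part of this file's build): platform
 * drivers such as mt8173-afe-pcm.c hook these exported helpers into their
 * front-end DAI definitions roughly as below.  The DAI name, memif id macro
 * and stream parameters here are hypothetical placeholders.
 *
 *	static struct snd_soc_dai_driver example_fe_dais[] = {
 *		{
 *			.name = "DL1",
 *			.id = EXAMPLE_AFE_MEMIF_DL1,
 *			.suspend = mtk_afe_dai_suspend,
 *			.resume = mtk_afe_dai_resume,
 *			.playback = {
 *				.stream_name = "DL1",
 *				.channels_min = 1,
 *				.channels_max = 2,
 *				.rates = SNDRV_PCM_RATE_8000_48000,
 *				.formats = SNDRV_PCM_FMTBIT_S16_LE,
 *			},
 *			.ops = &mtk_afe_fe_ops,
 *		},
 *	};
 */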