// SPDX-License-Identifier: (GPL-2.0 OR MIT)
//
// Copyright (c) 2018 BayLibre, SAS.
// Author: Jerome Brunet <jbrunet@baylibre.com>

#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

#include "axg-fifo.h"

/*
 * This file implements the platform operations common to the playback and
 * capture frontend DAIs. The logic behind these two types of FIFO is very
 * similar, but some differences exist.
 * These differences are handled in the respective DAI drivers.
 */

static struct snd_pcm_hardware axg_fifo_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE),

	.formats = AXG_FIFO_FORMATS,
	.rate_min = 5512,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = AXG_FIFO_CH_MAX,
	.period_bytes_min = AXG_FIFO_MIN_DEPTH,
	.period_bytes_max = UINT_MAX,
	.periods_min = 2,
	.periods_max = UINT_MAX,

	/* No real justification for this */
	.buffer_bytes_max = 1 * 1024 * 1024,
};

static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
{
	struct snd_soc_pcm_runtime *rtd = ss->private_data;

	return rtd->cpu_dai;
}

static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return snd_soc_dai_get_drvdata(dai);
}

static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return dai->dev;
}

static void __dma_enable(struct axg_fifo *fifo, bool enable)
{
	regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
			   enable ? CTRL0_DMA_EN : 0);
}
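
/*
 * PCM trigger callback: enable or disable the FIFO DMA depending on the
 * command. Start, resume and pause-release turn the DMA on; stop, suspend
 * and pause-push gate it. The addresses programmed in hw_params are left
 * untouched.
 */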
int axg_fifo_pcm_trigger(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss, int cmd)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		__dma_enable(fifo, true);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		__dma_enable(fifo, false);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_trigger);

snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_soc_component *component,
				       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	unsigned int addr;

	regmap_read(fifo->map, FIFO_STATUS2, &addr);

	return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_pointer);

int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
			   struct snd_pcm_substream *ss,
			   struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = ss->runtime;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	dma_addr_t end_ptr;
	unsigned int burst_num;
	int ret;

	ret = snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	/* Setup dma memory pointers */
	end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
	regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);

	/* Setup interrupt periodicity: one irq per period worth of bursts */
	burst_num = params_period_bytes(params) / AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);

	/* Enable block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT));

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_params);

int g12a_fifo_pcm_hw_params(struct snd_soc_component *component,
			    struct snd_pcm_substream *ss,
			    struct snd_pcm_hw_params *params)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	int ret;

	ret = axg_fifo_pcm_hw_params(component, ss, params);
	if (ret)
		return ret;

	/* Set the initial memory address of the DMA */
	regmap_write(fifo->map, FIFO_INIT_ADDR, runtime->dma_addr);

	return 0;
}
EXPORT_SYMBOL_GPL(g12a_fifo_pcm_hw_params);

int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	/* Disable the block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);

	return snd_pcm_lib_free_pages(ss);
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);

static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   CTRL1_INT_CLR(mask));

	/* The irq clear bits must themselves be cleared again */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   0);
}
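
/*
 * Block count interrupt handler: raised once the number of bursts programmed
 * in FIFO_INT_ADDR has been transferred, i.e. once per period. Any other
 * interrupt source is unexpected and only reported at debug level.
 */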
static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int status;

	regmap_read(fifo->map, FIFO_STATUS1, &status);

	status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
	if (status & FIFO_INT_COUNT_REPEAT)
		snd_pcm_period_elapsed(ss);
	else
		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
			status);

	/* Ack irqs */
	axg_fifo_ack_irq(fifo, status);

	return IRQ_RETVAL(status);
}

int axg_fifo_pcm_open(struct snd_soc_component *component,
		      struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct device *dev = axg_fifo_dev(ss);
	int ret;

	snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);

	/*
	 * Make sure the buffer and period sizes are multiples of the FIFO
	 * minimum depth
	 */
	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 AXG_FIFO_MIN_DEPTH);
	if (ret)
		return ret;

	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 AXG_FIFO_MIN_DEPTH);
	if (ret)
		return ret;

	ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
			  dev_name(dev), ss);
	if (ret)
		return ret;

	/* Enable pclk to access registers and clock the fifo ip */
	ret = clk_prepare_enable(fifo->pclk);
	if (ret)
		return ret;

	/* Setup status2 so it reports the memory pointer */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_STATUS2_SEL_MASK,
			   CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));

	/* Make sure the dma is initially disabled */
	__dma_enable(fifo, false);

	/* Disable irqs until params are ready */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_MASK), 0);

	/* Clear any pending interrupt */
	axg_fifo_ack_irq(fifo, FIFO_INT_MASK);

	/* Take the memory arbiter out of reset */
	ret = reset_control_deassert(fifo->arb);
	if (ret)
		clk_disable_unprepare(fifo->pclk);

	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_open);

int axg_fifo_pcm_close(struct snd_soc_component *component,
		       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	int ret;

	/* Put the memory arbiter back in reset */
	ret = reset_control_assert(fifo->arb);

	/* Disable fifo ip and register access */
	clk_disable_unprepare(fifo->pclk);

	/* remove IRQ */
	free_irq(fifo->irq, ss);

	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_close);

int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
	struct snd_card *card = rtd->card->snd_card;
	size_t size = axg_fifo_hw.buffer_bytes_max;

	snd_pcm_lib_preallocate_pages(rtd->pcm->streams[type].substream,
				      SNDRV_DMA_TYPE_DEV, card->dev,
				      size, size);
	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);
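
/*
 * MMIO register map shared by all FIFO instances: 32-bit registers with a
 * 4-byte stride, FIFO_CTRL2 being the highest mapped register.
 */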
static const struct regmap_config axg_fifo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = FIFO_CTRL2,
};

int axg_fifo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct axg_fifo_match_data *data;
	struct axg_fifo *fifo;
	void __iomem *regs;

	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "failed to match device\n");
		return -ENODEV;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;
	platform_set_drvdata(pdev, fifo);

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
	if (IS_ERR(fifo->map)) {
		dev_err(dev, "failed to init regmap: %ld\n",
			PTR_ERR(fifo->map));
		return PTR_ERR(fifo->map);
	}

	fifo->pclk = devm_clk_get(dev, NULL);
	if (IS_ERR(fifo->pclk)) {
		if (PTR_ERR(fifo->pclk) != -EPROBE_DEFER)
			dev_err(dev, "failed to get pclk: %ld\n",
				PTR_ERR(fifo->pclk));
		return PTR_ERR(fifo->pclk);
	}

	fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(fifo->arb)) {
		if (PTR_ERR(fifo->arb) != -EPROBE_DEFER)
			dev_err(dev, "failed to get arb reset: %ld\n",
				PTR_ERR(fifo->arb));
		return PTR_ERR(fifo->arb);
	}

	fifo->irq = of_irq_get(dev->of_node, 0);
	if (fifo->irq <= 0) {
		dev_err(dev, "failed to get irq: %d\n", fifo->irq);
		return fifo->irq;
	}

	return devm_snd_soc_register_component(dev, data->component_drv,
					       data->dai_drv, 1);
}
EXPORT_SYMBOL_GPL(axg_fifo_probe);

MODULE_DESCRIPTION("Amlogic AXG/G12A fifo driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");