// SPDX-License-Identifier: (GPL-2.0 OR MIT)
//
// Copyright (c) 2018 BayLibre, SAS.
// Author: Jerome Brunet <jbrunet@baylibre.com>

#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

#include "axg-fifo.h"

/*
 * This file implements the platform operations common to the playback and
 * capture frontend DAIs. The logic behind these two types of FIFO is very
 * similar, but some differences exist.
 * These differences are handled in the respective DAI drivers.
 */

static struct snd_pcm_hardware axg_fifo_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE),

	.formats = AXG_FIFO_FORMATS,
	.rate_min = 5512,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = AXG_FIFO_CH_MAX,
	.period_bytes_min = AXG_FIFO_MIN_DEPTH,
	.period_bytes_max = UINT_MAX,
	.periods_min = 2,
	.periods_max = UINT_MAX,

	/* No real justification for this */
	.buffer_bytes_max = 1 * 1024 * 1024,
};

static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
{
	struct snd_soc_pcm_runtime *rtd = ss->private_data;

	return rtd->cpu_dai;
}

static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return snd_soc_dai_get_drvdata(dai);
}

static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return dai->dev;
}

static void __dma_enable(struct axg_fifo *fifo, bool enable)
{
	regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
			   enable ? CTRL0_DMA_EN : 0);
}

int axg_fifo_pcm_trigger(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss, int cmd)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		__dma_enable(fifo, true);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		__dma_enable(fifo, false);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_trigger);

snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_soc_component *component,
				       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	unsigned int addr;

	regmap_read(fifo->map, FIFO_STATUS2, &addr);

	return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_pointer);

int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
			   struct snd_pcm_substream *ss,
			   struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = ss->runtime;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	dma_addr_t end_ptr;
	unsigned int burst_num;

	/* Setup dma memory pointers */
	end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
	regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);

	/* Setup interrupt periodicity */
	burst_num = params_period_bytes(params) / AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);

	/* Enable block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT));

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_params);

int g12a_fifo_pcm_hw_params(struct snd_soc_component *component,
			    struct snd_pcm_substream *ss,
			    struct snd_pcm_hw_params *params)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	int ret;

	ret = axg_fifo_pcm_hw_params(component, ss, params);
	if (ret)
		return ret;

	/* Set the initial memory address of the DMA */
	regmap_write(fifo->map, FIFO_INIT_ADDR, runtime->dma_addr);

	return 0;
}
EXPORT_SYMBOL_GPL(g12a_fifo_pcm_hw_params);

int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	/* Disable the block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);

static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   CTRL1_INT_CLR(mask));

	/* The clear bits must themselves be cleared (written back to 0) */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   0);
}

static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int status;

	regmap_read(fifo->map, FIFO_STATUS1, &status);

	status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
	if (status & FIFO_INT_COUNT_REPEAT)
		snd_pcm_period_elapsed(ss);
	else
		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
			status);

	/* Ack irqs */
	axg_fifo_ack_irq(fifo, status);

	return IRQ_RETVAL(status);
}

int axg_fifo_pcm_open(struct snd_soc_component *component,
		      struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct device *dev = axg_fifo_dev(ss);
	int ret;

	snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);

	/*
	 * Make sure the buffer and period sizes are multiples of the FIFO
	 * minimum depth
	 */
	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 AXG_FIFO_MIN_DEPTH);
	if (ret)
		return ret;

	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 AXG_FIFO_MIN_DEPTH);
	if (ret)
		return ret;

	ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
			  dev_name(dev), ss);
	if (ret)
		return ret;

	/* Enable pclk to access registers and clock the fifo ip */
	ret = clk_prepare_enable(fifo->pclk);
	if (ret)
		return ret;

	/* Setup status2 so it reports the memory pointer */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_STATUS2_SEL_MASK,
			   CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));

	/* Make sure the dma is initially disabled */
	__dma_enable(fifo, false);

	/* Disable irqs until params are ready */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_MASK), 0);

	/* Clear any pending interrupt */
	axg_fifo_ack_irq(fifo, FIFO_INT_MASK);

	/* Take memory arbiter out of reset */
	ret = reset_control_deassert(fifo->arb);
	if (ret)
		clk_disable_unprepare(fifo->pclk);

	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_open);

int axg_fifo_pcm_close(struct snd_soc_component *component,
		       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	int ret;

	/* Put the memory arbiter back in reset */
	ret = reset_control_assert(fifo->arb);

	/* Disable fifo ip and register access */
	clk_disable_unprepare(fifo->pclk);

	/* Remove the IRQ */
	free_irq(fifo->irq, ss);

	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_close);

int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
	struct snd_card *card = rtd->card->snd_card;
	size_t size = axg_fifo_hw.buffer_bytes_max;

	snd_pcm_set_managed_buffer(rtd->pcm->streams[type].substream,
				   SNDRV_DMA_TYPE_DEV, card->dev,
				   size, size);
	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);

static const struct regmap_config axg_fifo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = FIFO_CTRL2,
};

int axg_fifo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct axg_fifo_match_data *data;
	struct axg_fifo *fifo;
	void __iomem *regs;

	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "failed to match device\n");
		return -ENODEV;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;
	platform_set_drvdata(pdev, fifo);

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
	if (IS_ERR(fifo->map)) {
		dev_err(dev, "failed to init regmap: %ld\n",
			PTR_ERR(fifo->map));
		return PTR_ERR(fifo->map);
	}

	fifo->pclk = devm_clk_get(dev, NULL);
	if (IS_ERR(fifo->pclk)) {
		if (PTR_ERR(fifo->pclk) != -EPROBE_DEFER)
			dev_err(dev, "failed to get pclk: %ld\n",
				PTR_ERR(fifo->pclk));
		return PTR_ERR(fifo->pclk);
	}

	fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(fifo->arb)) {
		if (PTR_ERR(fifo->arb) != -EPROBE_DEFER)
			dev_err(dev, "failed to get arb reset: %ld\n",
				PTR_ERR(fifo->arb));
		return PTR_ERR(fifo->arb);
	}

	fifo->irq = of_irq_get(dev->of_node, 0);
	if (fifo->irq <= 0) {
		dev_err(dev, "failed to get irq: %d\n", fifo->irq);
		return fifo->irq;
	}

	return devm_snd_soc_register_component(dev, data->component_drv,
					       data->dai_drv, 1);
}
EXPORT_SYMBOL_GPL(axg_fifo_probe);

MODULE_DESCRIPTION("Amlogic AXG/G12A fifo driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");