// SPDX-License-Identifier: GPL-2.0-only
//
// Apple SoCs MCA driver
//
// Copyright (C) The Asahi Linux Contributors
//
// The MCA peripheral is made up of a number of identical units called clusters.
// Each cluster has its own clock parent and SYNC signal generator, carries
// four SERDES units, and has a dedicated I2S port on the SoC's periphery.
//
// The clusters can operate independently, or they can be combined together in
// a configurable manner. We mostly treat them as self-contained independent
// units and don't configure any cross-cluster connections except for the I2S
// ports. The I2S ports can be routed to any of the clusters (irrespective
// of their native cluster). We map this onto ASoC's (DPCM) notion of backend
// and frontend DAIs: the 'cluster guts' are frontends which are dynamically
// routed to backend I2S ports.
//
// DAI references in the devicetree are resolved to backends. The routing
// between frontends and backends is determined by the machine driver in the
// DAPM paths it supplies.

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>

#define USE_RXB_FOR_CAPTURE

/* Relative to cluster base */
#define REG_STATUS		0x0
#define STATUS_MCLK_EN		BIT(0)
#define REG_MCLK_CONF		0x4
#define MCLK_CONF_DIV		GENMASK(11, 8)

#define REG_SYNCGEN_STATUS	0x100
#define SYNCGEN_STATUS_EN	BIT(0)
#define REG_SYNCGEN_MCLK_SEL	0x104
#define SYNCGEN_MCLK_SEL	GENMASK(3, 0)
#define REG_SYNCGEN_HI_PERIOD	0x108
#define REG_SYNCGEN_LO_PERIOD	0x10c

#define REG_PORT_ENABLES	0x600
#define PORT_ENABLES_CLOCKS	GENMASK(2, 1)
#define PORT_ENABLES_TX_DATA	BIT(3)
#define REG_PORT_CLOCK_SEL	0x604
#define PORT_CLOCK_SEL		GENMASK(11, 8)
#define REG_PORT_DATA_SEL	0x608
#define PORT_DATA_SEL_TXA(cl)	(1 << ((cl)*2))
#define PORT_DATA_SEL_TXB(cl)	(2 << ((cl)*2))

#define REG_INTSTATE		0x700
#define REG_INTMASK		0x704

/* Bases of serdes units (relative to cluster) */
#define CLUSTER_RXA_OFF	0x200
#define CLUSTER_TXA_OFF	0x300
#define CLUSTER_RXB_OFF	0x400
#define CLUSTER_TXB_OFF	0x500

#define CLUSTER_TX_OFF	CLUSTER_TXA_OFF

#ifndef USE_RXB_FOR_CAPTURE
#define CLUSTER_RX_OFF	CLUSTER_RXA_OFF
#else
#define CLUSTER_RX_OFF	CLUSTER_RXB_OFF
#endif

/* Relative to serdes unit base */
#define REG_SERDES_STATUS	0x00
#define SERDES_STATUS_EN	BIT(0)
#define SERDES_STATUS_RST	BIT(1)
#define REG_TX_SERDES_CONF	0x04
#define REG_RX_SERDES_CONF	0x08
#define SERDES_CONF_NCHANS	GENMASK(3, 0)
#define SERDES_CONF_WIDTH_MASK	GENMASK(8, 4)
#define SERDES_CONF_WIDTH_16BIT	0x40
#define SERDES_CONF_WIDTH_20BIT	0x80
#define SERDES_CONF_WIDTH_24BIT	0xc0
#define SERDES_CONF_WIDTH_32BIT	0x100
#define SERDES_CONF_BCLK_POL	0x400
#define SERDES_CONF_LSB_FIRST	0x800
#define SERDES_CONF_UNK1	BIT(12)
#define SERDES_CONF_UNK2	BIT(13)
#define SERDES_CONF_UNK3	BIT(14)
#define SERDES_CONF_NO_DATA_FEEDBACK	BIT(15)
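/*
 * Our best guess at the SYNC_SEL semantics: it selects which cluster's SYNC
 * generator frames this serdes unit, encoded as cluster number + 1 (see
 * mca_configure_serdes() below).
 */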
#define SERDES_CONF_SYNC_SEL	GENMASK(18, 16)
#define SERDES_CONF_SOME_RST	BIT(19)
#define REG_TX_SERDES_BITSTART	0x08
#define REG_RX_SERDES_BITSTART	0x0c
#define REG_TX_SERDES_SLOTMASK	0x0c
#define REG_RX_SERDES_SLOTMASK	0x10
#define REG_RX_SERDES_PORT	0x04

/* Relative to switch base */
#define REG_DMA_ADAPTER_A(cl)	(0x8000 * (cl))
#define REG_DMA_ADAPTER_B(cl)	(0x8000 * (cl) + 0x4000)
#define DMA_ADAPTER_TX_LSB_PAD	GENMASK(4, 0)
#define DMA_ADAPTER_TX_NCHANS	GENMASK(6, 5)
#define DMA_ADAPTER_RX_MSB_PAD	GENMASK(12, 8)
#define DMA_ADAPTER_RX_NCHANS	GENMASK(14, 13)
#define DMA_ADAPTER_NCHANS	GENMASK(22, 20)

#define SWITCH_STRIDE	0x8000
#define CLUSTER_STRIDE	0x4000

#define MAX_NCLUSTERS	6

#define APPLE_MCA_FMTBITS (SNDRV_PCM_FMTBIT_S16_LE | \
			   SNDRV_PCM_FMTBIT_S24_LE | \
			   SNDRV_PCM_FMTBIT_S32_LE)

struct mca_cluster {
	int no;
	__iomem void *base;
	struct mca_data *host;
	struct device *pd_dev;
	struct clk *clk_parent;
	struct dma_chan *dma_chans[SNDRV_PCM_STREAM_LAST + 1];

	bool port_started[SNDRV_PCM_STREAM_LAST + 1];
	int port_driver; /* The cluster driving this cluster's port */

	bool clocks_in_use[SNDRV_PCM_STREAM_LAST + 1];
	struct device_link *pd_link;

	unsigned int bclk_ratio;

	/* Masks etc. picked up via the set_tdm_slot method */
	int tdm_slots;
	int tdm_slot_width;
	unsigned int tdm_tx_mask;
	unsigned int tdm_rx_mask;
};

struct mca_data {
	struct device *dev;

	__iomem void *switch_base;

	struct device *pd_dev;
	struct reset_control *rstc;
	struct device_link *pd_link;

	/* Mutex for accessing port_driver of foreign clusters */
	struct mutex port_mutex;

	int nclusters;
	struct mca_cluster clusters[];
};

static void mca_modify(struct mca_cluster *cl, int regoffset, u32 mask, u32 val)
{
	__iomem void *ptr = cl->base + regoffset;
	u32 newval;

	newval = (val & mask) | (readl_relaxed(ptr) & ~mask);
	writel_relaxed(newval, ptr);
}

/*
 * Get the cluster of FE or BE DAI
 */
static struct mca_cluster *mca_dai_to_cluster(struct snd_soc_dai *dai)
{
	struct mca_data *mca = snd_soc_dai_get_drvdata(dai);
	/*
	 * FE DAIs are 0 ... nclusters - 1
	 * BE DAIs are nclusters ... 2*nclusters - 1
	 */
	int cluster_no = dai->id % mca->nclusters;

	return &mca->clusters[cluster_no];
}

/* called before PCM trigger */
static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd,
				 struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
	int serdes_conf =
		serdes_unit + (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF);
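
	/*
	 * On (re)start, take the serdes unit through a reset sequence. The
	 * exact semantics of these register pokes are not understood (hence
	 * the _SOME_RST and _UNK* field names); this is the dance that has
	 * been found to bring the unit into a consistent state.
	 */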
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN | SERDES_STATUS_RST,
			   SERDES_STATUS_RST);
		mca_modify(cl, serdes_conf, SERDES_CONF_SOME_RST,
			   SERDES_CONF_SOME_RST);
		readl_relaxed(cl->base + serdes_conf);
		mca_modify(cl, serdes_conf, SERDES_STATUS_RST, 0);
		WARN_ON(readl_relaxed(cl->base + REG_SERDES_STATUS) &
			SERDES_STATUS_RST);
		break;
	default:
		break;
	}
}

static int mca_fe_trigger(struct snd_pcm_substream *substream, int cmd,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN | SERDES_STATUS_RST,
			   SERDES_STATUS_EN);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN, 0);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int mca_fe_enable_clocks(struct mca_cluster *cl)
{
	struct mca_data *mca = cl->host;
	int ret;

	ret = clk_prepare_enable(cl->clk_parent);
	if (ret) {
		dev_err(mca->dev,
			"cluster %d: unable to enable clock parent: %d\n",
			cl->no, ret);
		return ret;
	}

	/*
	 * We can't power up the device earlier than this because
	 * the power state driver would error out on seeing the device
	 * as clock-gated.
	 */
	cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
				      DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				      DL_FLAG_RPM_ACTIVE);
	if (!cl->pd_link) {
		dev_err(mca->dev,
			"cluster %d: unable to prop-up power domain\n", cl->no);
		clk_disable_unprepare(cl->clk_parent);
		return -EINVAL;
	}

	writel_relaxed(cl->no + 1, cl->base + REG_SYNCGEN_MCLK_SEL);
	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN,
		   SYNCGEN_STATUS_EN);
	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, STATUS_MCLK_EN);

	return 0;
}
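
/*
 * Reverse of mca_fe_enable_clocks(). Deleting the device link drops the
 * runtime PM reference we held on the cluster's power domain, letting it
 * power down again.
 */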
static void mca_fe_disable_clocks(struct mca_cluster *cl)
{
	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0);
	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, 0);

	device_link_del(cl->pd_link);
	clk_disable_unprepare(cl->clk_parent);
}

static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
{
	struct mca_data *mca = cl->host;
	struct mca_cluster *be_cl;
	int stream, i;

	mutex_lock(&mca->port_mutex);
	for (i = 0; i < mca->nclusters; i++) {
		be_cl = &mca->clusters[i];

		if (be_cl->port_driver != cl->no)
			continue;

		for_each_pcm_streams(stream) {
			if (be_cl->clocks_in_use[stream]) {
				mutex_unlock(&mca->port_mutex);
				return true;
			}
		}
	}
	mutex_unlock(&mca->port_mutex);
	return false;
}

static int mca_be_prepare(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct mca_cluster *fe_cl;
	int ret;

	if (cl->port_driver < 0)
		return -EINVAL;

	fe_cl = &mca->clusters[cl->port_driver];

	/*
	 * Typically the CODECs we are paired with will require clocks
	 * to be present at time of unmute with the 'mute_stream' op
	 * or at time of DAPM widget power-up. We need to enable clocks
	 * here at the latest (frontend prepare would be too late).
	 */
	if (!mca_fe_clocks_in_use(fe_cl)) {
		ret = mca_fe_enable_clocks(fe_cl);
		if (ret < 0)
			return ret;
	}

	cl->clocks_in_use[substream->stream] = true;

	return 0;
}

static int mca_be_hw_free(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct mca_cluster *fe_cl;

	if (cl->port_driver < 0)
		return -EINVAL;

	/*
	 * We are operating on a foreign cluster here, but since we
	 * belong to the same PCM, accesses should have been
	 * synchronized at ASoC level.
	 */
	fe_cl = &mca->clusters[cl->port_driver];
	if (!mca_fe_clocks_in_use(fe_cl))
		return 0; /* Nothing to do */

	cl->clocks_in_use[substream->stream] = false;

	if (!mca_fe_clocks_in_use(fe_cl))
		mca_fe_disable_clocks(fe_cl);

	return 0;
}
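
/* Drop the highest set bits of @mask until no more than @nchans slots remain. */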
static unsigned int mca_crop_mask(unsigned int mask, int nchans)
{
	while (hweight32(mask) > nchans)
		mask &= ~(1 << __fls(mask));

	return mask;
}

static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
				unsigned int mask, int slots, int nchans,
				int slot_width, bool is_tx, int port)
{
	__iomem void *serdes_base = cl->base + serdes_unit;
	u32 serdes_conf, serdes_conf_mask;

	serdes_conf_mask = SERDES_CONF_WIDTH_MASK | SERDES_CONF_NCHANS;
	serdes_conf = FIELD_PREP(SERDES_CONF_NCHANS, max(slots, 1) - 1);
	switch (slot_width) {
	case 16:
		serdes_conf |= SERDES_CONF_WIDTH_16BIT;
		break;
	case 20:
		serdes_conf |= SERDES_CONF_WIDTH_20BIT;
		break;
	case 24:
		serdes_conf |= SERDES_CONF_WIDTH_24BIT;
		break;
	case 32:
		serdes_conf |= SERDES_CONF_WIDTH_32BIT;
		break;
	default:
		goto err;
	}

	serdes_conf_mask |= SERDES_CONF_SYNC_SEL;
	serdes_conf |= FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1);

	if (is_tx) {
		serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
				    SERDES_CONF_UNK3;
		serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
			       SERDES_CONF_UNK3;
	} else {
		serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
				    SERDES_CONF_UNK3 |
				    SERDES_CONF_NO_DATA_FEEDBACK;
		serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
			       SERDES_CONF_NO_DATA_FEEDBACK;
	}

	mca_modify(cl,
		   serdes_unit +
			   (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF),
		   serdes_conf_mask, serdes_conf);

	if (is_tx) {
		writel_relaxed(0xffffffff,
			       serdes_base + REG_TX_SERDES_SLOTMASK);
		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0x4);
		writel_relaxed(0xffffffff,
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0x8);
		writel_relaxed(~((u32)mask),
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0xc);
	} else {
		writel_relaxed(0xffffffff,
			       serdes_base + REG_RX_SERDES_SLOTMASK);
		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
			       serdes_base + REG_RX_SERDES_SLOTMASK + 0x4);
		writel_relaxed(1 << port,
			       serdes_base + REG_RX_SERDES_PORT);
	}

	return 0;

err:
	dev_err(cl->host->dev,
		"unsupported SERDES configuration requested (mask=0x%x slots=%d slot_width=%d)\n",
		mask, slots, slot_width);
	return -EINVAL;
}

static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
			       unsigned int rx_mask, int slots, int slot_width)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);

	cl->tdm_slots = slots;
	cl->tdm_slot_width = slot_width;
	cl->tdm_tx_mask = tx_mask;
	cl->tdm_rx_mask = rx_mask;

	return 0;
}

static int mca_fe_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	bool fpol_inv = false;
	u32 serdes_conf = 0;
	u32 bitstart;

	if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) !=
	    SND_SOC_DAIFMT_BP_FP)
		goto err;

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		fpol_inv = 0;
		bitstart = 1;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		fpol_inv = 1;
		bitstart = 0;
		break;
	default:
		goto err;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_IF:
	case SND_SOC_DAIFMT_IB_IF:
		fpol_inv ^= 1;
		break;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
	case SND_SOC_DAIFMT_NB_IF:
		serdes_conf |= SERDES_CONF_BCLK_POL;
		break;
	}
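
	/*
	 * Only one frame-sync polarity appears to be supported by the
	 * hardware; reject the format/inversion combinations that would
	 * require the other one.
	 */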
	if (!fpol_inv)
		goto err;

	mca_modify(cl, CLUSTER_TX_OFF + REG_TX_SERDES_CONF,
		   SERDES_CONF_BCLK_POL, serdes_conf);
	mca_modify(cl, CLUSTER_RX_OFF + REG_RX_SERDES_CONF,
		   SERDES_CONF_BCLK_POL, serdes_conf);
	writel_relaxed(bitstart,
		       cl->base + CLUSTER_TX_OFF + REG_TX_SERDES_BITSTART);
	writel_relaxed(bitstart,
		       cl->base + CLUSTER_RX_OFF + REG_RX_SERDES_BITSTART);

	return 0;

err:
	dev_err(mca->dev, "unsupported DAI format (0x%x) requested\n", fmt);
	return -EINVAL;
}

static int mca_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);

	cl->bclk_ratio = ratio;

	return 0;
}

static int mca_fe_get_port(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	struct snd_soc_pcm_runtime *be;
	struct snd_soc_dpcm *dpcm;

	be = NULL;
	for_each_dpcm_be(fe, substream->stream, dpcm) {
		be = dpcm->be;
		break;
	}

	if (!be)
		return -EINVAL;

	return mca_dai_to_cluster(asoc_rtd_to_cpu(be, 0))->no;
}

static int mca_fe_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct device *dev = mca->dev;
	unsigned int samp_rate = params_rate(params);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	bool refine_tdm = false;
	unsigned long bclk_ratio;
	unsigned int tdm_slots, tdm_slot_width, tdm_mask;
	u32 regval, pad;
	int ret, port, nchans_ceiled;

	if (!cl->tdm_slot_width) {
		/*
		 * We were not given TDM settings from above, so make
		 * initial guesses which will later be refined.
		 */
		tdm_slot_width = params_width(params);
		tdm_slots = params_channels(params);
		refine_tdm = true;
	} else {
		tdm_slot_width = cl->tdm_slot_width;
		tdm_slots = cl->tdm_slots;
		tdm_mask = is_tx ? cl->tdm_tx_mask : cl->tdm_rx_mask;
	}

	if (cl->bclk_ratio)
		bclk_ratio = cl->bclk_ratio;
	else
		bclk_ratio = tdm_slot_width * tdm_slots;

	if (refine_tdm) {
		int nchannels = params_channels(params);

		if (nchannels > 2) {
			dev_err(dev, "missing TDM settings for a stream with more than two channels\n");
			return -EINVAL;
		}

		if ((bclk_ratio % nchannels) != 0) {
			dev_err(dev, "BCLK ratio (%ld) not divisible by no. of channels (%d)\n",
				bclk_ratio, nchannels);
			return -EINVAL;
		}

		tdm_slot_width = bclk_ratio / nchannels;

		if (tdm_slot_width > 32 && nchannels == 1)
			tdm_slot_width = 32;

		if (tdm_slot_width < params_width(params)) {
			dev_err(dev, "TDM slots too narrow (tdm=%d params=%d)\n",
				tdm_slot_width, params_width(params));
			return -EINVAL;
		}

		tdm_mask = (1 << tdm_slots) - 1;
	}

	port = mca_fe_get_port(substream);
	if (port < 0)
		return port;

	ret = mca_configure_serdes(cl, is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF,
				   tdm_mask, tdm_slots, params_channels(params),
				   tdm_slot_width, is_tx, port);
	if (ret)
		return ret;
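
	/*
	 * Our understanding: the DMA adapter repacks between 32-bit words on
	 * the DMA side and the narrower on-wire samples, padding/stripping
	 * 'pad' bits at the LSB end on TX and the MSB end on RX (cf. the
	 * DMA_ADAPTER_TX_LSB_PAD/RX_MSB_PAD field names).
	 */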
	pad = 32 - params_width(params);

	/*
	 * TODO: Here the register semantics aren't clear.
	 */
	nchans_ceiled = min_t(int, params_channels(params), 4);
	regval = FIELD_PREP(DMA_ADAPTER_NCHANS, nchans_ceiled) |
		 FIELD_PREP(DMA_ADAPTER_TX_NCHANS, 0x2) |
		 FIELD_PREP(DMA_ADAPTER_RX_NCHANS, 0x2) |
		 FIELD_PREP(DMA_ADAPTER_TX_LSB_PAD, pad) |
		 FIELD_PREP(DMA_ADAPTER_RX_MSB_PAD, pad);

#ifndef USE_RXB_FOR_CAPTURE
	writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
#else
	if (is_tx)
		writel_relaxed(regval,
			       mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
	else
		writel_relaxed(regval,
			       mca->switch_base + REG_DMA_ADAPTER_B(cl->no));
#endif

	if (!mca_fe_clocks_in_use(cl)) {
		/*
		 * Set up the FSYNC duty cycle to be as even as possible.
		 */
		writel_relaxed((bclk_ratio / 2) - 1,
			       cl->base + REG_SYNCGEN_HI_PERIOD);
		writel_relaxed(((bclk_ratio + 1) / 2) - 1,
			       cl->base + REG_SYNCGEN_LO_PERIOD);
		writel_relaxed(FIELD_PREP(MCLK_CONF_DIV, 0x1),
			       cl->base + REG_MCLK_CONF);

		ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate);
		if (ret) {
			dev_err(mca->dev, "cluster %d: unable to set clock parent: %d\n",
				cl->no, ret);
			return ret;
		}
	}

	return 0;
}

static const struct snd_soc_dai_ops mca_fe_ops = {
	.set_fmt = mca_fe_set_fmt,
	.set_bclk_ratio = mca_set_bclk_ratio,
	.set_tdm_slot = mca_fe_set_tdm_slot,
	.hw_params = mca_fe_hw_params,
	.trigger = mca_fe_trigger,
};

static bool mca_be_started(struct mca_cluster *cl)
{
	int stream;

	for_each_pcm_streams(stream)
		if (cl->port_started[stream])
			return true;
	return false;
}

static int mca_be_startup(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *be = asoc_substream_to_rtd(substream);
	struct snd_soc_pcm_runtime *fe;
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_cluster *fe_cl;
	struct mca_data *mca = cl->host;
	struct snd_soc_dpcm *dpcm;

	fe = NULL;

	for_each_dpcm_fe(be, substream->stream, dpcm) {
		if (fe && dpcm->fe != fe) {
			dev_err(mca->dev, "multiple FEs per BE are unsupported\n");
			return -EINVAL;
		}

		fe = dpcm->fe;
	}

	if (!fe)
		return -EINVAL;

	fe_cl = mca_dai_to_cluster(asoc_rtd_to_cpu(fe, 0));

	if (mca_be_started(cl)) {
		/*
		 * The port is already started in the other direction.
		 * Make sure there isn't a conflict with another cluster
		 * driving the port.
		 */
		if (cl->port_driver != fe_cl->no)
			return -EINVAL;

		cl->port_started[substream->stream] = true;
		return 0;
	}
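
	/*
	 * Bring the port up and patch it through: take clocks from the FE
	 * cluster's SYNC generator and data from its TXA serdes. (The FE
	 * cluster need not be the port's native cluster.)
	 */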
	writel_relaxed(PORT_ENABLES_CLOCKS | PORT_ENABLES_TX_DATA,
		       cl->base + REG_PORT_ENABLES);
	writel_relaxed(FIELD_PREP(PORT_CLOCK_SEL, fe_cl->no + 1),
		       cl->base + REG_PORT_CLOCK_SEL);
	writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no),
		       cl->base + REG_PORT_DATA_SEL);
	mutex_lock(&mca->port_mutex);
	cl->port_driver = fe_cl->no;
	mutex_unlock(&mca->port_mutex);
	cl->port_started[substream->stream] = true;

	return 0;
}

static void mca_be_shutdown(struct snd_pcm_substream *substream,
			    struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;

	cl->port_started[substream->stream] = false;

	if (!mca_be_started(cl)) {
		/*
		 * Were we the last direction to shut down?
		 * Turn off the lights.
		 */
		writel_relaxed(0, cl->base + REG_PORT_ENABLES);
		writel_relaxed(0, cl->base + REG_PORT_DATA_SEL);
		mutex_lock(&mca->port_mutex);
		cl->port_driver = -1;
		mutex_unlock(&mca->port_mutex);
	}
}

static const struct snd_soc_dai_ops mca_be_ops = {
	.prepare = mca_be_prepare,
	.hw_free = mca_be_hw_free,
	.startup = mca_be_startup,
	.shutdown = mca_be_shutdown,
};

static int mca_set_runtime_hwparams(struct snd_soc_component *component,
				    struct snd_pcm_substream *substream,
				    struct dma_chan *chan)
{
	struct device *dma_dev = chan->device->dev;
	struct snd_dmaengine_dai_dma_data dma_data = {};
	struct snd_pcm_hardware hw;
	int ret;

	memset(&hw, 0, sizeof(hw));

	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		  SNDRV_PCM_INFO_INTERLEAVED;
	hw.periods_min = 2;
	hw.periods_max = UINT_MAX;
	hw.period_bytes_min = 256;
	hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
	hw.buffer_bytes_max = SIZE_MAX;
	hw.fifo_size = 16;

	ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, &dma_data,
							&hw, chan);
	if (ret)
		return ret;

	return snd_soc_set_runtime_hwparams(substream, &hw);
}

static int mca_pcm_open(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
	struct dma_chan *chan = cl->dma_chans[substream->stream];
	int ret;

	if (rtd->dai_link->no_pcm)
		return 0;

	ret = mca_set_runtime_hwparams(component, substream, chan);
	if (ret)
		return ret;

	return snd_dmaengine_pcm_open(substream, chan);
}

static int mca_hw_params(struct snd_soc_component *component,
			 struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
	struct dma_slave_config slave_config;
	int ret;

	if (rtd->dai_link->no_pcm)
		return 0;

	memset(&slave_config, 0, sizeof(slave_config));
	ret = snd_hwparams_to_dma_slave_config(substream, params,
					       &slave_config);
	if (ret < 0)
		return ret;
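
	/*
	 * Our assumption: the DMA engine accesses the cluster's DMA adapter
	 * as a window of consecutive 32-bit words, one word per channel and
	 * at most 4 (matching nchans_ceiled in mca_fe_hw_params), so let it
	 * know the window's size.
	 */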
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		slave_config.dst_port_window_size =
			min_t(u32, params_channels(params), 4);
	else
		slave_config.src_port_window_size =
			min_t(u32, params_channels(params), 4);

	return dmaengine_slave_config(chan, &slave_config);
}

static int mca_close(struct snd_soc_component *component,
		     struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return 0;

	return snd_dmaengine_pcm_close(substream);
}

static int mca_trigger(struct snd_soc_component *component,
		       struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return 0;

	/*
	 * Before we do the PCM trigger proper, insert an opportunity
	 * to reset the frontend's SERDES.
	 */
	mca_fe_early_trigger(substream, cmd, asoc_rtd_to_cpu(rtd, 0));

	return snd_dmaengine_pcm_trigger(substream, cmd);
}

static snd_pcm_uframes_t mca_pointer(struct snd_soc_component *component,
				     struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return -ENOTSUPP;

	return snd_dmaengine_pcm_pointer(substream);
}

static struct dma_chan *mca_request_dma_channel(struct mca_cluster *cl,
						unsigned int stream)
{
	bool is_tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
#ifndef USE_RXB_FOR_CAPTURE
	char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
				    is_tx ? "tx%da" : "rx%da", cl->no);
#else
	char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
				    is_tx ? "tx%da" : "rx%db", cl->no);
#endif
	return of_dma_request_slave_channel(cl->host->dev->of_node, name);
}

static void mca_pcm_free(struct snd_soc_component *component,
			 struct snd_pcm *pcm)
{
	struct snd_soc_pcm_runtime *rtd = snd_pcm_chip(pcm);
	struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
	unsigned int i;

	if (rtd->dai_link->no_pcm)
		return;

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream =
			rtd->pcm->streams[i].substream;

		if (!substream || !cl->dma_chans[i])
			continue;

		dma_release_channel(cl->dma_chans[i]);
		cl->dma_chans[i] = NULL;
	}
}

static int mca_pcm_new(struct snd_soc_component *component,
		       struct snd_soc_pcm_runtime *rtd)
{
	struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0));
	unsigned int i;

	if (rtd->dai_link->no_pcm)
		return 0;

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream =
			rtd->pcm->streams[i].substream;
		struct dma_chan *chan;

		if (!substream)
			continue;

		chan = mca_request_dma_channel(cl, i);
		if (IS_ERR_OR_NULL(chan)) {
			dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n",
				i, cl->no, chan);
			mca_pcm_free(component, rtd->pcm);
			return -EINVAL;
		}

		cl->dma_chans[i] = chan;
		snd_pcm_set_managed_buffer(substream, SNDRV_DMA_TYPE_DEV_IRAM,
					   chan->device->dev, 512 * 1024 * 6,
					   SIZE_MAX);
	}

	return 0;
}

static const struct snd_soc_component_driver mca_component = {
	.name = "apple-mca",
	.open = mca_pcm_open,
	.close = mca_close,
	.hw_params = mca_hw_params,
	.trigger = mca_trigger,
	.pointer = mca_pointer,
	.pcm_construct = mca_pcm_new,
	.pcm_destruct = mca_pcm_free,
};

static void apple_mca_release(struct mca_data *mca)
{
	int i;

	for (i = 0; i < mca->nclusters; i++) {
		struct mca_cluster *cl = &mca->clusters[i];

		if (!IS_ERR_OR_NULL(cl->clk_parent))
			clk_put(cl->clk_parent);

		if (!IS_ERR_OR_NULL(cl->pd_dev))
			dev_pm_domain_detach(cl->pd_dev, true);
	}

	if (mca->pd_link)
		device_link_del(mca->pd_link);

	if (!IS_ERR_OR_NULL(mca->pd_dev))
		dev_pm_domain_detach(mca->pd_dev, true);
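
	/*
	 * Balances the reset_control_reset() in probe so that a later probe
	 * can trigger the shared reset again. The reset is optional, so this
	 * is a no-op when none was given in the devicetree.
	 */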
	reset_control_rearm(mca->rstc);
}

static int apple_mca_probe(struct platform_device *pdev)
{
	struct mca_data *mca;
	struct mca_cluster *clusters;
	struct snd_soc_dai_driver *dai_drivers;
	struct resource *res;
	void __iomem *base;
	int nclusters;
	int ret, i;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (resource_size(res) < CLUSTER_STRIDE)
		return -EINVAL;
	nclusters = (resource_size(res) - CLUSTER_STRIDE) / CLUSTER_STRIDE + 1;

	mca = devm_kzalloc(&pdev->dev, struct_size(mca, clusters, nclusters),
			   GFP_KERNEL);
	if (!mca)
		return -ENOMEM;
	mca->dev = &pdev->dev;
	mca->nclusters = nclusters;
	mutex_init(&mca->port_mutex);
	platform_set_drvdata(pdev, mca);
	clusters = mca->clusters;

	mca->switch_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mca->switch_base))
		return PTR_ERR(mca->switch_base);

	mca->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
	if (IS_ERR(mca->rstc))
		return PTR_ERR(mca->rstc);

	dai_drivers = devm_kzalloc(
		&pdev->dev, sizeof(*dai_drivers) * 2 * nclusters, GFP_KERNEL);
	if (!dai_drivers)
		return -ENOMEM;

	mca->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, 0);
	if (IS_ERR(mca->pd_dev))
		return -EINVAL;

	mca->pd_link = device_link_add(&pdev->dev, mca->pd_dev,
				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				       DL_FLAG_RPM_ACTIVE);
	if (!mca->pd_link) {
		ret = -EINVAL;
		/* Prevent an unbalanced reset rearm */
		mca->rstc = NULL;
		goto err_release;
	}

	reset_control_reset(mca->rstc);

	for (i = 0; i < nclusters; i++) {
		struct mca_cluster *cl = &clusters[i];
		struct snd_soc_dai_driver *fe =
			&dai_drivers[mca->nclusters + i];
		struct snd_soc_dai_driver *be = &dai_drivers[i];

		cl->host = mca;
		cl->no = i;
		cl->base = base + CLUSTER_STRIDE * i;
		cl->port_driver = -1;
		cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
		if (IS_ERR(cl->clk_parent)) {
			dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n",
				i, PTR_ERR(cl->clk_parent));
			ret = PTR_ERR(cl->clk_parent);
			goto err_release;
		}
		cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1);
		if (IS_ERR(cl->pd_dev)) {
			dev_err(&pdev->dev,
				"unable to obtain cluster %d PD: %ld\n", i,
				PTR_ERR(cl->pd_dev));
			ret = PTR_ERR(cl->pd_dev);
			goto err_release;
		}

		fe->id = i;
		fe->name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-pcm-%d", i);
		if (!fe->name) {
			ret = -ENOMEM;
			goto err_release;
		}
		fe->ops = &mca_fe_ops;
		fe->playback.channels_min = 1;
		fe->playback.channels_max = 32;
		fe->playback.rates = SNDRV_PCM_RATE_8000_192000;
		fe->playback.formats = APPLE_MCA_FMTBITS;
		fe->capture.channels_min = 1;
		fe->capture.channels_max = 32;
		fe->capture.rates = SNDRV_PCM_RATE_8000_192000;
		fe->capture.formats = APPLE_MCA_FMTBITS;
		fe->symmetric_rate = 1;

		fe->playback.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d TX", i);
		fe->capture.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d RX", i);

		if (!fe->playback.stream_name || !fe->capture.stream_name) {
			ret = -ENOMEM;
			goto err_release;
		}
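
		/*
		 * The BE DAIs occupy ids nclusters ... 2*nclusters - 1 and
		 * stand for the I2S ports; see mca_dai_to_cluster().
		 */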
		be->id = i + nclusters;
		be->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-i2s-%d", i);
		if (!be->name) {
			ret = -ENOMEM;
			goto err_release;
		}
		be->ops = &mca_be_ops;
		be->playback.channels_min = 1;
		be->playback.channels_max = 32;
		be->playback.rates = SNDRV_PCM_RATE_8000_192000;
		be->playback.formats = APPLE_MCA_FMTBITS;
		be->capture.channels_min = 1;
		be->capture.channels_max = 32;
		be->capture.rates = SNDRV_PCM_RATE_8000_192000;
		be->capture.formats = APPLE_MCA_FMTBITS;

		be->playback.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d TX", i);
		be->capture.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d RX", i);
		if (!be->playback.stream_name || !be->capture.stream_name) {
			ret = -ENOMEM;
			goto err_release;
		}
	}

	ret = snd_soc_register_component(&pdev->dev, &mca_component,
					 dai_drivers, nclusters * 2);
	if (ret) {
		dev_err(&pdev->dev, "unable to register ASoC component: %d\n",
			ret);
		goto err_release;
	}

	return 0;

err_release:
	apple_mca_release(mca);
	return ret;
}

static int apple_mca_remove(struct platform_device *pdev)
{
	struct mca_data *mca = platform_get_drvdata(pdev);

	snd_soc_unregister_component(&pdev->dev);
	apple_mca_release(mca);
	return 0;
}

static const struct of_device_id apple_mca_of_match[] = {
	{ .compatible = "apple,mca", },
	{}
};
MODULE_DEVICE_TABLE(of, apple_mca_of_match);

static struct platform_driver apple_mca_driver = {
	.driver = {
		.name = "apple-mca",
		.of_match_table = apple_mca_of_match,
	},
	.probe = apple_mca_probe,
	.remove = apple_mca_remove,
};
module_platform_driver(apple_mca_driver);

MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
MODULE_DESCRIPTION("ASoC Apple MCA driver");
MODULE_LICENSE("GPL");