Lines Matching +full:serdes +full:- +full:clk
1 // SPDX-License-Identifier: GPL-2.0-only
9 // four SERDES units and has a dedicated I2S port on the SoC's periphery.
12 // configurable manner. We mostly treat them as self-contained independent
13 // units and don't configure any cross-cluster connections except for the I2S
24 #include <linux/clk.h>
25 #include <linux/dma-mapping.h>
71 /* Bases of serdes units (relative to cluster) */
85 /* Relative to serdes unit base */
133 struct clk *clk_parent;
169 void __iomem *ptr = cl->base + regoffset;
183 * FE DAIs are 0 ... nclusters - 1
184 * BE DAIs are nclusters ... 2*nclusters - 1
186 int cluster_no = dai->id % mca->nclusters;
188 return &mca->clusters[cluster_no];
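/*
 * Editorial sketch (not part of the driver): given the id layout in the
 * comment above, a DAI id maps to its cluster by modulo and to its role by
 * comparison against nclusters.  With a hypothetical nclusters of 6, FE id 2
 * and BE id 8 both resolve to cluster 2.
 */
#include <linux/types.h>

static inline int example_dai_cluster(int dai_id, int nclusters)
{
	return dai_id % nclusters;	/* the FE/BE pair share one cluster */
}

static inline bool example_dai_is_be(int dai_id, int nclusters)
{
	return dai_id >= nclusters;	/* BE ids occupy the upper range */
}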
196 bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
217 WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) &
222 FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1));
233 bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
253 return -EINVAL;
261 struct mca_data *mca = cl->host;
264 ret = clk_prepare_enable(cl->clk_parent);
266 dev_err(mca->dev,
268 cl->no, ret);
275 * as clock-gated.
277 cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
280 if (!cl->pd_link) {
281 dev_err(mca->dev,
282 "cluster %d: unable to prop-up power domain\n", cl->no);
283 clk_disable_unprepare(cl->clk_parent);
284 return -EINVAL;
287 writel_relaxed(cl->no + 1, cl->base + REG_SYNCGEN_MCLK_SEL);
300 device_link_del(cl->pd_link);
301 clk_disable_unprepare(cl->clk_parent);
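/*
 * Editorial sketch of the enable/disable pairing above: the cluster's parent
 * clock is prepared and enabled, then a device link to the cluster's power
 * domain keeps it powered until the link is deleted again.  The link flags
 * shown here are an assumption (typical stateless PM-runtime usage); they are
 * not visible in this listing.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_cluster_power_up(struct device *consumer,
				    struct device *pd_dev, struct clk *clk,
				    struct device_link **link)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	*link = device_link_add(consumer, pd_dev,
				DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				DL_FLAG_RPM_ACTIVE);
	if (!*link) {
		clk_disable_unprepare(clk);
		return -EINVAL;
	}

	return 0;
}

static void example_cluster_power_down(struct clk *clk,
				       struct device_link *link)
{
	device_link_del(link);		/* drops the PM-runtime reference */
	clk_disable_unprepare(clk);
}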
306 struct mca_data *mca = cl->host;
310 mutex_lock(&mca->port_mutex);
311 for (i = 0; i < mca->nclusters; i++) {
312 be_cl = &mca->clusters[i];
314 if (be_cl->port_driver != cl->no)
318 if (be_cl->clocks_in_use[stream]) {
319 mutex_unlock(&mca->port_mutex);
324 mutex_unlock(&mca->port_mutex);
332 struct mca_data *mca = cl->host;
336 if (cl->port_driver < 0)
337 return -EINVAL;
339 fe_cl = &mca->clusters[cl->port_driver];
344 * or at time of DAPM widget power-up. We need to enable clocks
353 cl->clocks_in_use[substream->stream] = true;
362 struct mca_data *mca = cl->host;
365 if (cl->port_driver < 0)
366 return -EINVAL;
373 fe_cl = &mca->clusters[cl->port_driver];
377 cl->clocks_in_use[substream->stream] = false;
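/*
 * Editorial sketch of the clock hand-off read from the fragments above,
 * simplified to a single BE cluster: the first direction to prepare turns the
 * front-end clocks on (codecs expect clocks by DAPM power-up time), and the
 * last direction to be freed turns them off.  The real driver tracks this per
 * BE cluster and scans every cluster driven by the same FE; the helpers below
 * are hypothetical.
 */
#include <linux/types.h>

struct example_be_state {
	bool clocks_in_use[2];		/* one flag per stream direction */
};

static bool example_any_clocks_in_use(const struct example_be_state *st)
{
	return st->clocks_in_use[0] || st->clocks_in_use[1];
}

static int example_be_prepare(struct example_be_state *st, int stream,
			      int (*enable_fe_clocks)(void))
{
	int ret = 0;

	if (!example_any_clocks_in_use(st))
		ret = enable_fe_clocks();
	if (!ret)
		st->clocks_in_use[stream] = true;
	return ret;
}

static void example_be_hw_free(struct example_be_state *st, int stream,
			       void (*disable_fe_clocks)(void))
{
	st->clocks_in_use[stream] = false;
	if (!example_any_clocks_in_use(st))
		disable_fe_clocks();
}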
397 void __iomem *serdes_base = cl->base + serdes_unit;
401 serdes_conf = FIELD_PREP(SERDES_CONF_NCHANS, max(slots, 1) - 1);
420 serdes_conf |= FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1);
461 dev_err(cl->host->dev,
462 "unsupported SERDES configuration requested (mask=0x%x slots=%d slot_width=%d)\n",
464 return -EINVAL;
472 cl->tdm_slots = slots;
473 cl->tdm_slot_width = slot_width;
474 cl->tdm_tx_mask = tx_mask;
475 cl->tdm_rx_mask = rx_mask;
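/*
 * Editorial sketch of the FIELD_PREP composition used for the SERDES
 * configuration word above.  The GENMASK positions below are placeholders
 * chosen for the example, not the device's actual register layout.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/minmax.h>
#include <linux/types.h>

#define EXAMPLE_CONF_NCHANS	GENMASK(3, 0)	/* placeholder field */
#define EXAMPLE_CONF_SYNC_SEL	GENMASK(18, 16)	/* placeholder field */

static u32 example_serdes_conf(int slots, int cluster_no)
{
	/* mirrors the listing: the field is programmed with max(slots, 1) - 1 */
	u32 conf = FIELD_PREP(EXAMPLE_CONF_NCHANS, max(slots, 1) - 1);

	/* the sync generator selector is 1-based: cluster N selects N + 1 */
	conf |= FIELD_PREP(EXAMPLE_CONF_SYNC_SEL, cluster_no + 1);

	return conf;
}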
483 struct mca_data *mca = cl->host;
527 cl->base + CLUSTER_TX_OFF + REG_TX_SERDES_BITSTART);
529 cl->base + CLUSTER_RX_OFF + REG_RX_SERDES_BITSTART);
534 dev_err(mca->dev, "unsupported DAI format (0x%x) requested\n", fmt);
535 return -EINVAL;
542 cl->bclk_ratio = ratio;
554 for_each_dpcm_be(fe, substream->stream, dpcm) {
555 be = dpcm->be;
560 return -EINVAL;
562 return mca_dai_to_cluster(snd_soc_rtd_to_cpu(be, 0))->no;
570 struct mca_data *mca = cl->host;
571 struct device *dev = mca->dev;
573 bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
580 if (!cl->tdm_slot_width) {
589 tdm_slot_width = cl->tdm_slot_width;
590 tdm_slots = cl->tdm_slots;
591 tdm_mask = is_tx ? cl->tdm_tx_mask : cl->tdm_rx_mask;
594 if (cl->bclk_ratio)
595 bclk_ratio = cl->bclk_ratio;
604 return -EINVAL;
610 return -EINVAL;
621 return -EINVAL;
624 tdm_mask = (1 << tdm_slots) - 1;
637 pad = 32 - params_width(params);
650 writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
654 mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
657 mca->switch_base + REG_DMA_ADAPTER_B(cl->no));
664 writel_relaxed((bclk_ratio / 2) - 1,
665 cl->base + REG_SYNCGEN_HI_PERIOD);
666 writel_relaxed(((bclk_ratio + 1) / 2) - 1,
667 cl->base + REG_SYNCGEN_LO_PERIOD);
669 cl->base + REG_MCLK_CONF);
671 ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate);
673 dev_err(mca->dev, "cluster %d: unable to set clock parent: %d\n",
674 cl->no, ret);
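/*
 * Editorial worked example of the sync-generator and clock arithmetic above.
 * With an assumed 48 kHz stream and a bit clock of 64 BCLKs per frame (e.g.
 * 2 slots of 32 bits), the values work out to:
 *
 *   SYNCGEN_HI_PERIOD = 64 / 2 - 1       = 31
 *   SYNCGEN_LO_PERIOD = (64 + 1) / 2 - 1 = 31
 *   parent clock rate = 64 * 48000       = 3072000 Hz
 */
struct example_syncgen {
	unsigned int hi_period;
	unsigned int lo_period;
	unsigned long parent_rate;
};

static struct example_syncgen example_syncgen_setup(unsigned int samp_rate,
						    unsigned int bclk_ratio)
{
	return (struct example_syncgen){
		.hi_period = bclk_ratio / 2 - 1,
		.lo_period = (bclk_ratio + 1) / 2 - 1,
		.parent_rate = (unsigned long)bclk_ratio * samp_rate,
	};
}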
695 if (cl->port_started[stream])
707 struct mca_data *mca = cl->host;
712 for_each_dpcm_fe(be, substream->stream, dpcm) {
713 if (fe && dpcm->fe != fe) {
714 dev_err(mca->dev, "many FE per one BE unsupported\n");
715 return -EINVAL;
718 fe = dpcm->fe;
722 return -EINVAL;
732 if (cl->port_driver != fe_cl->no)
733 return -EINVAL;
735 cl->port_started[substream->stream] = true;
740 cl->base + REG_PORT_ENABLES);
741 writel_relaxed(FIELD_PREP(PORT_CLOCK_SEL, fe_cl->no + 1),
742 cl->base + REG_PORT_CLOCK_SEL);
743 writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no),
744 cl->base + REG_PORT_DATA_SEL);
745 mutex_lock(&mca->port_mutex);
746 cl->port_driver = fe_cl->no;
747 mutex_unlock(&mca->port_mutex);
748 cl->port_started[substream->stream] = true;
757 struct mca_data *mca = cl->host;
759 cl->port_started[substream->stream] = false;
766 writel_relaxed(0, cl->base + REG_PORT_ENABLES);
767 writel_relaxed(0, cl->base + REG_PORT_DATA_SEL);
768 mutex_lock(&mca->port_mutex);
769 cl->port_driver = -1;
770 mutex_unlock(&mca->port_mutex);
785 struct device *dma_dev = chan->device->dev;
816 struct dma_chan *chan = cl->dma_chans[substream->stream];
819 if (rtd->dai_link->no_pcm)
838 if (rtd->dai_link->no_pcm)
847 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
862 if (rtd->dai_link->no_pcm)
873 if (rtd->dai_link->no_pcm)
878 * to reset the frontend's SERDES.
890 if (rtd->dai_link->no_pcm)
891 return -ENOTSUPP;
900 char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
901 is_tx ? "tx%da" : "rx%da", cl->no);
903 char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
904 is_tx ? "tx%da" : "rx%db", cl->no);
906 return of_dma_request_slave_channel(cl->host->dev->of_node, name);
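/*
 * Editorial sketch of the DMA channel lookup above: each cluster requests a
 * named channel from its DT node, "tx<N>a" for playback and "rx<N>a" (or
 * "rx<N>b"; the selector between the two capture variants is not visible in
 * this listing and appears to be a build-time choice) for capture.  The
 * use_rxb switch below is illustrative only.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of_dma.h>
#include <linux/slab.h>
#include <linux/types.h>

static struct dma_chan *example_request_chan(struct device *dev,
					     int cluster_no, bool is_tx,
					     bool use_rxb)
{
	char *name = devm_kasprintf(dev, GFP_KERNEL,
				    is_tx ? "tx%da" :
				    (use_rxb ? "rx%db" : "rx%da"),
				    cluster_no);

	if (!name)
		return ERR_PTR(-ENOMEM);

	return of_dma_request_slave_channel(dev->of_node, name);
}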
917 if (rtd->dai_link->no_pcm)
922 rtd->pcm->streams[i].substream;
924 if (!substream || !cl->dma_chans[i])
927 dma_release_channel(cl->dma_chans[i]);
928 cl->dma_chans[i] = NULL;
939 if (rtd->dai_link->no_pcm)
944 rtd->pcm->streams[i].substream;
953 mca_pcm_free(component, rtd->pcm);
955 if (chan && PTR_ERR(chan) == -EPROBE_DEFER)
958 dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n",
959 i, cl->no, chan);
962 return -EINVAL;
966 cl->dma_chans[i] = chan;
968 chan->device->dev, 512 * 1024 * 6,
976 .name = "apple-mca",
990 for (i = 0; i < mca->nclusters; i++) {
991 struct mca_cluster *cl = &mca->clusters[i];
993 if (!IS_ERR_OR_NULL(cl->clk_parent))
994 clk_put(cl->clk_parent);
996 if (!IS_ERR_OR_NULL(cl->pd_dev))
997 dev_pm_domain_detach(cl->pd_dev, true);
1000 if (mca->pd_link)
1001 device_link_del(mca->pd_link);
1003 if (!IS_ERR_OR_NULL(mca->pd_dev))
1004 dev_pm_domain_detach(mca->pd_dev, true);
1006 reset_control_rearm(mca->rstc);
1024 return -EINVAL;
1025 nclusters = (resource_size(res) - CLUSTER_STRIDE) / CLUSTER_STRIDE + 1;
1027 mca = devm_kzalloc(&pdev->dev, struct_size(mca, clusters, nclusters),
1030 return -ENOMEM;
1031 mca->dev = &pdev->dev;
1032 mca->nclusters = nclusters;
1033 mutex_init(&mca->port_mutex);
1035 clusters = mca->clusters;
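/*
 * Editorial note on the probe math above: for any resource at least one
 * stride large, (resource_size(res) - CLUSTER_STRIDE) / CLUSTER_STRIDE + 1
 * is simply resource_size(res) / CLUSTER_STRIDE, i.e. one cluster per stride
 * of register space.  The allocation then uses the flexible-array pattern
 * sketched below (struct fields reduced for the example).
 */
#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_cluster {
	int no;
};

struct example_data {
	struct device *dev;
	int nclusters;
	struct example_cluster clusters[];	/* one element per cluster */
};

static struct example_data *example_alloc(struct device *dev, int nclusters)
{
	/* struct_size() keeps the header + array sizing overflow-safe */
	struct example_data *d =
		devm_kzalloc(dev, struct_size(d, clusters, nclusters),
			     GFP_KERNEL);

	if (d) {
		d->dev = dev;
		d->nclusters = nclusters;
	}
	return d;
}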
1037 mca->switch_base =
1039 if (IS_ERR(mca->switch_base))
1040 return PTR_ERR(mca->switch_base);
1042 mca->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
1043 if (IS_ERR(mca->rstc))
1044 return PTR_ERR(mca->rstc);
1047 &pdev->dev, sizeof(*dai_drivers) * 2 * nclusters, GFP_KERNEL);
1049 return -ENOMEM;
1051 mca->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, 0);
1052 if (IS_ERR(mca->pd_dev))
1053 return -EINVAL;
1055 mca->pd_link = device_link_add(&pdev->dev, mca->pd_dev,
1058 if (!mca->pd_link) {
1059 ret = -EINVAL;
1061 mca->rstc = NULL;
1065 reset_control_reset(mca->rstc);
1070 &dai_drivers[mca->nclusters + i];
1073 cl->host = mca;
1074 cl->no = i;
1075 cl->base = base + CLUSTER_STRIDE * i;
1076 cl->port_driver = -1;
1077 cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
1078 if (IS_ERR(cl->clk_parent)) {
1079 dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n",
1080 i, PTR_ERR(cl->clk_parent));
1081 ret = PTR_ERR(cl->clk_parent);
1084 cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1);
1085 if (IS_ERR(cl->pd_dev)) {
1086 dev_err(&pdev->dev,
1088 PTR_ERR(cl->pd_dev));
1089 ret = PTR_ERR(cl->pd_dev);
1093 fe->id = i;
1094 fe->name =
1095 devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-pcm-%d", i);
1096 if (!fe->name) {
1097 ret = -ENOMEM;
1100 fe->ops = &mca_fe_ops;
1101 fe->playback.channels_min = 1;
1102 fe->playback.channels_max = 32;
1103 fe->playback.rates = SNDRV_PCM_RATE_8000_192000;
1104 fe->playback.formats = APPLE_MCA_FMTBITS;
1105 fe->capture.channels_min = 1;
1106 fe->capture.channels_max = 32;
1107 fe->capture.rates = SNDRV_PCM_RATE_8000_192000;
1108 fe->capture.formats = APPLE_MCA_FMTBITS;
1109 fe->symmetric_rate = 1;
1111 fe->playback.stream_name =
1112 devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d TX", i);
1113 fe->capture.stream_name =
1114 devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d RX", i);
1116 if (!fe->playback.stream_name || !fe->capture.stream_name) {
1117 ret = -ENOMEM;
1121 be->id = i + nclusters;
1122 be->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-i2s-%d", i);
1123 if (!be->name) {
1124 ret = -ENOMEM;
1127 be->ops = &mca_be_ops;
1128 be->playback.channels_min = 1;
1129 be->playback.channels_max = 32;
1130 be->playback.rates = SNDRV_PCM_RATE_8000_192000;
1131 be->playback.formats = APPLE_MCA_FMTBITS;
1132 be->capture.channels_min = 1;
1133 be->capture.channels_max = 32;
1134 be->capture.rates = SNDRV_PCM_RATE_8000_192000;
1135 be->capture.formats = APPLE_MCA_FMTBITS;
1137 be->playback.stream_name =
1138 devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d TX", i);
1139 be->capture.stream_name =
1140 devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d RX", i);
1141 if (!be->playback.stream_name || !be->capture.stream_name) {
1142 ret = -ENOMEM;
1147 ret = snd_soc_register_component(&pdev->dev, &mca_component,
1150 dev_err(&pdev->dev, "unable to register ASoC component: %d\n",
1166 snd_soc_unregister_component(&pdev->dev);
1178 .name = "apple-mca",