// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic.barre@st.com for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/of_address.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>

#include "mmci.h"

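/*
 * Support structures and register #defines (DLYB_*, SYSCFG_DLYBSD_*,
 * MMCI_STM32_*, SDMMC_LLI_BUF_LEN, ...) sit in this part of the original
 * file; the structures below are reconstructed from the fields the code in
 * this excerpt actually uses (field order assumed).
 */
struct sdmmc_lli_desc {
	u32 idmalar;	/* link address: next descriptor + control bits */
	u32 idmabase;	/* buffer base address of this element */
	u32 idmasize;	/* transfer size of this element */
};

struct sdmmc_idma {
	dma_addr_t sg_dma;		/* bus address of the LLI table */
	void *sg_cpu;			/* CPU address of the LLI table */
	dma_addr_t bounce_dma_addr;	/* bus address of the bounce buffer */
	void *bounce_buf;		/* CPU address of the bounce buffer */
	bool use_bounce_buffer;		/* request needs the bounce buffer */
};

struct sdmmc_tuning_ops;

struct sdmmc_dlyb {
	void __iomem *base;	/* delay block register base */
	u32 unit;		/* delay unit selected while tuning (MP15) */
	u32 max;		/* number of usable phase taps */
	struct sdmmc_tuning_ops *ops;
};
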
struct sdmmc_tuning_ops {
	int (*dlyb_enable)(struct sdmmc_dlyb *dlyb);
	void (*set_input_ck)(struct sdmmc_dlyb *dlyb);
	int (*tuning_prepare)(struct mmci_host *host);
	int (*set_cfg)(struct sdmmc_dlyb *dlyb, int unit, int phase,
		       bool sampler);
};

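/*
 * IDMA scatterlist validation: every element must start on a 32-bit
 * boundary and, except for the last one, have a length aligned to the
 * variant's idmabsize granularity. Requests that violate this fall back
 * to a single coherent bounce buffer sized for the largest request.
 */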
static int sdmmc_idma_validate_data(struct mmci_host *host,
				    struct mmc_data *data)
{
	struct sdmmc_idma *idma = host->dma_priv;
	struct device *dev = mmc_dev(host->mmc);
	struct scatterlist *sg;
	int i;

	idma->use_bounce_buffer = false;
	for_each_sg(data->sg, sg, data->sg_len - 1, i) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
		    !IS_ALIGNED(sg->length,
				host->variant->stm32_idmabsize_align)) {
			dev_dbg(mmc_dev(host->mmc),
				"unaligned scatterlist: ofst:%x length:%d\n",
				data->sg->offset, data->sg->length);
			goto use_bounce_buffer;
		}
	}

	/* The last element only needs the base address alignment. */
	if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
		dev_dbg(mmc_dev(host->mmc),
			"unaligned last scatterlist: ofst:%x length:%d\n",
			data->sg->offset, data->sg->length);
		goto use_bounce_buffer;
	}

	return 0;

use_bounce_buffer:
	if (!idma->bounce_buf) {
		idma->bounce_buf = dmam_alloc_coherent(dev,
						       host->mmc->max_req_size,
						       &idma->bounce_dma_addr,
						       GFP_KERNEL);
		if (!idma->bounce_buf) {
			dev_err(dev, "Unable to allocate DMA bounce buffer.\n");
			return -ENOMEM;
		}
	}

	idma->use_bounce_buffer = true;

	return 0;
}

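/*
 * Request preparation: with the bounce buffer in use, write data is staged
 * into it up front; otherwise the scatterlist is DMA-mapped. The cookie
 * check in sdmmc_idma_prep_data() skips work the core already did through
 * the pre-request hook.
 */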
static int _sdmmc_idma_prep_data(struct mmci_host *host,
				 struct mmc_data *data)
{
	struct sdmmc_idma *idma = host->dma_priv;

	if (idma->use_bounce_buffer) {
		if (data->flags & MMC_DATA_WRITE) {
			unsigned int xfer_bytes = data->blksz * data->blocks;

			sg_copy_to_buffer(data->sg, data->sg_len,
					  idma->bounce_buf, xfer_bytes);
			dma_wmb();
		}
	} else {
		int n_elem;

		n_elem = dma_map_sg(mmc_dev(host->mmc),
				    data->sg,
				    data->sg_len,
				    mmc_get_dma_dir(data));
		if (!n_elem) {
			dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sdmmc_idma_prep_data(struct mmci_host *host,
				struct mmc_data *data, bool next)
{
	/* Check if the job is already prepared. */
	if (!next && data->host_cookie == host->next_cookie)
		return 0;

	return _sdmmc_idma_prep_data(host, data);
}

static void sdmmc_idma_unprep_data(struct mmci_host *host,
				   struct mmc_data *data, int err)
{
	struct sdmmc_idma *idma = host->dma_priv;

	if (idma->use_bounce_buffer) {
		if (data->flags & MMC_DATA_READ) {
			unsigned int xfer_bytes = data->blksz * data->blocks;

			sg_copy_from_buffer(data->sg, data->sg_len,
					    idma->bounce_buf, xfer_bytes);
		}
	} else {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
	}
}

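/*
 * DMA setup: when the variant supports linked lists, the descriptor table
 * bounds max_segs (one sdmmc_lli_desc per segment) and the idmabsize mask
 * bounds max_seg_size; without LLI support everything must fit in a
 * single segment.
 */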
static int sdmmc_idma_setup(struct mmci_host *host)
{
	struct sdmmc_idma *idma;
	struct device *dev = mmc_dev(host->mmc);

	idma = devm_kzalloc(dev, sizeof(*idma), GFP_KERNEL);
	if (!idma)
		return -ENOMEM;

	host->dma_priv = idma;

	if (host->variant->dma_lli) {
		idma->sg_cpu = dmam_alloc_coherent(dev, SDMMC_LLI_BUF_LEN,
						   &idma->sg_dma, GFP_KERNEL);
		if (!idma->sg_cpu) {
			dev_err(dev, "Failed to alloc IDMA descriptor\n");
			return -ENOMEM;
		}
		host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
			sizeof(struct sdmmc_lli_desc);
		host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;

		host->mmc->max_req_size = SZ_1M;
	} else {
		host->mmc->max_segs = 1;
		host->mmc->max_seg_size = host->mmc->max_req_size;
	}

	return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
}

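/*
 * Transfer start: single-buffer mode (no LLI support, one segment, or the
 * bounce buffer) programs IDMABASE0 directly; otherwise one descriptor is
 * built per scatterlist element and the controller walks the resulting
 * link list on its own.
 */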
static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct sdmmc_idma *idma = host->dma_priv;
	struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
	struct mmc_data *data = host->data;
	struct scatterlist *sg;
	int i;

	host->dma_in_progress = true;

	if (!host->variant->dma_lli || data->sg_len == 1 ||
	    idma->use_bounce_buffer) {
		u32 dma_addr;

		if (idma->use_bounce_buffer)
			dma_addr = idma->bounce_dma_addr;
		else
			dma_addr = sg_dma_address(data->sg);

		writel_relaxed(dma_addr,
			       host->base + MMCI_STM32_IDMABASE0R);
		writel_relaxed(MMCI_STM32_IDMAEN,
			       host->base + MMCI_STM32_IDMACTRLR);
		return 0;
	}

	for_each_sg(data->sg, sg, data->sg_len, i) {
		desc[i].idmalar = (i + 1) * sizeof(struct sdmmc_lli_desc);
		desc[i].idmalar |= MMCI_STM32_ULA | MMCI_STM32_ULS |
			MMCI_STM32_ABR;
		desc[i].idmabase = sg_dma_address(sg);
		desc[i].idmasize = sg_dma_len(sg);
	}

	/* Mark the end of the link list. */
	desc[data->sg_len - 1].idmalar &= ~MMCI_STM32_ULA;

	dma_wmb();
	writel_relaxed(idma->sg_dma, host->base + MMCI_STM32_IDMABAR);
	writel_relaxed(desc[0].idmalar, host->base + MMCI_STM32_IDMALAR);
	writel_relaxed(desc[0].idmabase, host->base + MMCI_STM32_IDMABASE0R);
	writel_relaxed(desc[0].idmasize, host->base + MMCI_STM32_IDMABSIZER);
	writel_relaxed(MMCI_STM32_IDMAEN | MMCI_STM32_IDMALLIEN,
		       host->base + MMCI_STM32_IDMACTRLR);

	return 0;
}

static void sdmmc_idma_error(struct mmci_host *host)
{
	struct mmc_data *data = host->data;
	struct sdmmc_idma *idma = host->dma_priv;

	if (!dma_inprogress(host))
		return;

	writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
	host->dma_in_progress = false;
	data->host_cookie = 0;

	if (!idma->use_bounce_buffer)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
}

static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	if (!dma_inprogress(host))
		return;

	writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
	host->dma_in_progress = false;

	if (!data->host_cookie)
		sdmmc_idma_unprep_data(host, data, 0);
}

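/*
 * Card clock generation: cclk = mclk / (2 * clkdiv), where clkdiv == 0
 * bypasses the divider (not allowed in DDR modes). Illustrative numbers,
 * not taken from the original source: with mclk = 200 MHz and 25 MHz
 * requested, clkdiv = DIV_ROUND_UP(200M, 2 * 25M) = 4, so
 * cclk = 200M / (2 * 4) = 25 MHz.
 */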
static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	unsigned int clk = 0, ddr = 0;

	if (host->mmc->ios.timing == MMC_TIMING_MMC_DDR52 ||
	    host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		ddr = MCI_STM32_CLK_DDR;

	if (desired) {
		if (desired >= host->mclk && !ddr) {
			host->cclk = host->mclk;
		} else {
			clk = DIV_ROUND_UP(host->mclk, 2 * desired);
			if (clk > MCI_STM32_CLK_CLKDIV_MSK)
				clk = MCI_STM32_CLK_CLKDIV_MSK;
			host->cclk = host->mclk / (2 * clk);
		}
	} else {
		/*
		 * During the power-on phase the clock cannot be set to 0:
		 * only power-off and power-cycle deactivate the clock, so
		 * fall back to the maximum divider.
		 */
		clk = MCI_STM32_CLK_CLKDIV_MSK;
		host->cclk = host->mclk / (2 * clk);
	}

	/* Report the actual clock only while powered on. */
	if (host->mmc->ios.power_mode == MMC_POWER_ON)
		host->mmc->actual_clock = host->cclk;
	else
		host->mmc->actual_clock = 0;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_STM32_CLK_WIDEBUS_4;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_STM32_CLK_WIDEBUS_8;

	clk |= MCI_STM32_CLK_HWFCEN;
	clk |= host->clk_reg_add;
	clk |= ddr;

	if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50)
		clk |= MCI_STM32_CLK_BUSSPEED;

	mmci_write_clkreg(host, clk);
}

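/*
 * MP15 delay block: writing 0 to DLYB_CR disables the delay line, so the
 * output clock simply follows the input clock again.
 */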
static void sdmmc_dlyb_mp15_input_ck(struct sdmmc_dlyb *dlyb)
{
	if (!dlyb || !dlyb->base)
		return;

	writel_relaxed(0, dlyb->base + DLYB_CR);
}

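/*
 * Power sequencing: the SDMMC can only be powered off through a
 * controller reset, and power-on has to transit through the power-cycle
 * and power-off states so the card-facing lines reach defined levels
 * before the card is supplied.
 */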
static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
{
	struct mmc_ios ios = host->mmc->ios;
	struct sdmmc_dlyb *dlyb = host->variant_priv;

	/* Add the DT-provided power register options. */
	pwr = host->pwr_reg_add;

	if (dlyb && dlyb->ops->set_input_ck)
		dlyb->ops->set_input_ck(dlyb);

	if (ios.power_mode == MMC_POWER_OFF) {
		/* Only a reset could power-off the sdmmc. */
		reset_control_assert(host->rst);
		udelay(2);
		reset_control_deassert(host->rst);

		/*
		 * Set the SDMMC in power-cycle state: SDMMC_D[7:0],
		 * SDMMC_CMD and SDMMC_CK are driven low, so the card cannot
		 * be supplied through the signal lines.
		 */
		mmci_write_pwrreg(host, MCI_STM32_PWR_CYC | pwr);
	} else if (ios.power_mode == MMC_POWER_ON) {
		/*
		 * After power-off (reset) the irq mask defined in probe is
		 * lost, so the default irq mask must be restored.
		 */
		writel(MCI_IRQENABLE | host->variant->start_err,
		       host->base + MMCIMASK0);

		/* Preserve the voltage-switch bits across the sequence. */
		pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN |
					MCI_STM32_VSWITCH);

		/*
		 * After a power-cycle state, we must set the SDMMC in
		 * power-off: SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are
		 * driven high. Only then can we move to power-on.
		 */
		mmci_write_pwrreg(host, MCI_PWR_OFF | pwr);
		mdelay(1);
		mmci_write_pwrreg(host, MCI_PWR_ON | pwr);
	}
}

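/*
 * Data control configuration: from hardware revision 3 a FIFO threshold
 * is programmed for SDR104/HS200 transfers, and the transfer mode (SDIO,
 * block with hardware stop, or plain block) is encoded into DCTRL.
 */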
static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
{
	u32 datactrl;

	datactrl = mmci_dctrl_blksz(host);

	if (host->hw_revision >= 3) {
		u32 thr = 0;

		if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104 ||
		    host->mmc->ios.timing == MMC_TIMING_MMC_HS200) {
			thr = ffs(min_t(unsigned int, host->data->blksz,
					host->variant->fifosize));
			thr = min_t(u32, thr, MMCI_STM32_THR_MASK);
		}

		writel_relaxed(thr, host->base + MMCI_STM32_FIFOTHRR);
	}

	if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
	    host->data->blocks == 1)
		datactrl |= MCI_DPSM_STM32_MODE_SDIO;
	else if (host->data->stop && !host->mrq->sbc)
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK_STOP;
	else
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK;

	return datactrl;
}

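/*
 * Busy handling: the card signals busy on D0 and BUSYD0END flags the end
 * of the busy phase. While busy is in progress the busyd0end interrupt is
 * unmasked, so completion is interrupt driven instead of polled.
 */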
static bool sdmmc_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
				u32 status, u32 err_msk)
{
	void __iomem *base = host->base;
	u32 busy_d0, busy_d0end, mask, sdmmc_status;

	mask = readl_relaxed(base + MMCIMASK0);
	sdmmc_status = readl_relaxed(base + MMCISTATUS);
	busy_d0end = sdmmc_status & MCI_STM32_BUSYD0END;
	busy_d0 = sdmmc_status & MCI_STM32_BUSYD0;

	/* Complete if there is an error or busy_d0end. */
	if ((status & err_msk) || busy_d0end)
		goto complete;

	/*
	 * On response the busy signaling is reflected on the BUSYD0 flag:
	 * if busy_d0 is in-progress we must activate the busyd0end interrupt
	 * to wait for its completion, else this request has no busy step.
	 */
	if (busy_d0) {
		if (!host->busy_status) {
			writel_relaxed(mask | host->variant->busy_detect_mask,
				       base + MMCIMASK0);
			host->busy_status = status &
				(MCI_CMDSENT | MCI_CMDRESPEND);
		}
		return false;
	}

complete:
	if (host->busy_status) {
		writel_relaxed(mask & ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
		host->busy_status = 0;
	}

	writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR);

	return true;
}

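/*
 * MP15 delay-block programming: DLYB_CFGR selects the delay unit and the
 * output phase tap; sampler mode (DLYB_CR_SEN) is only used while
 * measuring the delay-line length in the prepare step below.
 */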
static int sdmmc_dlyb_mp15_enable(struct sdmmc_dlyb *dlyb)
{
	writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);

	return 0;
}

static int sdmmc_dlyb_mp15_set_cfg(struct sdmmc_dlyb *dlyb, int unit,
				   int phase, bool sampler)
{
	u32 cfgr;

	writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR);

	cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) |
	       FIELD_PREP(DLYB_CFGR_SEL_MASK, phase);
	writel_relaxed(cfgr, dlyb->base + DLYB_CFGR);

	if (!sampler)
		writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);

	return 0;
}

static int sdmmc_dlyb_mp15_prepare(struct mmci_host *host)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	u32 cfgr;
	int i, lng, ret;

	for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) {
		dlyb->ops->set_cfg(dlyb, i, DLYB_CFGR_SEL_MAX, true);

		ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
						 (cfgr & DLYB_CFGR_LNGF),
						 1, DLYB_LNG_TIMEOUT_US);
		if (ret) {
			dev_warn(mmc_dev(host->mmc),
				 "delay line cfg timeout unit:%d cfgr:%d\n",
				 i, cfgr);
			continue;
		}

		lng = FIELD_GET(DLYB_CFGR_LNG_MASK, cfgr);
		if (lng < BIT(DLYB_NB_DELAY) && lng > 0)
			break;
	}

	if (i > DLYB_CFGR_UNIT_MAX)
		return -EINVAL;

	dlyb->unit = i;
	dlyb->max = __fls(lng);

	return 0;
}

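/*
 * MP25 delay block: controlled through SYSCFG registers. Enabling and tap
 * selection are acknowledged by polling the status register, and all
 * DLYBSD_TAPSEL_NB taps are usable, so no unit calibration is required.
 */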
static int sdmmc_dlyb_mp25_enable(struct sdmmc_dlyb *dlyb)
{
	u32 cr, sr;

	cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
	cr |= DLYBSD_CR_EN;
	writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);

	return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
					  sr, sr & DLYBSD_SR_LOCK, 1,
					  DLYBSD_TIMEOUT_1S_IN_US);
}

static int sdmmc_dlyb_mp25_set_cfg(struct sdmmc_dlyb *dlyb, int unit,
				   int phase, bool sampler)
{
	u32 cr, sr;

	cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
	cr &= ~DLYBSD_CR_RXTAPSEL_MASK;
	cr |= FIELD_PREP(DLYBSD_CR_RXTAPSEL_MASK, phase);
	writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);

	return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
					  sr, sr & DLYBSD_SR_RXTAPSEL_ACK, 1,
					  DLYBSD_TIMEOUT_1S_IN_US);
}

static int sdmmc_dlyb_mp25_prepare(struct mmci_host *host)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;

	dlyb->max = DLYBSD_TAPSEL_NB;

	return 0;
}

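/*
 * Phase sweep: run the standard tuning command (CMD19/CMD21) at every
 * phase tap, track the longest run of consecutive passing taps, then
 * settle on the middle of that run (end_of_len - max_len / 2).
 */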
static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	int cur_len = 0, max_len = 0, end_of_len = 0;
	int phase, ret;

	for (phase = 0; phase <= dlyb->max; phase++) {
		ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
		if (ret) {
			dev_err(mmc_dev(host->mmc), "tuning config failed\n");
			return ret;
		}

		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			cur_len = 0;
		} else {
			cur_len++;
			if (cur_len > max_len) {
				max_len = cur_len;
				end_of_len = phase;
			}
		}
	}

	if (!max_len) {
		dev_err(mmc_dev(host->mmc), "no tuning point found\n");
		return -EINVAL;
	}

	if (dlyb->ops->set_input_ck)
		dlyb->ops->set_input_ck(dlyb);

	phase = end_of_len - max_len / 2;
	ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
	if (ret) {
		dev_err(mmc_dev(host->mmc), "tuning reconfig failed\n");
		return ret;
	}

	dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
		dlyb->unit, dlyb->max, phase);

	return 0;
}

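/*
 * Tuning entry point: only SDR104/HS200 above 50 MHz are tuned. The
 * receive path is switched to the feedback clock (SDMMC_FBCK) before the
 * delay block is calibrated and the phase sweep runs.
 */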
static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	u32 clk;
	int ret;

	if ((host->mmc->ios.timing != MMC_TIMING_UHS_SDR104 &&
	     host->mmc->ios.timing != MMC_TIMING_MMC_HS200) ||
	    host->mmc->actual_clock <= 50000000)
		return 0;

	if (!dlyb || !dlyb->base)
		return -EINVAL;

	ret = dlyb->ops->dlyb_enable(dlyb);
	if (ret)
		return ret;

	/* Sample on the feedback clock while tuning. */
	clk = host->clk_reg;
	clk &= ~MCI_STM32_CLK_SEL_MSK;
	clk |= MCI_STM32_CLK_SELFBCK;
	mmci_write_clkreg(host, clk);

	ret = dlyb->ops->tuning_prepare(host);
	if (ret)
		return ret;

	return sdmmc_dlyb_phase_tuning(host, opcode);
}

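/*
 * Signal voltage switch: VSWITCHEN arms the hardware sequence before the
 * CMD11 exchange; afterwards the post hook waits for VSWEND and clears
 * the enable bits under the host lock.
 */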
static void sdmmc_pre_sig_volt_vswitch(struct mmci_host *host)
{
	/* Clear the voltage-switch completion flag. */
	writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR);

	mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN);
}

static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
				      struct mmc_ios *ios)
{
	unsigned long flags;
	u32 status;
	int ret = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
	    host->pwr_reg & MCI_STM32_VSWITCHEN) {
		mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
		spin_unlock_irqrestore(&host->lock, flags);

		/* Wait for the voltage switch to complete. */
		ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS,
						 status,
						 (status & MCI_STM32_VSWEND),
						 10, SDMMC_VSWEND_TIMEOUT_US);

		writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
			       host->base + MMCICLEAR);
		spin_lock_irqsave(&host->lock, flags);
		mmci_write_pwrreg(host, host->pwr_reg &
				  ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
	}
	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}

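/*
 * Callback tables referenced by sdmmc_variant_init() below; elided by the
 * line-matching extract. This reconstruction is abridged to the callbacks
 * defined in this excerpt - the mainline tables may carry a few more
 * entries.
 */
static struct mmci_host_ops sdmmc_variant_ops = {
	.validate_data = sdmmc_idma_validate_data,
	.prep_data = sdmmc_idma_prep_data,
	.unprep_data = sdmmc_idma_unprep_data,
	.get_datactrl_cfg = sdmmc_get_dctrl_cfg,
	.dma_setup = sdmmc_idma_setup,
	.dma_start = sdmmc_idma_start,
	.dma_finalize = sdmmc_idma_finalize,
	.dma_error = sdmmc_idma_error,
	.set_clkreg = mmci_sdmmc_set_clkreg,
	.set_pwrreg = mmci_sdmmc_set_pwrreg,
	.busy_complete = sdmmc_busy_complete,
	.pre_sig_volt_switch = sdmmc_pre_sig_volt_vswitch,
	.post_sig_volt_switch = sdmmc_post_sig_volt_switch,
};

static struct sdmmc_tuning_ops dlyb_tuning_mp15_ops = {
	.dlyb_enable = sdmmc_dlyb_mp15_enable,
	.set_input_ck = sdmmc_dlyb_mp15_input_ck,
	.tuning_prepare = sdmmc_dlyb_mp15_prepare,
	.set_cfg = sdmmc_dlyb_mp15_set_cfg,
};

static struct sdmmc_tuning_ops dlyb_tuning_mp25_ops = {
	.dlyb_enable = sdmmc_dlyb_mp25_enable,
	.tuning_prepare = sdmmc_dlyb_mp25_prepare,
	.set_cfg = sdmmc_dlyb_mp25_set_cfg,
};
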
void sdmmc_variant_init(struct mmci_host *host)
{
	struct device_node *np = host->mmc->parent->of_node;
	void __iomem *base_dlyb;
	struct sdmmc_dlyb *dlyb;

	host->ops = &sdmmc_variant_ops;
	host->pwr_reg = readl_relaxed(host->base + MMCIPOWER);

	base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
	if (IS_ERR(base_dlyb))
		return;

	dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL);
	if (!dlyb)
		return;

	dlyb->base = base_dlyb;
	if (of_device_is_compatible(np, "st,stm32mp25-sdmmc2"))
		dlyb->ops = &dlyb_tuning_mp25_ops;
	else
		dlyb->ops = &dlyb_tuning_mp15_ops;

	host->variant_priv = dlyb;
	host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
}