Lines Matching +full:clk +full:- +full:phase +full:-

1 // SPDX-License-Identifier: GPL-2.0-only
3 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
5 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
23 #include "sdhci-cqhci.h"
24 #include "sdhci-pltfm.h"
123 #define INVALID_TUNING_PHASE -1
137 /* Max load for eMMC Vdd-io supply */
141 msm_host->var_ops->msm_readl_relaxed(host, offset)
144 msm_host->var_ops->msm_writel_relaxed(val, host, offset)
263 struct clk *bus_clk; /* SDHC bus voter clock */
264 struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
In sdhci_priv_msm_offset():
300 return msm_host->offset;

In sdhci_msm_mci_variant_readl_relaxed():
313 return readl_relaxed(msm_host->core_mem + offset);

In sdhci_msm_v5_variant_readl_relaxed():
319 return readl_relaxed(host->ioaddr + offset);

In sdhci_msm_mci_variant_writel_relaxed():
328 writel_relaxed(val, msm_host->core_mem + offset);

In sdhci_msm_v5_variant_writel_relaxed():
334 writel_relaxed(val, host->ioaddr + offset);

In msm_get_clock_mult_for_bus_mode():
339 struct mmc_ios ios = host->mmc->ios;
349 host->flags & SDHCI_HS400_TUNING)

In msm_set_clock_rate_for_bus_mode():
359 struct mmc_ios curr_ios = host->mmc->ios;
360 struct clk *core_clk = msm_host->bulk_clks[0].clk;
368 rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate);
371 mmc_hostname(host->mmc), desired_rate, curr_ios.timing);
383 mmc_hostname(host->mmc), desired_rate, achieved_rate);
384 host->mmc->actual_clock = achieved_rate / mult;
387 msm_host->clk_rate = desired_rate;
390 mmc_hostname(host->mmc), achieved_rate, curr_ios.timing);

In msm_dll_poll_ck_out_en():
398 struct mmc_host *mmc = host->mmc;
403 ck_out_en = !!(readl_relaxed(host->ioaddr +
404 msm_offset->core_dll_config) & CORE_CK_OUT_EN);
407 if (--wait_cnt == 0) {
410 return -ETIMEDOUT;
414 ck_out_en = !!(readl_relaxed(host->ioaddr +
415 msm_offset->core_dll_config) & CORE_CK_OUT_EN);

In msm_config_cm_dll_phase():
421 static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
430 struct mmc_host *mmc = host->mmc;
434 if (phase > 0xf)
435 return -EINVAL;
437 spin_lock_irqsave(&host->lock, flags);
439 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
442 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
450 * Write the selected DLL clock output phase (0 ... 15)
453 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
455 config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
456 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
458 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
460 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
467 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
470 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
474 dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
475 mmc_hostname(mmc), phase);
477 spin_unlock_irqrestore(&host->lock, flags);
484 * setting for SD3.0 UHS-I card read operation (in SDR104
488 * selected DLL clock output phase.
In msm_find_most_appropriate_phase():
500 struct mmc_host *mmc = host->mmc;
505 return -EINVAL;
515 /* check if next phase in phase_table is consecutive or not */
523 return -EINVAL;
525 /* Check if phase-0 is present in first valid window? */
545 /* number of phases in raw where phase 0 is present */
547 /* number of phases in raw where phase 15 is present */
552 * If there are more than 1 phase windows then total
556 return -EINVAL;
580 i--;
585 ret = -EINVAL;
586 dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",

In msm_cm_dll_set_freq():
600 if (host->clock <= 112000000)
602 else if (host->clock <= 125000000)
604 else if (host->clock <= 137000000)
606 else if (host->clock <= 150000000)
608 else if (host->clock <= 162000000)
610 else if (host->clock <= 175000000)
612 else if (host->clock <= 187000000)
614 else if (host->clock <= 200000000)
617 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
620 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

In msm_init_cm_dll():
626 struct mmc_host *mmc = host->mmc;
633 msm_host->offset;
635 if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
636 xo_clk = clk_get_rate(msm_host->xo_clk);
638 spin_lock_irqsave(&host->lock, flags);
645 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
647 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
649 if (msm_host->dll_config)
650 writel_relaxed(msm_host->dll_config,
651 host->ioaddr + msm_offset->core_dll_config);
653 if (msm_host->use_14lpp_dll_reset) {
654 config = readl_relaxed(host->ioaddr +
655 msm_offset->core_dll_config);
657 writel_relaxed(config, host->ioaddr +
658 msm_offset->core_dll_config);
660 config = readl_relaxed(host->ioaddr +
661 msm_offset->core_dll_config_2);
663 writel_relaxed(config, host->ioaddr +
664 msm_offset->core_dll_config_2);
667 config = readl_relaxed(host->ioaddr +
668 msm_offset->core_dll_config);
670 writel_relaxed(config, host->ioaddr +
671 msm_offset->core_dll_config);
673 config = readl_relaxed(host->ioaddr +
674 msm_offset->core_dll_config);
676 writel_relaxed(config, host->ioaddr +
677 msm_offset->core_dll_config);
679 if (!msm_host->dll_config)
682 if (msm_host->use_14lpp_dll_reset &&
683 !IS_ERR_OR_NULL(msm_host->xo_clk)) {
686 config = readl_relaxed(host->ioaddr +
687 msm_offset->core_dll_config_2);
690 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
693 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
696 config = readl_relaxed(host->ioaddr +
697 msm_offset->core_dll_config_2);
701 writel_relaxed(config, host->ioaddr +
702 msm_offset->core_dll_config_2);
707 config = readl_relaxed(host->ioaddr +
708 msm_offset->core_dll_config);
710 writel_relaxed(config, host->ioaddr +
711 msm_offset->core_dll_config);
713 config = readl_relaxed(host->ioaddr +
714 msm_offset->core_dll_config);
716 writel_relaxed(config, host->ioaddr +
717 msm_offset->core_dll_config);
719 if (msm_host->use_14lpp_dll_reset) {
720 if (!msm_host->dll_config)
722 config = readl_relaxed(host->ioaddr +
723 msm_offset->core_dll_config_2);
725 writel_relaxed(config, host->ioaddr +
726 msm_offset->core_dll_config_2);
733 if (msm_host->uses_tassadar_dll) {
736 writel_relaxed(config, host->ioaddr +
737 msm_offset->core_dll_usr_ctl);
739 config = readl_relaxed(host->ioaddr +
740 msm_offset->core_dll_config_3);
742 if (msm_host->clk_rate < 150000000)
746 writel_relaxed(config, host->ioaddr +
747 msm_offset->core_dll_config_3);
750 config = readl_relaxed(host->ioaddr +
751 msm_offset->core_dll_config);
753 writel_relaxed(config, host->ioaddr +
754 msm_offset->core_dll_config);
756 config = readl_relaxed(host->ioaddr +
757 msm_offset->core_dll_config);
759 writel_relaxed(config, host->ioaddr +
760 msm_offset->core_dll_config);
763 while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
766 if (--wait_cnt == 0) {
769 spin_unlock_irqrestore(&host->lock, flags);
770 return -ETIMEDOUT;
775 spin_unlock_irqrestore(&host->lock, flags);

In msm_hc_select_default():
785 msm_host->offset;
787 if (!msm_host->use_cdclp533) {
788 config = readl_relaxed(host->ioaddr +
789 msm_offset->core_vendor_spec3);
791 writel_relaxed(config, host->ioaddr +
792 msm_offset->core_vendor_spec3);
795 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
798 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
807 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
810 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

In msm_hc_select_hs400():
823 struct mmc_ios ios = host->mmc->ios;
827 msm_host->offset;
830 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
834 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
839 if ((msm_host->tuning_done || ios.enhanced_strobe) &&
840 !msm_host->calibration_done) {
841 config = readl_relaxed(host->ioaddr +
842 msm_offset->core_vendor_spec);
845 writel_relaxed(config, host->ioaddr +
846 msm_offset->core_vendor_spec);
848 if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
854 rc = readl_relaxed_poll_timeout(host->ioaddr +
855 msm_offset->core_dll_status,
861 if (rc == -ETIMEDOUT)
863 mmc_hostname(host->mmc), dll_lock);
873 * sdhci_msm_hc_select_mode :- In general all timing modes are
878 * HS200 - SDR104 (Since they both are equivalent in functionality)
879 * HS400 - This involves multiple configurations
880 * Initially SDR104 - when tuning is required as HS200
887 * HS400 - divided clock (free running MCLK/2)
888 * All other modes - default (free running MCLK)
In sdhci_msm_hc_select_mode():
892 struct mmc_ios ios = host->mmc->ios;
895 host->flags & SDHCI_HS400_TUNING)

In sdhci_msm_cdclp533_calibration():
908 msm_host->offset;
910 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
914 * tuning block and restore the saved tuning phase.
920 /* Set the selected phase in delay line hw block */
921 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
925 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
927 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
929 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
931 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
933 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
935 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
937 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
939 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
941 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
943 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
947 writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
948 writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
949 writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
950 writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
951 writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
952 writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
953 writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
954 writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
955 writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
959 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
961 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
963 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
965 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
967 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
969 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
971 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
973 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
975 ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
980 if (ret == -ETIMEDOUT) {
982 mmc_hostname(host->mmc), __func__);
986 ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
990 mmc_hostname(host->mmc), __func__, ret);
991 ret = -EINVAL;
995 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
997 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
999 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),

In sdhci_msm_cm_dll_sdc4_calibration():
1006 struct mmc_host *mmc = host->mmc;
1014 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
1023 if (msm_host->updated_ddr_cfg)
1024 ddr_cfg_offset = msm_offset->core_ddr_config;
1026 ddr_cfg_offset = msm_offset->core_ddr_config_old;
1027 writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);
1029 if (mmc->ios.enhanced_strobe) {
1030 config = readl_relaxed(host->ioaddr +
1031 msm_offset->core_ddr_200_cfg);
1033 writel_relaxed(config, host->ioaddr +
1034 msm_offset->core_ddr_200_cfg);
1037 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
1039 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);
1041 ret = readl_relaxed_poll_timeout(host->ioaddr +
1042 msm_offset->core_dll_status,
1047 if (ret == -ETIMEDOUT) {
1049 mmc_hostname(host->mmc), __func__);
1056 * and MCLK must be switched on for at-least 1us before DATA
1061 if (!msm_host->use_14lpp_dll_reset) {
1062 config = readl_relaxed(host->ioaddr +
1063 msm_offset->core_vendor_spec3);
1065 writel_relaxed(config, host->ioaddr +
1066 msm_offset->core_vendor_spec3);
1075 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),

In sdhci_msm_hs400_dll_calibration():
1084 struct mmc_host *mmc = host->mmc;
1088 msm_host->offset;
1090 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
1094 * tuning block and restore the saved tuning phase.
1100 if (!mmc->ios.enhanced_strobe) {
1101 /* Set the selected phase in delay line hw block */
1103 msm_host->saved_tuning_phase);
1106 config = readl_relaxed(host->ioaddr +
1107 msm_offset->core_dll_config);
1109 writel_relaxed(config, host->ioaddr +
1110 msm_offset->core_dll_config);
1113 if (msm_host->use_cdclp533)
1118 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),

In sdhci_msm_is_tuning_needed():
1125 struct mmc_ios *ios = &host->mmc->ios;
1131 if (host->clock <= CORE_FREQ_100MHZ ||
1132 !(ios->timing == MMC_TIMING_MMC_HS400 ||
1133 ios->timing == MMC_TIMING_MMC_HS200 ||
1134 ios->timing == MMC_TIMING_UHS_SDR104) ||
1135 ios->enhanced_strobe)

In sdhci_msm_restore_sdr_dll_config():
1160 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);

In sdhci_msm_set_cdr():
1168 u32 config, oldconfig = readl_relaxed(host->ioaddr +
1169 msm_offset->core_dll_config);
1181 writel_relaxed(config, host->ioaddr +
1182 msm_offset->core_dll_config);

In sdhci_msm_execute_tuning():
1190 u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
1192 struct mmc_ios ios = host->mmc->ios;
1197 msm_host->use_cdr = false;
1202 /* Clock-Data-Recovery used to dynamically adjust RX sampling point */
1203 msm_host->use_cdr = true;
1209 msm_host->tuning_done = 0;
1213 * - select MCLK/2 in VENDOR_SPEC
1214 * - program MCLK to 400MHz (or nearest supported) in GCC
1216 if (host->flags & SDHCI_HS400_TUNING) {
1219 host->flags &= ~SDHCI_HS400_TUNING;
1228 phase = 0;
1230 /* Set the phase in delay line hw block */
1231 rc = msm_config_cm_dll_phase(host, phase);
1238 tuned_phases[tuned_phase_cnt++] = phase;
1239 dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
1240 mmc_hostname(mmc), phase);
1242 } while (++phase < ARRAY_SIZE(tuned_phases));
1251 * we get a good phase. Better to try a few times.
1255 if (--tuning_seq_cnt) {
1266 phase = rc;
1269 * Finally set the selected phase in delay
1272 rc = msm_config_cm_dll_phase(host, phase);
1275 msm_host->saved_tuning_phase = phase;
1276 dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
1277 mmc_hostname(mmc), phase);
1279 if (--tuning_seq_cnt)
1284 rc = -EIO;
1288 msm_host->tuning_done = true;
1293 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
In sdhci_msm_hs400():
1304 if (host->clock > CORE_FREQ_100MHZ &&
1305 (msm_host->tuning_done || ios->enhanced_strobe) &&
1306 !msm_host->calibration_done) {
1309 msm_host->calibration_done = true;
1312 mmc_hostname(host->mmc), ret);

In sdhci_msm_set_uhs_signaling():
1319 struct mmc_host *mmc = host->mmc;
1325 msm_host->offset;
1357 if (host->clock <= CORE_FREQ_100MHZ) {
1366 config = readl_relaxed(host->ioaddr +
1367 msm_offset->core_dll_config);
1369 writel_relaxed(config, host->ioaddr +
1370 msm_offset->core_dll_config);
1372 config = readl_relaxed(host->ioaddr +
1373 msm_offset->core_dll_config);
1375 writel_relaxed(config, host->ioaddr +
1376 msm_offset->core_dll_config);
1382 msm_host->calibration_done = false;
1386 mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
1389 if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
1390 sdhci_msm_hs400(host, &mmc->ios);

In sdhci_msm_set_pincfg():
1395 struct platform_device *pdev = msm_host->pdev;
1399 ret = pinctrl_pm_select_default_state(&pdev->dev);
1401 ret = pinctrl_pm_select_sleep_state(&pdev->dev);

In sdhci_msm_set_vmmc():
1408 if (IS_ERR(mmc->supply.vmmc))
1411 return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);

In msm_toggle_vqmmc():
1420 if (msm_host->vqmmc_enabled == level)
1425 if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
1427 else if (msm_host->caps_0 & CORE_1_8V_SUPPORT)
1430 if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
1438 ret = regulator_enable(mmc->supply.vqmmc);
1440 ret = regulator_disable(mmc->supply.vqmmc);
1447 msm_host->vqmmc_enabled = level;

In msm_config_vqmmc_mode():
1458 ret = regulator_set_load(mmc->supply.vqmmc, load);

In sdhci_msm_set_vqmmc():
1471 if (IS_ERR(mmc->supply.vqmmc) ||
1472 (mmc->ios.power_mode == MMC_POWER_UNDEFINED))
1485 mmc->card && mmc_card_mmc(mmc->card);

In sdhci_msm_init_pwr_irq_wait():
1497 init_waitqueue_head(&msm_host->pwr_irq_wait);

In sdhci_msm_complete_pwr_irq_wait():
1503 wake_up(&msm_host->pwr_irq_wait);

In sdhci_msm_check_power_status():
1522 msm_host->offset;
1525 mmc_hostname(host->mmc), __func__, req_type,
1526 msm_host->curr_pwr_state, msm_host->curr_io_level);
1531 * Since sdhci-msm-v5, this bit has been removed and SW must consider
1534 if (!msm_host->mci_removed)
1536 msm_offset->core_generics);
1543 * The IRQ for request type IO High/LOW will be generated when -
1551 * for host->pwr to handle a case where IO voltage high request is
1554 if ((req_type & REQ_IO_HIGH) && !host->pwr) {
1556 mmc_hostname(host->mmc), req_type);
1559 if ((req_type & msm_host->curr_pwr_state) ||
1560 (req_type & msm_host->curr_io_level))
1569 if (!wait_event_timeout(msm_host->pwr_irq_wait,
1570 msm_host->pwr_irq_flag,
1572 dev_warn(&msm_host->pdev->dev,
1574 mmc_hostname(host->mmc), req_type);
1576 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),

In sdhci_msm_dump_pwr_ctrl_regs():
1585 msm_host->offset;
1588 mmc_hostname(host->mmc),
1589 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
1590 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
1591 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));

In sdhci_msm_handle_pwr_irq():
1598 struct mmc_host *mmc = host->mmc;
1603 const struct sdhci_msm_offset *msm_offset = msm_host->offset;
1606 msm_offset->core_pwrctl_status);
1610 msm_offset->core_pwrctl_clear);
1620 msm_offset->core_pwrctl_status)) {
1623 mmc_hostname(host->mmc), irq_status);
1629 msm_offset->core_pwrctl_clear);
1630 retry--;
1668 if (io_level && !IS_ERR(mmc->supply.vqmmc) && !pwr_state) {
1669 ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
1673 mmc->ios.signal_voltage, mmc->ios.vdd,
1685 msm_offset->core_pwrctl_ctl);
1691 if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
1704 config = readl_relaxed(host->ioaddr +
1705 msm_offset->core_vendor_spec);
1709 (msm_host->caps_0 & CORE_3_0V_SUPPORT))
1712 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
1716 writel_relaxed(new_config, host->ioaddr +
1717 msm_offset->core_vendor_spec);
1721 msm_host->curr_pwr_state = pwr_state;
1723 msm_host->curr_io_level = io_level;
1726 mmc_hostname(msm_host->mmc), __func__, irq, irq_status,

In sdhci_msm_pwr_irq():
1737 msm_host->pwr_irq_flag = 1;

In sdhci_msm_get_max_clock():
1748 struct clk *core_clk = msm_host->bulk_clks[0].clk;
1759 * __sdhci_msm_set_clock - sdhci_msm clock control.
In __sdhci_msm_set_clock():
1768 u16 clk;
1780 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1781 sdhci_enable_clk(host, clk);
1784 /* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
In sdhci_msm_set_clock():
1791 host->mmc->actual_clock = msm_host->clk_rate = 0;

In sdhci_msm_ice_init():
1813 struct mmc_host *mmc = msm_host->mmc;
1821 if (ice == ERR_PTR(-EOPNOTSUPP)) {
1829 msm_host->ice = ice;
1830 mmc->caps2 |= MMC_CAP2_CRYPTO;

In sdhci_msm_ice_enable():
1837 if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
1838 qcom_ice_enable(msm_host->ice);

In sdhci_msm_ice_resume():
1843 if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
1844 return qcom_ice_resume(msm_host->ice);

In sdhci_msm_ice_suspend():
1851 if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
1852 return qcom_ice_suspend(msm_host->ice);
1859 * vendor-specific SCM calls for this; it doesn't support the standard way.
In sdhci_msm_program_key():
1865 struct sdhci_host *host = mmc_priv(cq_host->mmc);
1870 /* Only AES-256-XTS has been tested so far. */
1871 cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
1874 return -EINVAL;
1876 if (cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)
1877 return qcom_ice_program_key(msm_host->ice,
1880 cfg->crypto_key,
1881 cfg->data_unit_size, slot);
1883 return qcom_ice_evict_key(msm_host->ice, slot);

In sdhci_msm_cqe_irq():
1925 cqhci_irq(host->mmc, intmask, cmd_error, data_error);

In sdhci_msm_cqe_disable():
1947 * on 16-byte descriptors in 64bit mode.
1949 if (host->flags & SDHCI_USE_64_BIT_DMA)
1950 host->desc_sz = 16;
1952 spin_lock_irqsave(&host->lock, flags);
1965 spin_unlock_irqrestore(&host->lock, flags);

In sdhci_msm_set_timeout():
1979 * using 4 * MCLK * 2^(count + 13). where MCLK = 1 / host->clock.
1981 if (cmd && cmd->data && host->clock > 400000 &&
1982 host->clock <= 50000000 &&
1983 ((1 << (count + start)) > (10 * host->clock)))
1984 host->data_timeout = 22LL * NSEC_PER_SEC;

In sdhci_msm_cqe_add_host():
2009 if (host->caps & SDHCI_CAN_64BIT)
2010 host->alloc_desc_sz = 16;
2019 dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
2023 msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
2024 cq_host->ops = &sdhci_msm_cqhci_ops;
2026 dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
2032 ret = cqhci_init(cq_host, host->mmc, dma64);
2034 dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
2035 mmc_hostname(host->mmc), ret);
2050 if (host->flags & SDHCI_USE_64_BIT_DMA)
2051 host->desc_sz = 12;
2057 dev_info(&pdev->dev, "%s: CQE init: success\n",
2058 mmc_hostname(host->mmc));

In __sdhci_msm_check_write():
2085 if (host->pwr && (val & SDHCI_RESET_ALL))
2092 msm_host->transfer_mode = val;
2095 if (!msm_host->use_cdr)
2097 if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
2106 msm_host->pwr_irq_flag = 0;

In sdhci_msm_writew():
2122 writew_relaxed(val, host->ioaddr + reg);

In sdhci_msm_writeb():
2135 writeb_relaxed(val, host->ioaddr + reg);

In sdhci_msm_set_regulator_caps():
2143 struct mmc_host *mmc = msm_host->mmc;
2144 struct regulator *supply = mmc->supply.vqmmc;
2147 const struct sdhci_msm_offset *msm_offset = msm_host->offset;
2149 if (!IS_ERR(mmc->supply.vqmmc)) {
2165 u32 io_level = msm_host->curr_io_level;
2167 config = readl_relaxed(host->ioaddr +
2168 msm_offset->core_vendor_spec);
2177 host->ioaddr + msm_offset->core_vendor_spec);
2179 msm_host->caps_0 |= caps;

In sdhci_msm_register_vreg():
2187 ret = mmc_regulator_get_supply(msm_host->mmc);

In sdhci_msm_start_signal_voltage_switch():
2206 if (host->version < SDHCI_SPEC_300)
2211 switch (ios->signal_voltage) {
2213 if (!(host->flags & SDHCI_SIGNALING_330))
2214 return -EINVAL;
2220 if (!(host->flags & SDHCI_SIGNALING_180))
2221 return -EINVAL;
2228 return -EINVAL;
2245 return -EAGAIN;
2250 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
In sdhci_msm_dump_vendor_regs():
2256 const struct sdhci_msm_offset *msm_offset = msm_host->offset;
2258 SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n");
2262 readl_relaxed(host->ioaddr + msm_offset->core_dll_status),
2263 readl_relaxed(host->ioaddr + msm_offset->core_dll_config),
2264 readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2));
2267 readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3),
2268 readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl),
2269 readl_relaxed(host->ioaddr + msm_offset->core_ddr_config));
2272 readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec),
2273 readl_relaxed(host->ioaddr +
2274 msm_offset->core_vendor_spec_func2),
2275 readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3));
2311 {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
2312 {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
2313 {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var},
2314 {.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
2315 {.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
In sdhci_msm_get_of_property():
2349 struct device_node *node = pdev->dev.of_node;
2353 if (of_property_read_u32(node, "qcom,ddr-config",
2354 &msm_host->ddr_config))
2355 msm_host->ddr_config = DDR_CONFIG_POR_VAL;
2357 of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
2359 if (of_device_is_compatible(node, "qcom,msm8916-sdhci"))
2360 host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA;
2384 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to in sdhci_msm_gcc_reset()
In sdhci_msm_probe():
2406 struct clk *clk;
2413 struct device_node *node = pdev->dev.of_node;
2419 host->sdma_boundary = 0;
2422 msm_host->mmc = host->mmc;
2423 msm_host->pdev = pdev;
2425 ret = mmc_of_parse(host->mmc);
2433 var_info = of_device_get_match_data(&pdev->dev);
2435 msm_host->mci_removed = var_info->mci_removed;
2436 msm_host->restore_dll_config = var_info->restore_dll_config;
2437 msm_host->var_ops = var_info->var_ops;
2438 msm_host->offset = var_info->offset;
2440 msm_offset = msm_host->offset;
2445 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
2447 ret = sdhci_msm_gcc_reset(&pdev->dev, host);
2452 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
2453 if (!IS_ERR(msm_host->bus_clk)) {
2454 /* Vote for max. clk rate for max. performance */
2455 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
2458 ret = clk_prepare_enable(msm_host->bus_clk);
2464 clk = devm_clk_get(&pdev->dev, "iface");
2465 if (IS_ERR(clk)) {
2466 ret = PTR_ERR(clk);
2467 dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
2470 msm_host->bulk_clks[1].clk = clk;
2473 clk = devm_clk_get(&pdev->dev, "core");
2474 if (IS_ERR(clk)) {
2475 ret = PTR_ERR(clk);
2476 dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
2479 msm_host->bulk_clks[0].clk = clk;
2482 ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL);
2486 ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
2491 ret = devm_pm_opp_of_add_table(&pdev->dev);
2492 if (ret && ret != -ENODEV) {
2493 dev_err(&pdev->dev, "Invalid OPP table in Device tree\n");
2498 ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
2500 dev_warn(&pdev->dev, "core clock boost failed\n");
2502 clk = devm_clk_get(&pdev->dev, "cal");
2503 if (IS_ERR(clk))
2504 clk = NULL;
2505 msm_host->bulk_clks[2].clk = clk;
2507 clk = devm_clk_get(&pdev->dev, "sleep");
2508 if (IS_ERR(clk))
2509 clk = NULL;
2510 msm_host->bulk_clks[3].clk = clk;
2512 ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
2513 msm_host->bulk_clks);
2521 msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
2522 if (IS_ERR(msm_host->xo_clk)) {
2523 ret = PTR_ERR(msm_host->xo_clk);
2524 dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
2527 if (!msm_host->mci_removed) {
2528 msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
2529 if (IS_ERR(msm_host->core_mem)) {
2530 ret = PTR_ERR(msm_host->core_mem);
2537 host->ioaddr + msm_offset->core_vendor_spec);
2539 if (!msm_host->mci_removed) {
2542 msm_offset->core_hc_mode);
2544 msm_offset->core_hc_mode);
2547 msm_offset->core_hc_mode);
2550 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
2551 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
2556 msm_offset->core_mci_version);
2560 dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
2564 msm_host->use_14lpp_dll_reset = true;
2571 msm_host->use_cdclp533 = true;
2578 config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
2580 writel_relaxed(config, host->ioaddr +
2581 msm_offset->core_vendor_spec_capabilities0);
2585 msm_host->updated_ddr_cfg = true;
2588 msm_host->uses_tassadar_dll = true;
2610 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
2611 if (msm_host->pwr_irq < 0) {
2612 ret = msm_host->pwr_irq;
2619 msm_offset->core_pwrctl_mask);
2621 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
2623 dev_name(&pdev->dev), host);
2625 dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
2629 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
2632 host->max_timeout_count = 0xF;
2634 pm_runtime_get_noresume(&pdev->dev);
2635 pm_runtime_set_active(&pdev->dev);
2636 pm_runtime_enable(&pdev->dev);
2637 pm_runtime_set_autosuspend_delay(&pdev->dev,
2639 pm_runtime_use_autosuspend(&pdev->dev);
2641 host->mmc_host_ops.start_signal_voltage_switch =
2643 host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
2644 if (of_property_read_bool(node, "supports-cqe"))
2651 pm_runtime_mark_last_busy(&pdev->dev);
2652 pm_runtime_put_autosuspend(&pdev->dev);
2657 pm_runtime_disable(&pdev->dev);
2658 pm_runtime_set_suspended(&pdev->dev);
2659 pm_runtime_put_noidle(&pdev->dev);
2661 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
2662 msm_host->bulk_clks);
2664 if (!IS_ERR(msm_host->bus_clk))
2665 clk_disable_unprepare(msm_host->bus_clk);

In sdhci_msm_remove():
2676 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
2681 pm_runtime_get_sync(&pdev->dev);
2682 pm_runtime_disable(&pdev->dev);
2683 pm_runtime_put_noidle(&pdev->dev);
2685 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
2686 msm_host->bulk_clks);
2687 if (!IS_ERR(msm_host->bus_clk))
2688 clk_disable_unprepare(msm_host->bus_clk);

In sdhci_msm_runtime_suspend():
2699 spin_lock_irqsave(&host->lock, flags);
2700 host->runtime_suspended = true;
2701 spin_unlock_irqrestore(&host->lock, flags);
2705 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
2706 msm_host->bulk_clks);

In sdhci_msm_runtime_resume():
2719 ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
2720 msm_host->bulk_clks);
2724 * Whenever core-clock is gated dynamically, it's needed to
2727 if (msm_host->restore_dll_config && msm_host->clk_rate) {
2733 dev_pm_opp_set_rate(dev, msm_host->clk_rate);
2739 spin_lock_irqsave(&host->lock, flags);
2740 host->runtime_suspended = false;
2741 spin_unlock_irqrestore(&host->lock, flags);