Lines Matching +full:exynos7 +full:- +full:clk
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
13 #include <linux/arm-smccc.h>
14 #include <linux/clk.h>
25 #include "ufshcd-pltfrm.h"
29 #include "ufs-exynos.h"
106 /* Multi-host registers */
215 if (ufs->sysreg) {
216 return regmap_update_bits(ufs->sysreg,
217 ufs->iocc_offset,
218 ufs->iocc_mask, ufs->iocc_val);
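The fragment above is the whole I/O cache-coherency control: the shareability bits live in a SoC system-controller (syscon) register, not in the UFS block itself. A minimal sketch of the same pattern (the offset, mask, and value normally come from DT and drv_data; the lookup phandle "samsung,sysreg" appears at line 1182):

	struct regmap *sysreg;

	sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
	if (!IS_ERR(sysreg))
		/* read-modify-write only the shareability bits */
		regmap_update_bits(sysreg, iocc_offset, iocc_mask, iocc_val);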
226 struct ufs_hba *hba = ufs->hba;
230 hba->caps |= UFSHCD_CAP_WB_EN;
233 hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
249 struct ufs_hba *hba = ufs->hba;
263 struct ufs_hba *hba = ufs->hba;
267 rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
268 tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
273 DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
290 DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
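The rx/tx line-reset periods at lines 267-268 convert a fixed reset time into mclk cycles, which works out dimensionally if the *_LINE_RESET_TIME constants are in microseconds. A worked example with assumed values (RX_LINE_RESET_TIME = 310 and a 166 MHz mclk):

	/* 310 us * 166000000 Hz / NSEC_PER_MSEC(1000000) = 310 * 166 = 51460 cycles */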
318 struct ufs_hba *hba = ufs->hba;
331 struct ufs_hba *hba = ufs->hba;
344 struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
345 u32 val = attr->pa_dbg_opt_suite1_val;
346 struct ufs_hba *hba = ufs->hba;
363 ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off),
369 ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off), val);
376 struct ufs_hba *hba = ufs->hba;
406 struct ufs_hba *hba = ufs->hba;
407 int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);
421 * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w
422 * Control should be disabled in the below cases
423 * - Before host controller S/W reset
424 * - Access to UFS protector's register
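A rough reconstruction of what such a helper does (the HCI_MISC register name and HCI_CORECLK_CTRL_EN bit name below are illustrative, not taken from the matches):

	static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en)
	{
		u32 misc = hci_readl(ufs, HCI_MISC);

		if (en)		/* hand the HCI core clock back to h/w gating */
			hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC);
		else		/* s/w reset or UFS protector access imminent */
			hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC);
	}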
452 struct ufs_hba *hba = ufs->hba;
453 struct list_head *head = &hba->clk_list_head;
464 if (!IS_ERR(clki->clk)) {
465 if (!strcmp(clki->name, "core_clk"))
466 ufs->clk_hci_core = clki->clk;
467 else if (!strcmp(clki->name, "sclk_unipro_main"))
468 ufs->clk_unipro_main = clki->clk;
472 if (!ufs->clk_hci_core || !ufs->clk_unipro_main) {
473 dev_err(hba->dev, "failed to get clk info\n");
474 ret = -EINVAL;
478 ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main);
479 pclk_rate = clk_get_rate(ufs->clk_hci_core);
480 f_min = ufs->pclk_avail_min;
481 f_max = ufs->pclk_avail_max;
483 if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
494 dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
495 ret = -EINVAL;
499 ufs->pclk_rate = pclk_rate;
500 ufs->pclk_div = div;
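On SoCs with an APB clock divider (the EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL branch at line 483), the driver halves the HCI core clock until it fits the available window checked above; a sketch of that search, with the divider bound assumed:

	for (div = 0; div < 8; div++, pclk_rate >>= 1)	/* bound of 8 assumed */
		if (pclk_rate <= f_max)
			break;
	if (pclk_rate < f_min || pclk_rate > f_max)
		return -EINVAL;		/* the "not available pclk range" path above */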
508 if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
512 hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div),
519 struct ufs_hba *hba = ufs->hba;
520 struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
523 UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl);
528 struct ufs_hba *hba = ufs->hba;
529 struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
534 unsigned long clk = 0, _clk, clk_period;
535 int i = 0, clk_idx = -1;
541 if (_clk > clk) {
543 clk = _clk;
548 if (clk_idx == -1) {
550 dev_err(hba->dev,
554 attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK;
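Lines 534-554 choose CMN_PWM_CLK_CTRL by scanning a set of dividers and keeping the fastest candidate inside a legal PWM window; a sketch with assumed dividers and window (the exact candidate derivation in the driver differs):

	static const int divs[] = { 32, 16, 8, 4 };		/* assumed set */
	unsigned long pwm_min = 3 * 1000 * 1000;		/* assumed window */
	unsigned long pwm_max = 9 * 1000 * 1000;

	for (i = 0; i < ARRAY_SIZE(divs); i++) {
		_clk = pclk_rate / divs[i];	/* candidate PWM clock rate */
		if (_clk >= pwm_min && _clk <= pwm_max && _clk > clk) {
			clk_idx = i;		/* keep the fastest in-range one */
			clk = _clk;
		}
	}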
560 long pclk_rate = ufs->pclk_rate;
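Line 560 belongs to the helper that converts a nanosecond period into pclk cycles for all of the t_cfg fields below; a sketch of that conversion (name and fixed-point details assumed):

	static long calc_time_cntr(long pclk_rate, long period_ns)
	{
		const int precise = 10;	/* one extra decimal digit of precision */
		long clk_period = NSEC_PER_SEC / pclk_rate;
		long fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;

		return (period_ns * precise) / ((clk_period * precise) + fraction);
	}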
571 struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
572 struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
574 if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)
577 t_cfg->tx_linereset_p =
578 exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec);
579 t_cfg->tx_linereset_n =
580 exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec);
581 t_cfg->tx_high_z_cnt =
582 exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec);
583 t_cfg->tx_base_n_val =
584 exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec);
585 t_cfg->tx_gran_n_val =
586 exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec);
587 t_cfg->tx_sleep_cnt =
588 exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt);
590 t_cfg->rx_linereset =
591 exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec);
592 t_cfg->rx_hibern8_wait =
593 exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec);
594 t_cfg->rx_base_n_val =
595 exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec);
596 t_cfg->rx_gran_n_val =
597 exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec);
598 t_cfg->rx_sleep_cnt =
599 exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt);
600 t_cfg->rx_stall_cnt =
601 exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt);
606 struct ufs_hba *hba = ufs->hba;
607 struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
616 ufs->drv_data->uic_attr->rx_filler_enable);
618 RX_LINERESET(t_cfg->rx_linereset));
620 RX_BASE_NVAL_L(t_cfg->rx_base_n_val));
622 RX_BASE_NVAL_H(t_cfg->rx_base_n_val));
624 RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val));
626 RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val));
628 RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt));
630 RX_OV_STALL_CNT(t_cfg->rx_stall_cnt));
635 TX_LINERESET_P(t_cfg->tx_linereset_p));
637 TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt));
639 TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt));
641 TX_BASE_NVAL_L(t_cfg->tx_base_n_val));
643 TX_BASE_NVAL_H(t_cfg->tx_base_n_val));
645 TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val));
647 TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val));
650 TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt));
652 ufs->drv_data->uic_attr->tx_min_activatetime);
660 struct ufs_hba *hba = ufs->hba;
661 struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
669 attr->rx_hs_g1_sync_len_cap);
672 attr->rx_hs_g2_sync_len_cap);
675 attr->rx_hs_g3_sync_len_cap);
678 attr->rx_hs_g1_prep_sync_len_cap);
681 attr->rx_hs_g2_prep_sync_len_cap);
684 attr->rx_hs_g3_prep_sync_len_cap);
687 if (attr->rx_adv_fine_gran_sup_en == 0) {
692 if (attr->rx_min_actv_time_cap)
696 attr->rx_min_actv_time_cap);
698 if (attr->rx_hibern8_time_cap)
701 attr->rx_hibern8_time_cap);
703 } else if (attr->rx_adv_fine_gran_sup_en == 1) {
705 if (attr->rx_adv_fine_gran_step)
709 attr->rx_adv_fine_gran_step));
711 if (attr->rx_adv_min_actv_time_cap)
715 attr->rx_adv_min_actv_time_cap);
717 if (attr->rx_adv_hibern8_time_cap)
721 attr->rx_adv_hibern8_time_cap);
730 struct ufs_hba *hba = ufs->hba;
755 if (ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)
774 struct ufs_hba *hba = ufs->hba;
775 u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx);
811 major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, hba->ufs_version);
816 /* Default is HS-G3 */
825 struct phy *generic_phy = ufs->phy;
831 ret = -EINVAL;
847 if (ufs->drv_data->pre_pwr_change)
848 ufs->drv_data->pre_pwr_change(ufs, dev_req_params);
853 switch (dev_req_params->hs_rate) {
876 struct phy *generic_phy = ufs->phy;
877 int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx);
878 int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx);
888 if (ufs->drv_data->post_pwr_change)
889 ufs->drv_data->post_pwr_change(ufs, pwr_req);
892 switch (pwr_req->hs_rate) {
900 "FAST", pwr_req->hs_rate == PA_HS_MODE_A ? "A" : "B",
907 dev_info(hba->dev, "Power mode changed to : %s\n", pwr_str);
950 struct ufs_hba *hba = ufs->hba;
951 struct phy *generic_phy = ufs->phy;
954 if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) {
956 &ufs->avail_ln_rx);
958 &ufs->avail_ln_tx);
959 WARN(ufs->avail_ln_rx != ufs->avail_ln_tx,
961 ufs->avail_ln_rx, ufs->avail_ln_tx);
964 phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
966 if (generic_phy->power_count) {
973 dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
992 struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
993 struct ufs_hba *hba = ufs->hba;
995 if (attr->pa_dbg_clk_period_off)
996 ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_clk_period_off),
997 DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
1000 ufs->drv_data->uic_attr->tx_trailingclks);
1002 if (attr->pa_dbg_opt_suite1_off)
1003 ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off),
1004 attr->pa_dbg_opt_suite1_val);
1006 if (attr->pa_dbg_opt_suite2_off)
1007 ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite2_off),
1008 attr->pa_dbg_opt_suite2_val);
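The PA_DBG_CLK_PERIOD value at lines 995-997 is simply the UniPro main clock period in nanoseconds, rounded up; e.g. for an assumed 166 MHz sclk_unipro_main:

	DIV_ROUND_UP(NSEC_PER_SEC, 166000000)	/* = 7, since 1e9/166e6 = 6.02 ns */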
1041 if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
1046 if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
1068 if (ufs->drv_data->pre_link)
1069 ufs->drv_data->pre_link(ufs);
1071 /* m-phy */
1073 if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
1086 if (ufs->opts & EXYNOS_UFS_OPT_TIMER_TICK_SELECT) {
1099 struct phy *generic_phy = ufs->phy;
1100 struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
1108 if (hba->caps & UFSHCD_CAP_CRYPTO)
1113 hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
1114 hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
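The two writes at lines 1113-1114 mark every transfer/task-management slot as nexus-established, using an all-ones mask sized to the queue depth; e.g. for an assumed nutrs of 32:

	BIT(32) - 1	/* = 0xFFFFFFFF, one bit per UTRL slot */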
1117 if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
1121 if (attr->pa_granularity) {
1124 attr->pa_granularity);
1127 if (attr->pa_tactivate)
1129 attr->pa_tactivate);
1130 if (attr->pa_hibern8time &&
1131 !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER))
1133 attr->pa_hibern8time);
1136 if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
1137 if (!attr->pa_granularity)
1139 &attr->pa_granularity);
1140 if (!attr->pa_hibern8time)
1142 &attr->pa_hibern8time);
1148 if (attr->pa_granularity < 1 || attr->pa_granularity > 6) {
1150 dev_warn(hba->dev,
1153 attr->pa_granularity);
1154 attr->pa_granularity = 6;
1160 if (ufs->drv_data->post_link)
1161 ufs->drv_data->post_link(ufs);
1168 struct device_node *np = dev->of_node;
1172 ufs->drv_data = device_get_match_data(dev);
1174 if (ufs->drv_data && ufs->drv_data->uic_attr) {
1175 attr = ufs->drv_data->uic_attr;
1178 ret = -EINVAL;
1182 ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
1183 if (IS_ERR(ufs->sysreg))
1184 ufs->sysreg = NULL;
1187 &ufs->iocc_offset)) {
1189 ufs->iocc_offset = UFS_SHAREABILITY_OFFSET;
1193 ufs->iocc_mask = ufs->drv_data->iocc_mask;
1195 * no 'dma-coherent' property means the descriptors are
1196 * non-cacheable so iocc shareability should be disabled.
1198 if (of_dma_is_coherent(dev->of_node))
1199 ufs->iocc_val = ufs->iocc_mask;
1201 ufs->iocc_val = 0;
1203 ufs->pclk_avail_min = PCLK_AVAIL_MIN;
1204 ufs->pclk_avail_max = PCLK_AVAIL_MAX;
1206 attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN;
1207 attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL;
1208 attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP;
1209 attr->pa_granularity = PA_GRANULARITY_VAL;
1210 attr->pa_tactivate = PA_TACTIVATE_VAL;
1211 attr->pa_hibern8time = PA_HIBERN8TIME_VAL;
1220 ufs->hba = hba;
1221 ufs->opts = ufs->drv_data->opts;
1222 ufs->rx_sel_idx = PA_MAXDATALANES;
1223 if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
1224 ufs->rx_sel_idx = 0;
1225 hba->priv = (void *)ufs;
1226 hba->quirks = ufs->drv_data->quirks;
1233 * hardware on Exynos and Exynos-based SoCs. The interface to this hardware is
1248 * struct fmp_sg_entry - nonstandard format of PRDT entries when FMP is enabled
1251 * bits of the 'size' field, i.e. the last 32-bit word. When these
1256 * @file_enckey: The first half of the AES-XTS key with all bytes reversed
1257 * @file_twkey: The second half of the AES-XTS key with all bytes reversed
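From the kernel-doc fragments above and the field accesses at lines 1412-1429, the FMP PRDT entry is roughly laid out as below (array sizes inferred from the 32-byte halves of an AES-256-XTS key; trailing fields assumed):

	struct fmp_sg_entry {
		struct ufshcd_sg_entry base;	/* standard entry; algo bits share 'size' */
		__be64 file_iv[2];		/* IV, bytes reversed */
		__be64 file_enckey[4];		/* first AES-XTS key half, bytes reversed */
		__be64 file_twkey[4];		/* second (tweak) key half, bytes reversed */
		__be64 disk_iv[2];		/* not used by this driver */
		__le64 reserved[2];
	};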
1285 struct blk_crypto_profile *profile = &hba->crypto_profile;
1302 * downstream driver source for gs101 and other Exynos-based SoCs. It
1305 * on other Exynos-based SoCs too, and might even still be the only way
1310 if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE))
1321 dev_warn(hba->dev,
1334 dev_err(hba->dev,
1341 err = devm_blk_crypto_profile_init(hba->dev, profile, 0);
1344 dev_err(hba->dev, "Failed to initialize crypto profile: %d\n",
1348 profile->max_dun_bytes_supported = AES_BLOCK_SIZE;
1349 profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
1350 profile->dev = hba->dev;
1351 profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] =
1354 /* Advertise crypto support to ufshcd-core. */
1355 hba->caps |= UFSHCD_CAP_CRYPTO;
1357 /* Advertise crypto quirks to ufshcd-core. */
1358 hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE |
1368 if (!(hba->caps & UFSHCD_CAP_CRYPTO))
1374 dev_err(hba->dev,
1380 dev_err(hba->dev,
1387 key + AES_KEYSIZE_256 - (j + 1) * sizeof(u64)));
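Line 1387 is the tail of a small helper that loads one 64-bit key word with the overall byte order reversed, as the struct doc above requires; a plausible reconstruction:

	static __be64 fmp_key_word(const u8 *key, int j)
	{
		/* walk the 32-byte key half backwards in u64 strides, then swap */
		return cpu_to_be64(get_unaligned_le64(
				key + AES_KEYSIZE_256 - (j + 1) * sizeof(u64)));
	}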
1396 const u8 *enckey = crypt_ctx->bc_key->bytes;
1398 u64 dun_lo = crypt_ctx->bc_dun[0];
1399 u64 dun_hi = crypt_ctx->bc_dun[1];
1403 if (WARN_ON_ONCE(!(hba->caps & UFSHCD_CAP_CRYPTO)))
1404 return -EIO;
1412 if (prd->base.size != cpu_to_le32(DATA_UNIT_SIZE - 1)) {
1413 dev_err(hba->dev,
1415 return -EIO;
1419 prd->base.size |= cpu_to_le32((FMP_ALGO_MODE_AES_XTS << 28) |
1423 prd->file_iv[0] = cpu_to_be64(dun_hi);
1424 prd->file_iv[1] = cpu_to_be64(dun_lo);
1428 prd->file_enckey[j] = fmp_key_word(enckey, j);
1429 prd->file_twkey[j] = fmp_key_word(twkey, j);
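Putting lines 1412-1429 together: each PRDT entry must describe exactly one data unit, and the cipher selector is packed into the spare high bits of the size field; illustratively, assuming DATA_UNIT_SIZE = 4096:

	/*
	 * base.size == 4096 - 1 = 0xFFF		(PRDT byte count is 0-based)
	 * base.size |= FMP_ALGO_MODE_AES_XTS << 28	(top nibble selects XTS)
	 * file_iv[] = 128-bit DUN, big-endian, high word first (dun_hi, dun_lo)
	 */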
1456 struct device *dev = hba->dev;
1463 return -ENOMEM;
1465 /* exynos-specific hci */
1466 ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
1467 if (IS_ERR(ufs->reg_hci)) {
1469 return PTR_ERR(ufs->reg_hci);
1473 ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
1474 if (IS_ERR(ufs->reg_unipro)) {
1476 return PTR_ERR(ufs->reg_unipro);
1480 ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
1481 if (IS_ERR(ufs->reg_ufsp)) {
1483 return PTR_ERR(ufs->reg_ufsp);
1492 ufs->phy = devm_phy_get(dev, "ufs-phy");
1493 if (IS_ERR(ufs->phy)) {
1494 ret = PTR_ERR(ufs->phy);
1495 dev_err(dev, "failed to get ufs-phy\n");
1503 if (ufs->drv_data->drv_init) {
1504 ret = ufs->drv_data->drv_init(ufs);
1506 dev_err(dev, "failed to init drv-data\n");
1518 hba->host->dma_alignment = DATA_UNIT_SIZE - 1;
1522 hba->priv = NULL;
1530 phy_power_off(ufs->phy);
1531 phy_exit(ufs->phy);
1550 dev_err(hba->dev, "timeout host sw-reset\n");
1551 ret = -ETIMEDOUT;
1570 struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
1573 if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
1577 if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
1581 int h8_time = attr->pa_hibern8time *
1582 granularity_tbl[attr->pa_granularity - 1];
1587 delta = h8_time - ktime_us_delta(ktime_get(),
1588 ufs->entry_hibern8_t);
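The software hibern8 timer path above enforces a minimum HIBERN8 residency: PA_Hibern8Time is scaled to microseconds through granularity_tbl (line 1582) and any remainder is slept off before the link is allowed to exit. A worked example with assumed values:

	/*
	 * Assuming granularity_tbl[] = { 1, 4, 8, 16, 32, 100 } us units:
	 * pa_granularity = 4 (16 us) and pa_hibern8time = 20 give
	 *   h8_time = 20 * 16 = 320 us minimum residency,
	 * and delta = 320 - us_already_spent is waited out in <= 1 ms chunks.
	 */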
1605 ufs->entry_hibern8_t = ktime_get();
1607 if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
1626 hba->host->max_segment_size = DATA_UNIT_SIZE;
1628 if (ufs->drv_data->pre_hce_enable) {
1629 ret = ufs->drv_data->pre_hce_enable(ufs);
1641 if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
1644 if (ufs->drv_data->post_hce_enable)
1645 ret = ufs->drv_data->post_hce_enable(ufs);
1718 if (ufs->drv_data->suspend)
1719 ufs->drv_data->suspend(ufs);
1722 phy_power_off(ufs->phy);
1732 phy_power_on(ufs->phy);
1769 return -ETIME;
1774 struct device *dev = hba->dev;
1781 return -ENOMEM;
1783 /* exynos-specific hci */
1784 ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
1785 if (IS_ERR(ufs->reg_hci)) {
1787 return PTR_ERR(ufs->reg_hci);
1794 ufs->drv_data = device_get_match_data(dev);
1795 if (!ufs->drv_data)
1796 return -ENODEV;
1805 struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
1806 struct ufs_hba *hba = ufs->hba;
1809 ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_clk_period_off),
1810 DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
1816 DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
1822 DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
1834 ufshcd_dme_set(hba, UIC_ARG_MIB(attr->pa_dbg_opt_suite1_off),
1846 struct ufs_hba *hba = ufs->hba;
1884 struct ufs_hba *hba = ufs->hba;
1901 return (16 * 1000 * 1000000UL / ufs->mclk_rate);
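Line 1901 appears to return the mclk period in fixed point with four fractional bits (period in ns times 16); e.g. for an assumed 166 MHz mclk:

	16 * 1000 * 1000000UL / 166000000	/* = 96, i.e. 6.02 ns * 16 */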
1906 struct ufs_hba *hba = ufs->hba;
1910 rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate)
1912 tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate)
1921 DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
1936 DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
1962 struct ufs_hba *hba = ufs->hba;
1980 struct ufs_hba *hba = ufs->hba;
2020 struct device *dev = &pdev->dev;
2025 if (drv_data && drv_data->vops)
2026 vops = drv_data->vops;
2187 { .compatible = "google,gs101-ufs",
2189 { .compatible = "samsung,exynos7-ufs",
2191 { .compatible = "samsung,exynosautov9-ufs",
2193 { .compatible = "samsung,exynosautov9-ufs-vh",
2195 { .compatible = "tesla,fsd-ufs",
2212 .name = "exynos-ufshc",