// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/time.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/gpio/consumer.h>
#include <linux/reset-controller.h>
#include <linux/devfreq.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/unipro.h>
#include "ufs-qcom.h"
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);

static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
	return container_of(rcd, struct ufs_qcom_host, rcdev);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
				       const char *prefix, void *priv)
{
	ufshcd_dump_regs(hba, offset, len * 4, prefix);
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out, bool optional)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (!IS_ERR(clk)) {
		*clk_out = clk;
		return 0;
	}

	err = PTR_ERR(clk);

	if (optional && err == -ENOENT) {
		*clk_out = NULL;
		return 0;
	}

	if (err != -EPROBE_DEFER)
		dev_err(dev, "failed to get %s err %d\n", name, err);

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
				       host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
				       host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
				       host->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
				       host->tx_l1_sync_clk);
	if (err)
		goto disable_rx_l1;

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (has_acpi_companion(dev))
		return 0;

	err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
				    &host->rx_l0_sync_clk, false);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
				    &host->tx_l0_sync_clk, false);
	if (err)
		goto out;

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
					    &host->rx_l1_sync_clk, false);
		if (err)
			goto out;

		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
					    &host->tx_l1_sync_clk, true);
	}
out:
	return err;
}

static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * we might have scheduled out for long during polling so
	 * check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
			__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
			__func__, err);
	}

	return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		    REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}

/*
 * ufs_qcom_host_reset - reset host controller and PHY
 */
static int ufs_qcom_host_reset(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	bool reenable_intr = false;

	if (!host->core_reset) {
		dev_warn(hba->dev, "%s: reset control not set\n", __func__);
		goto out;
	}

	reenable_intr = hba->is_irq_enabled;
	disable_irq(hba->irq);
	hba->is_irq_enabled = false;

	ret = reset_control_assert(host->core_reset);
	if (ret) {
		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * The hardware requirement for delay between assert/deassert
	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
	 * ~125us (4/32768). To be on the safe side add 200us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(host->core_reset);
	if (ret)
		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
			__func__, ret);

	usleep_range(1000, 1100);

	if (reenable_intr) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}

out:
	return ret;
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;

	/* Reset UFS Host Controller and PHY */
	ret = ufs_qcom_host_reset(hba);
	if (ret)
		dev_warn(hba->dev, "%s: host reset returned %d\n",
			 __func__, ret);

	if (is_rate_B)
		phy_set_mode(phy, PHY_MODE_UFS_HS_B);

	/* phy initialization - calibrate the phy */
	ret = phy_init(phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* power on phy - start serdes and phy's power and clocks */
	ret = phy_power_on(phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
			__func__, ret);
		goto out_disable_phy;
	}

	ufs_qcom_select_unipro_mode(host);

	return 0;

out_disable_phy:
	phy_exit(phy);
out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default, and
 * this function enables them (after every UFS link startup) to save some power
 * leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		ufs_qcom_ice_enable(host);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/*
 * Returns zero for success and non-zero in case of a failure
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER
	 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
	 * Aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear,
				ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* these two register fields shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			err = -EINVAL;
			goto out;
		}

		if (ufs_qcom_cap_qunipro(host))
			/*
			 * set unipro core clock cycles to 150 & clear clock
			 * divider
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);

		/*
		 * Some UFS devices (and possibly the host) have issues if LCC
		 * is enabled. So we are setting PA_Local_TX_LCC_Enable to 0
		 * before link startup which will make sure that both host
		 * and device TX LCC are disabled once link startup is
		 * completed.
		 */
		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
			err = ufshcd_disable_host_tx_lcc(hba);

		break;
	default:
		break;
	}

out:
	return err;
}

static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/* reset gpio is optional */
	if (!host->device_reset)
		return;

	gpiod_set_value_cansleep(host->device_reset, asserted);
}

static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			    enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;

	if (status == PRE_CHANGE)
		return 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* reset the connected UFS device during power down */
		ufs_qcom_device_reset_ctrl(hba, true);

	} else if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
	}

	return 0;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	if (ufs_qcom_is_link_off(hba)) {
		err = phy_power_on(phy);
		if (err) {
			dev_err(hba->dev, "%s: failed PHY power on: %d\n",
				__func__, err);
			return err;
		}

		err = ufs_qcom_enable_lane_clks(host);
		if (err)
			return err;

	} else if (!ufs_qcom_is_link_active(hba)) {
		err = ufs_qcom_enable_lane_clks(host);
		if (err)
			return err;
	}

	return ufs_qcom_ice_resume(host);
}

static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active for specific time after
		 * hibern8 enter.
		 */
		if (!enable) {
			unsigned long gating_wait;

			gating_wait = host->hba->dev_info.clk_gating_wait_us;
			if (!gating_wait) {
				udelay(1);
			} else {
				/*
				 * bRefClkGatingWaitTime defines the minimum
				 * time for which the reference clock is
				 * required by device during transition from
				 * HS-MODE to LS-MODE or HIBERN8 state. Give it
				 * more delay to be on the safe side.
				 */
				gating_wait += 10;
				usleep_range(gating_wait, gating_wait + 10);
			}
		}

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/*
		 * Make sure the write to ref_clk reaches the destination and
		 * is not stored in a Write Buffer (WB).
		 */
		readl(host->dev_ref_clk_ctrl_mmio);

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params ufs_qcom_cap;
	int ret = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
					       dev_max_params,
					       dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
			       __func__);
			goto out;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
		    ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);

		if (host->hw_ver.major >= 0x4) {
			ufshcd_dme_configure_adapt(hba,
						   dev_req_params->gear_tx,
						   PA_INITIAL_ADAPT);
		}
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
		       dev_req_params, sizeof(*dev_req_params));

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
		    !ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		goto out;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     (pa_vs_config_reg1 | (1 << 12)));

out:
	return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
		hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;

	return err;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return ufshci_version(1, 1);
	else
		return ufshci_version(2, 0);
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller might have some non-standard behaviours
 * (quirks) compared to what is specified by the UFSHCI specification.
 * Advertise all such quirks to the standard UFS host controller driver so
 * that it takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->caps |= UFSHCD_CAP_CRYPTO;
	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	if (host->hw_ver.major >= 0x2) {
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}

/**
 * ufs_qcom_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	switch (status) {
	case PRE_CHANGE:
		if (!on) {
			if (!ufs_qcom_is_link_active(hba)) {
				/* disable device ref_clk */
				ufs_qcom_dev_ref_clk_ctrl(host, false);
			}
		}
		break;
	case POST_CHANGE:
		if (on) {
			/* enable the device ref clock for HS mode */
			if (ufshcd_is_hs_mode(&hba->pwr_info))
				ufs_qcom_dev_ref_clk_ctrl(host, true);
		}
		break;
	}

	return 0;
}

static int
ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

	/* Currently this code only knows about a single reset. */
	WARN_ON(id);
	ufs_qcom_assert_reset(host->hba);
	/* provide 1ms delay to let the reset pulse propagate. */
	usleep_range(1000, 1100);
	return 0;
}

static int
ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

	/* Currently this code only knows about a single reset. */
	WARN_ON(id);
	ufs_qcom_deassert_reset(host->hba);

	/*
	 * after reset deassertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	return 0;
}

static const struct reset_control_ops ufs_qcom_reset_ops = {
	.assert = ufs_qcom_reset_assert,
	.deassert = ufs_qcom_reset_deassert,
};

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct ufs_qcom_host *host;
	struct resource *res;
	struct ufs_clk_info *clki;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	/* Make a two way bind between the qcom host and the hba */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/* Setup the optional reset control of HCI */
	host->core_reset = devm_reset_control_get_optional(hba->dev, "rst");
	if (IS_ERR(host->core_reset)) {
		err = dev_err_probe(dev, PTR_ERR(host->core_reset),
				    "Failed to get reset control\n");
		goto out_variant_clear;
	}

	/* Fire up the reset controller. Failure here is non-fatal. */
	host->rcdev.of_node = dev->of_node;
	host->rcdev.ops = &ufs_qcom_reset_ops;
	host->rcdev.owner = dev->driver->owner;
	host->rcdev.nr_resets = 1;
	err = devm_reset_controller_register(dev, &host->rcdev);
	if (err) {
		dev_warn(dev, "Failed to register reset controller\n");
		err = 0;
	}

	if (!has_acpi_companion(dev)) {
		host->generic_phy = devm_phy_get(dev, "ufsphy");
		if (IS_ERR(host->generic_phy)) {
			err = dev_err_probe(dev, PTR_ERR(host->generic_phy),
					    "Failed to get PHY\n");
			goto out_variant_clear;
		}
	}

	host->device_reset = devm_gpiod_get_optional(dev, "reset",
						     GPIOD_OUT_HIGH);
	if (IS_ERR(host->device_reset)) {
		err = PTR_ERR(host->device_reset);
		if (err != -EPROBE_DEFER)
			dev_err(dev, "failed to acquire reset gpio: %d\n", err);
		goto out_variant_clear;
	}

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
					 &host->hw_ver.minor,
					 &host->hw_ver.step);

	/*
	 * for newer controllers, device reference clock control bit has
	 * moved inside UFS controller register address space itself.
	 */
	if (host->hw_ver.major >= 0x02) {
		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		host->dev_ref_clk_en_mask = BIT(26);
	} else {
		/* "dev_ref_clk_ctrl_mem" is optional resource */
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "dev_ref_clk_ctrl_mem");
		if (res) {
			host->dev_ref_clk_ctrl_mmio =
					devm_ioremap_resource(dev, res);
			if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
				host->dev_ref_clk_ctrl_mmio = NULL;
			host->dev_ref_clk_en_mask = BIT(5);
		}
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk_unipro"))
			clki->keep_link_active = true;
	}

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	err = ufs_qcom_ice_init(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err) {
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
			 __func__, err);
		err = 0;
	}

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
	phy_exit(host->generic_phy);
}

static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     &core_clk_ctrl_reg);
	if (err)
		goto out;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	err = ufshcd_dme_set(hba,
			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     core_clk_ctrl_reg);
out:
	return err;
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	/* nothing to do as of now */
	return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 150 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	u32 core_clk_ctrl_reg;

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	err = ufshcd_dme_get(hba,
			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     &core_clk_ctrl_reg);

	/* make sure CORE_CLK_DIV_EN is cleared */
	if (!err &&
	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
		err = ufshcd_dme_set(hba,
				     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
				     core_clk_ctrl_reg);
	}

	return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 75 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
	int err = 0;

	if (status == PRE_CHANGE) {
		err = ufshcd_uic_hibern8_enter(hba);
		if (err)
			return err;
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
		if (err)
			ufshcd_uic_hibern8_exit(hba);

	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);

		if (err || !dev_req_params) {
			ufshcd_uic_hibern8_exit(hba);
			goto out;
		}

		ufs_qcom_cfg_timers(hba,
				    dev_req_params->gear_rx,
				    dev_req_params->pwr_rx,
				    dev_req_params->hs_rate,
				    false);
		ufshcd_uic_hibern8_exit(hba);
	}

out:
	return err;
}

static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
		void *priv, void (*print_fn)(struct ufs_hba *hba,
		int offset, int num_regs, const char *str, void *priv))
{
	u32 reg;
	struct ufs_qcom_host *host;

	if (unlikely(!hba)) {
		pr_err("%s: hba is NULL\n", __func__);
		return;
	}
	if (unlikely(!print_fn)) {
		dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
		return;
	}

	host = ufshcd_get_variant(hba);
	if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
		return;

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
	print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);

	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UTP_DBG_RAMS_EN;
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
	print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
	print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);

	/* clear bit 17 - UTP_DBG_RAMS_EN */
	ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
	print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
	print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
	print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
	print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
	print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}

static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
	if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
			    UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
	} else {
		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
	}
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UNIPRO;
	host->testbus.select_minor = 37;
}

static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
	if (host->testbus.select_major >= TSTBUS_MAX) {
		dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
			__func__, host->testbus.select_major);
		return false;
	}

	return true;
}

int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		reg = UFS_UNIPRO_CFG;
		offset = 20;
		mask = 0xFFF;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufs_qcom_enable_test_bus(host);
	/*
	 * Make sure the test bus configuration is
	 * committed before returning.
1369 */ 1370 mb(); 1371 1372 return 0; 1373 } 1374 1375 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) 1376 { 1377 ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, 1378 "HCI Vendor Specific Registers "); 1379 1380 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); 1381 } 1382 1383 /** 1384 * ufs_qcom_device_reset() - toggle the (optional) device reset line 1385 * @hba: per-adapter instance 1386 * 1387 * Toggles the (optional) reset line to reset the attached device. 1388 */ 1389 static int ufs_qcom_device_reset(struct ufs_hba *hba) 1390 { 1391 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 1392 1393 /* reset gpio is optional */ 1394 if (!host->device_reset) 1395 return -EOPNOTSUPP; 1396 1397 /* 1398 * The UFS device shall detect reset pulses of 1us, sleep for 10us to 1399 * be on the safe side. 1400 */ 1401 ufs_qcom_device_reset_ctrl(hba, true); 1402 usleep_range(10, 15); 1403 1404 ufs_qcom_device_reset_ctrl(hba, false); 1405 usleep_range(10, 15); 1406 1407 return 0; 1408 } 1409 1410 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) 1411 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, 1412 struct devfreq_dev_profile *p, 1413 struct devfreq_simple_ondemand_data *d) 1414 { 1415 p->polling_ms = 60; 1416 d->upthreshold = 70; 1417 d->downdifferential = 5; 1418 } 1419 #else 1420 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, 1421 struct devfreq_dev_profile *p, 1422 struct devfreq_simple_ondemand_data *data) 1423 { 1424 } 1425 #endif 1426 1427 /* 1428 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations 1429 * 1430 * The variant operations configure the necessary controller and PHY 1431 * handshake during initialization. 1432 */ 1433 static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { 1434 .name = "qcom", 1435 .init = ufs_qcom_init, 1436 .exit = ufs_qcom_exit, 1437 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version, 1438 .clk_scale_notify = ufs_qcom_clk_scale_notify, 1439 .setup_clocks = ufs_qcom_setup_clocks, 1440 .hce_enable_notify = ufs_qcom_hce_enable_notify, 1441 .link_startup_notify = ufs_qcom_link_startup_notify, 1442 .pwr_change_notify = ufs_qcom_pwr_change_notify, 1443 .apply_dev_quirks = ufs_qcom_apply_dev_quirks, 1444 .suspend = ufs_qcom_suspend, 1445 .resume = ufs_qcom_resume, 1446 .dbg_register_dump = ufs_qcom_dump_dbg_regs, 1447 .device_reset = ufs_qcom_device_reset, 1448 .config_scaling_param = ufs_qcom_config_scaling_param, 1449 .program_key = ufs_qcom_ice_program_key, 1450 }; 1451 1452 /** 1453 * ufs_qcom_probe - probe routine of the driver 1454 * @pdev: pointer to Platform device handle 1455 * 1456 * Return zero for success and non-zero for failure 1457 */ 1458 static int ufs_qcom_probe(struct platform_device *pdev) 1459 { 1460 int err; 1461 struct device *dev = &pdev->dev; 1462 1463 /* Perform generic probe */ 1464 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops); 1465 if (err) 1466 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); 1467 1468 return err; 1469 } 1470 1471 /** 1472 * ufs_qcom_remove - set driver_data of the device to NULL 1473 * @pdev: pointer to platform device handle 1474 * 1475 * Always returns 0 1476 */ 1477 static int ufs_qcom_remove(struct platform_device *pdev) 1478 { 1479 struct ufs_hba *hba = platform_get_drvdata(pdev); 1480 1481 pm_runtime_get_sync(&(pdev)->dev); 1482 ufshcd_remove(hba); 1483 return 0; 1484 } 1485 1486 static const struct of_device_id ufs_qcom_of_match[] = { 1487 { .compatible = "qcom,ufshc"}, 1488 {}, 1489 }; 1490 
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
	{ "QCOM24A5" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif

static const struct dev_pm_ops ufs_qcom_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_qcom_pltform = {
	.probe	= ufs_qcom_probe,
	.remove	= ufs_qcom_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	= {
		.name	= "ufshcd-qcom",
		.pm	= &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
		.acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");