1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/acpi.h> 7 #include <linux/clk.h> 8 #include <linux/cleanup.h> 9 #include <linux/delay.h> 10 #include <linux/devfreq.h> 11 #include <linux/gpio/consumer.h> 12 #include <linux/interconnect.h> 13 #include <linux/module.h> 14 #include <linux/of.h> 15 #include <linux/phy/phy.h> 16 #include <linux/platform_device.h> 17 #include <linux/pm_domain.h> 18 #include <linux/reset-controller.h> 19 #include <linux/time.h> 20 #include <linux/unaligned.h> 21 #include <linux/units.h> 22 23 #include <soc/qcom/ice.h> 24 25 #include <ufs/ufshcd.h> 26 #include <ufs/ufshci.h> 27 #include <ufs/ufs_quirks.h> 28 #include <ufs/unipro.h> 29 #include "ufshcd-pltfrm.h" 30 #include "ufs-qcom.h" 31 32 #define MCQ_QCFGPTR_MASK GENMASK(7, 0) 33 #define MCQ_QCFGPTR_UNIT 0x200 34 #define MCQ_SQATTR_OFFSET(c) \ 35 ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT) 36 #define MCQ_QCFG_SIZE 0x40 37 38 /* De-emphasis for gear-5 */ 39 #define DEEMPHASIS_3_5_dB 0x04 40 #define NO_DEEMPHASIS 0x0 41 42 #define UFS_ICE_SYNC_RST_SEL BIT(3) 43 #define UFS_ICE_SYNC_RST_SW BIT(4) 44 45 enum { 46 TSTBUS_UAWM, 47 TSTBUS_UARM, 48 TSTBUS_TXUC, 49 TSTBUS_RXUC, 50 TSTBUS_DFC, 51 TSTBUS_TRLUT, 52 TSTBUS_TMRLUT, 53 TSTBUS_OCSC, 54 TSTBUS_UTP_HCI, 55 TSTBUS_COMBINED, 56 TSTBUS_WRAPPER, 57 TSTBUS_UNIPRO, 58 TSTBUS_MAX, 59 }; 60 61 #define QCOM_UFS_MAX_GEAR 5 62 #define QCOM_UFS_MAX_LANE 2 63 64 enum { 65 MODE_MIN, 66 MODE_PWM, 67 MODE_HS_RA, 68 MODE_HS_RB, 69 MODE_MAX, 70 }; 71 72 static const struct __ufs_qcom_bw_table { 73 u32 mem_bw; 74 u32 cfg_bw; 75 } ufs_qcom_bw_table[MODE_MAX + 1][QCOM_UFS_MAX_GEAR + 1][QCOM_UFS_MAX_LANE + 1] = { 76 [MODE_MIN][0][0] = { 0, 0 }, /* Bandwidth values in KB/s */ 77 [MODE_PWM][UFS_PWM_G1][UFS_LANE_1] = { 922, 1000 }, 78 [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 }, 79 [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 }, 80 [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 }, 81 [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752, 1000 }, 82 [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 }, 83 [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 }, 84 [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 }, 85 [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 }, 86 [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504, 1000 }, 87 [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 }, 88 [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 }, 89 [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 }, 90 [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 }, 91 [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 }, 92 [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 }, 93 [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 }, 94 [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 }, 95 [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 }, 96 [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 }, 97 [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 }, 98 [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 }, 99 [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 }, 100 [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 }, 101 [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 }, 102 [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 }, 103 [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 }, 104 [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 }, 105 [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 }, 106 
[MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 }, 107 [MODE_MAX][0][0] = { 7643136, 819200 }, 108 }; 109 110 static const struct { 111 int nminor; 112 char *prefix; 113 } testbus_info[TSTBUS_MAX] = { 114 [TSTBUS_UAWM] = {32, "TSTBUS_UAWM"}, 115 [TSTBUS_UARM] = {32, "TSTBUS_UARM"}, 116 [TSTBUS_TXUC] = {32, "TSTBUS_TXUC"}, 117 [TSTBUS_RXUC] = {32, "TSTBUS_RXUC"}, 118 [TSTBUS_DFC] = {32, "TSTBUS_DFC"}, 119 [TSTBUS_TRLUT] = {32, "TSTBUS_TRLUT"}, 120 [TSTBUS_TMRLUT] = {32, "TSTBUS_TMRLUT"}, 121 [TSTBUS_OCSC] = {32, "TSTBUS_OCSC"}, 122 [TSTBUS_UTP_HCI] = {32, "TSTBUS_UTP_HCI"}, 123 [TSTBUS_COMBINED] = {32, "TSTBUS_COMBINED"}, 124 [TSTBUS_WRAPPER] = {32, "TSTBUS_WRAPPER"}, 125 [TSTBUS_UNIPRO] = {256, "TSTBUS_UNIPRO"}, 126 }; 127 128 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); 129 static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba, 130 unsigned long freq, char *name); 131 static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq); 132 133 static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd) 134 { 135 return container_of(rcd, struct ufs_qcom_host, rcdev); 136 } 137 138 #ifdef CONFIG_SCSI_UFS_CRYPTO 139 /** 140 * ufs_qcom_config_ice_allocator() - ICE core allocator configuration 141 * 142 * @host: pointer to qcom specific variant structure. 143 */ 144 static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host) 145 { 146 struct ufs_hba *hba = host->hba; 147 static const uint8_t val[4] = { NUM_RX_R1W0, NUM_TX_R0W1, NUM_RX_R1W1, NUM_TX_R1W1 }; 148 u32 config; 149 150 if (!(host->caps & UFS_QCOM_CAP_ICE_CONFIG) || 151 !(host->hba->caps & UFSHCD_CAP_CRYPTO)) 152 return; 153 154 config = get_unaligned_le32(val); 155 156 ufshcd_writel(hba, ICE_ALLOCATOR_TYPE, REG_UFS_MEM_ICE_CONFIG); 157 ufshcd_writel(hba, config, REG_UFS_MEM_ICE_NUM_CORE); 158 } 159 160 static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) 161 { 162 if (host->hba->caps & UFSHCD_CAP_CRYPTO) 163 qcom_ice_enable(host->ice); 164 } 165 166 static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops; /* forward decl */ 167 168 static int ufs_qcom_ice_init(struct ufs_qcom_host *host) 169 { 170 struct ufs_hba *hba = host->hba; 171 struct blk_crypto_profile *profile = &hba->crypto_profile; 172 struct device *dev = hba->dev; 173 struct qcom_ice *ice; 174 union ufs_crypto_capabilities caps; 175 union ufs_crypto_cap_entry cap; 176 int err; 177 int i; 178 179 ice = devm_of_qcom_ice_get(dev); 180 if (ice == ERR_PTR(-EOPNOTSUPP)) { 181 dev_warn(dev, "Disabling inline encryption support\n"); 182 ice = NULL; 183 } 184 185 if (IS_ERR_OR_NULL(ice)) 186 return PTR_ERR_OR_ZERO(ice); 187 188 host->ice = ice; 189 190 /* Initialize the blk_crypto_profile */ 191 192 caps.reg_val = cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP)); 193 194 /* The number of keyslots supported is (CFGC+1) */ 195 err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1); 196 if (err) 197 return err; 198 199 profile->ll_ops = ufs_qcom_crypto_ops; 200 profile->max_dun_bytes_supported = 8; 201 profile->key_types_supported = qcom_ice_get_supported_key_type(ice); 202 profile->dev = dev; 203 204 /* 205 * Currently this driver only supports AES-256-XTS. All known versions 206 * of ICE support it, but to be safe make sure it is really declared in 207 * the crypto capability registers. The crypto capability registers 208 * also give the supported data unit size(s). 
209 */ 210 for (i = 0; i < caps.num_crypto_cap; i++) { 211 cap.reg_val = cpu_to_le32(ufshcd_readl(hba, 212 REG_UFS_CRYPTOCAP + 213 i * sizeof(__le32))); 214 if (cap.algorithm_id == UFS_CRYPTO_ALG_AES_XTS && 215 cap.key_size == UFS_CRYPTO_KEY_SIZE_256) 216 profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 217 cap.sdus_mask * 512; 218 } 219 220 hba->caps |= UFSHCD_CAP_CRYPTO; 221 hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE; 222 return 0; 223 } 224 225 static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host) 226 { 227 if (host->hba->caps & UFSHCD_CAP_CRYPTO) 228 return qcom_ice_resume(host->ice); 229 230 return 0; 231 } 232 233 static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host) 234 { 235 if (host->hba->caps & UFSHCD_CAP_CRYPTO) 236 return qcom_ice_suspend(host->ice); 237 238 return 0; 239 } 240 241 static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile, 242 const struct blk_crypto_key *key, 243 unsigned int slot) 244 { 245 struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); 246 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 247 int err; 248 249 ufshcd_hold(hba); 250 err = qcom_ice_program_key(host->ice, slot, key); 251 ufshcd_release(hba); 252 return err; 253 } 254 255 static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile, 256 const struct blk_crypto_key *key, 257 unsigned int slot) 258 { 259 struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); 260 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 261 int err; 262 263 ufshcd_hold(hba); 264 err = qcom_ice_evict_key(host->ice, slot); 265 ufshcd_release(hba); 266 return err; 267 } 268 269 static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile, 270 const u8 *eph_key, size_t eph_key_size, 271 u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) 272 { 273 struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); 274 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 275 276 return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size, 277 sw_secret); 278 } 279 280 static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile, 281 const u8 *raw_key, size_t raw_key_size, 282 u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) 283 { 284 struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); 285 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 286 287 return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key); 288 } 289 290 static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile, 291 u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) 292 { 293 struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); 294 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 295 296 return qcom_ice_generate_key(host->ice, lt_key); 297 } 298 299 static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile, 300 const u8 *lt_key, size_t lt_key_size, 301 u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) 302 { 303 struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); 304 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 305 306 return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key); 307 } 308 309 static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = { 310 .keyslot_program = ufs_qcom_ice_keyslot_program, 311 .keyslot_evict = ufs_qcom_ice_keyslot_evict, 312 .derive_sw_secret = ufs_qcom_ice_derive_sw_secret, 313 .import_key = ufs_qcom_ice_import_key, 314 .generate_key = ufs_qcom_ice_generate_key, 315 .prepare_key = ufs_qcom_ice_prepare_key, 316 }; 317 318 #else 319 320 static 
inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) 321 { 322 } 323 324 static int ufs_qcom_ice_init(struct ufs_qcom_host *host) 325 { 326 return 0; 327 } 328 329 static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host) 330 { 331 return 0; 332 } 333 334 static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host) 335 { 336 return 0; 337 } 338 339 static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host) 340 { 341 } 342 343 #endif 344 345 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) 346 { 347 if (!host->is_lane_clks_enabled) 348 return; 349 350 clk_bulk_disable_unprepare(host->num_clks, host->clks); 351 352 host->is_lane_clks_enabled = false; 353 } 354 355 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) 356 { 357 int err; 358 359 err = clk_bulk_prepare_enable(host->num_clks, host->clks); 360 if (err) 361 return err; 362 363 host->is_lane_clks_enabled = true; 364 365 return 0; 366 } 367 368 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) 369 { 370 int err; 371 struct device *dev = host->hba->dev; 372 373 if (has_acpi_companion(dev)) 374 return 0; 375 376 err = devm_clk_bulk_get_all(dev, &host->clks); 377 if (err <= 0) 378 return err; 379 380 host->num_clks = err; 381 382 return 0; 383 } 384 385 static int ufs_qcom_check_hibern8(struct ufs_hba *hba) 386 { 387 int err; 388 u32 tx_fsm_val; 389 unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); 390 391 do { 392 err = ufshcd_dme_get(hba, 393 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 394 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 395 &tx_fsm_val); 396 if (err || tx_fsm_val == TX_FSM_HIBERN8) 397 break; 398 399 /* sleep for max. 200us */ 400 usleep_range(100, 200); 401 } while (time_before(jiffies, timeout)); 402 403 /* 404 * we might have scheduled out for long during polling so 405 * check the state again. 406 */ 407 if (time_after(jiffies, timeout)) 408 err = ufshcd_dme_get(hba, 409 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 410 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 411 &tx_fsm_val); 412 413 if (err) { 414 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", 415 __func__, err); 416 } else if (tx_fsm_val != TX_FSM_HIBERN8) { 417 err = tx_fsm_val; 418 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", 419 __func__, err); 420 } 421 422 return err; 423 } 424 425 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) 426 { 427 ufshcd_rmwl(host->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1); 428 429 if (host->hw_ver.major >= 0x05) 430 ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0); 431 } 432 433 /* 434 * ufs_qcom_host_reset - reset host controller and PHY 435 */ 436 static int ufs_qcom_host_reset(struct ufs_hba *hba) 437 { 438 int ret; 439 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 440 bool reenable_intr; 441 442 if (!host->core_reset) 443 return 0; 444 445 reenable_intr = hba->is_irq_enabled; 446 ufshcd_disable_irq(hba); 447 448 ret = reset_control_assert(host->core_reset); 449 if (ret) { 450 dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", 451 __func__, ret); 452 return ret; 453 } 454 455 /* 456 * The hardware requirement for delay between assert/deassert 457 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to 458 * ~125us (4/32768). To be on the safe side add 200us delay. 
459 */ 460 usleep_range(200, 210); 461 462 ret = reset_control_deassert(host->core_reset); 463 if (ret) { 464 dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", 465 __func__, ret); 466 return ret; 467 } 468 469 usleep_range(1000, 1100); 470 471 if (reenable_intr) 472 ufshcd_enable_irq(hba); 473 474 return 0; 475 } 476 477 static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba) 478 { 479 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 480 481 if (host->hw_ver.major >= 0x4) 482 return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0)); 483 484 /* Default is HS-G3 */ 485 return UFS_HS_G3; 486 } 487 488 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) 489 { 490 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 491 struct ufs_host_params *host_params = &host->host_params; 492 struct phy *phy = host->generic_phy; 493 enum phy_mode mode; 494 int ret; 495 496 /* 497 * HW ver 5 can only support up to HS-G5 Rate-A due to HW limitations. 498 * If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A, 499 * so that the subsequent power mode change shall stick to Rate-A. 500 */ 501 if (host->hw_ver.major == 0x5 && host->phy_gear == UFS_HS_G5) 502 host_params->hs_rate = PA_HS_MODE_A; 503 504 mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A; 505 506 /* Reset UFS Host Controller and PHY */ 507 ret = ufs_qcom_host_reset(hba); 508 if (ret) 509 return ret; 510 511 if (phy->power_count) 512 phy_power_off(phy); 513 514 515 /* phy initialization - calibrate the phy */ 516 ret = phy_init(phy); 517 if (ret) { 518 dev_err(hba->dev, "%s: phy init failed, ret = %d\n", 519 __func__, ret); 520 return ret; 521 } 522 523 ret = phy_set_mode_ext(phy, mode, host->phy_gear); 524 if (ret) 525 goto out_disable_phy; 526 527 /* power on phy - start serdes and phy's power and clocks */ 528 ret = phy_power_on(phy); 529 if (ret) { 530 dev_err(hba->dev, "%s: phy power on failed, ret = %d\n", 531 __func__, ret); 532 goto out_disable_phy; 533 } 534 535 ret = phy_calibrate(phy); 536 if (ret) { 537 dev_err(hba->dev, "Failed to calibrate PHY: %d\n", ret); 538 goto out_disable_phy; 539 } 540 541 ufs_qcom_select_unipro_mode(host); 542 543 return 0; 544 545 out_disable_phy: 546 phy_exit(phy); 547 548 return ret; 549 } 550 551 /* 552 * The UTP controller has a number of internal clock gating cells (CGCs). 553 * Internal hardware sub-modules within the UTP controller control the CGCs. 554 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved 555 * in a specific operation, UTP controller CGCs are by default disabled and 556 * this function enables them (after every UFS link startup) to save some power 557 * leakage. 
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	int err;

	/* Enable UTP internal clock gating */
	ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
		    REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	ufshcd_readl(hba, REG_UFS_CFG2);

	/* Enable Unipro internal clock gating */
	err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
			     DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
			     PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
			     DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
			     DME_VS_CORE_CLK_CTRL);
out:
	if (err)
		dev_err(hba->dev, "failed to enable hw clk gating\n");
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_qcom_power_up_sequence(hba);
		if (err)
			return err;

		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		ufs_qcom_ice_enable(host);
		ufs_qcom_config_ice_allocator(host);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

static int ufs_qcom_fw_managed_hce_enable_notify(struct ufs_hba *hba,
						 enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_select_unipro_mode(host);
		break;
	case POST_CHANGE:
		ufs_qcom_enable_hw_clk_gating(hba);
		ufs_qcom_ice_enable(host);
		break;
	default:
		dev_err(hba->dev, "Invalid status %d\n", status);
		return -EINVAL;
	}

	return 0;
}

/**
 * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers
 *
 * @hba: host controller instance
 * @is_pre_scale_up: flag to indicate the pre-scale-up condition.
 * @freq: target opp freq
 * Return: zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsigned long freq)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	unsigned long clk_freq = 0;
	u32 core_clk_cycles_per_us;

	/*
	 * UTP controller uses SYS1CLK_1US_REG register for Interrupt
	 * Aggregation logic.
	 * It is mandatory to write SYS1CLK_1US_REG register on UFS host
	 * controller V4.0.0 onwards.
664 */ 665 if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba)) 666 return 0; 667 668 if (hba->use_pm_opp && freq != ULONG_MAX) { 669 clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk"); 670 if (clk_freq) 671 goto cfg_timers; 672 } 673 674 list_for_each_entry(clki, &hba->clk_list_head, list) { 675 if (!strcmp(clki->name, "core_clk")) { 676 if (freq == ULONG_MAX) { 677 clk_freq = clki->max_freq; 678 break; 679 } 680 681 if (is_pre_scale_up) 682 clk_freq = clki->max_freq; 683 else 684 clk_freq = clk_get_rate(clki->clk); 685 break; 686 } 687 688 } 689 690 cfg_timers: 691 /* If frequency is smaller than 1MHz, set to 1MHz */ 692 if (clk_freq < DEFAULT_CLK_RATE_HZ) 693 clk_freq = DEFAULT_CLK_RATE_HZ; 694 695 core_clk_cycles_per_us = clk_freq / USEC_PER_SEC; 696 if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { 697 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); 698 /* 699 * make sure above write gets applied before we return from 700 * this function. 701 */ 702 ufshcd_readl(hba, REG_UFS_SYS1CLK_1US); 703 } 704 705 return 0; 706 } 707 708 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, 709 enum ufs_notify_change_status status) 710 { 711 int err = 0; 712 713 switch (status) { 714 case PRE_CHANGE: 715 if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) { 716 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", 717 __func__); 718 return -EINVAL; 719 } 720 721 err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX); 722 if (err) 723 dev_err(hba->dev, "cfg core clk ctrl failed\n"); 724 /* 725 * Some UFS devices (and may be host) have issues if LCC is 726 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 727 * before link startup which will make sure that both host 728 * and device TX LCC are disabled once link startup is 729 * completed. 730 */ 731 err = ufshcd_disable_host_tx_lcc(hba); 732 733 break; 734 default: 735 break; 736 } 737 738 return err; 739 } 740 741 static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted) 742 { 743 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 744 745 /* reset gpio is optional */ 746 if (!host->device_reset) 747 return; 748 749 gpiod_set_value_cansleep(host->device_reset, asserted); 750 } 751 752 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, 753 enum ufs_notify_change_status status) 754 { 755 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 756 757 if (status == PRE_CHANGE) 758 return 0; 759 760 if (!ufs_qcom_is_link_active(hba)) 761 ufs_qcom_disable_lane_clks(host); 762 763 764 /* reset the connected UFS device during power down */ 765 if (ufs_qcom_is_link_off(hba) && host->device_reset) { 766 ufs_qcom_device_reset_ctrl(hba, true); 767 /* 768 * After sending the SSU command, asserting the rst_n 769 * line causes the device firmware to wake up and 770 * execute its reset routine. 771 * 772 * During this process, the device may draw current 773 * beyond the permissible limit for low-power mode (LPM). 774 * A 10ms delay, based on experimental observations, 775 * allows the UFS device to complete its hardware reset 776 * before transitioning the power rail to LPM. 
777 */ 778 usleep_range(10000, 11000); 779 } 780 781 return ufs_qcom_ice_suspend(host); 782 } 783 784 static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) 785 { 786 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 787 int err; 788 u32 reg_val; 789 790 err = ufs_qcom_enable_lane_clks(host); 791 if (err) 792 return err; 793 794 if ((!ufs_qcom_is_link_active(hba)) && 795 host->hw_ver.major == 5 && 796 host->hw_ver.minor == 0 && 797 host->hw_ver.step == 0) { 798 ufshcd_writel(hba, UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW, UFS_MEM_ICE_CFG); 799 reg_val = ufshcd_readl(hba, UFS_MEM_ICE_CFG); 800 reg_val &= ~(UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW); 801 /* 802 * HW documentation doesn't recommend any delay between the 803 * reset set and clear. But we are enforcing an arbitrary delay 804 * to give flops enough time to settle in. 805 */ 806 usleep_range(50, 100); 807 ufshcd_writel(hba, reg_val, UFS_MEM_ICE_CFG); 808 ufshcd_readl(hba, UFS_MEM_ICE_CFG); 809 } 810 811 return ufs_qcom_ice_resume(host); 812 } 813 814 static int ufs_qcom_fw_managed_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, 815 enum ufs_notify_change_status status) 816 { 817 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 818 819 if (status == PRE_CHANGE) 820 return 0; 821 822 pm_runtime_put_sync(hba->dev); 823 824 return ufs_qcom_ice_suspend(host); 825 } 826 827 static int ufs_qcom_fw_managed_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) 828 { 829 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 830 int err; 831 832 err = pm_runtime_resume_and_get(hba->dev); 833 if (err) { 834 dev_err(hba->dev, "PM runtime resume failed: %d\n", err); 835 return err; 836 } 837 838 return ufs_qcom_ice_resume(host); 839 } 840 841 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) 842 { 843 if (host->dev_ref_clk_ctrl_mmio && 844 (enable ^ host->is_dev_ref_clk_enabled)) { 845 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio); 846 847 if (enable) 848 temp |= host->dev_ref_clk_en_mask; 849 else 850 temp &= ~host->dev_ref_clk_en_mask; 851 852 /* 853 * If we are here to disable this clock it might be immediately 854 * after entering into hibern8 in which case we need to make 855 * sure that device ref_clk is active for specific time after 856 * hibern8 enter. 857 */ 858 if (!enable) { 859 unsigned long gating_wait; 860 861 gating_wait = host->hba->dev_info.clk_gating_wait_us; 862 if (!gating_wait) { 863 udelay(1); 864 } else { 865 /* 866 * bRefClkGatingWaitTime defines the minimum 867 * time for which the reference clock is 868 * required by device during transition from 869 * HS-MODE to LS-MODE or HIBERN8 state. Give it 870 * more delay to be on the safe side. 871 */ 872 gating_wait += 10; 873 usleep_range(gating_wait, gating_wait + 10); 874 } 875 } 876 877 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio); 878 879 /* 880 * Make sure the write to ref_clk reaches the destination and 881 * not stored in a Write Buffer (WB). 882 */ 883 readl(host->dev_ref_clk_ctrl_mmio); 884 885 /* 886 * If we call hibern8 exit after this, we need to make sure that 887 * device ref_clk is stable for at least 1us before the hibern8 888 * exit command. 
889 */ 890 if (enable) 891 udelay(1); 892 893 host->is_dev_ref_clk_enabled = enable; 894 } 895 } 896 897 static int ufs_qcom_icc_set_bw(struct ufs_qcom_host *host, u32 mem_bw, u32 cfg_bw) 898 { 899 struct device *dev = host->hba->dev; 900 int ret; 901 902 ret = icc_set_bw(host->icc_ddr, 0, mem_bw); 903 if (ret < 0) { 904 dev_err(dev, "failed to set bandwidth request: %d\n", ret); 905 return ret; 906 } 907 908 ret = icc_set_bw(host->icc_cpu, 0, cfg_bw); 909 if (ret < 0) { 910 dev_err(dev, "failed to set bandwidth request: %d\n", ret); 911 return ret; 912 } 913 914 return 0; 915 } 916 917 static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *host) 918 { 919 struct ufs_pa_layer_attr *p = &host->dev_req_params; 920 int gear = max_t(u32, p->gear_rx, p->gear_tx); 921 int lane = max_t(u32, p->lane_rx, p->lane_tx); 922 923 if (WARN_ONCE(gear > QCOM_UFS_MAX_GEAR, 924 "ICC scaling for UFS Gear (%d) not supported. Using Gear (%d) bandwidth\n", 925 gear, QCOM_UFS_MAX_GEAR)) 926 gear = QCOM_UFS_MAX_GEAR; 927 928 if (WARN_ONCE(lane > QCOM_UFS_MAX_LANE, 929 "ICC scaling for UFS Lane (%d) not supported. Using Lane (%d) bandwidth\n", 930 lane, QCOM_UFS_MAX_LANE)) 931 lane = QCOM_UFS_MAX_LANE; 932 933 if (ufshcd_is_hs_mode(p)) { 934 if (p->hs_rate == PA_HS_MODE_B) 935 return ufs_qcom_bw_table[MODE_HS_RB][gear][lane]; 936 else 937 return ufs_qcom_bw_table[MODE_HS_RA][gear][lane]; 938 } else { 939 return ufs_qcom_bw_table[MODE_PWM][gear][lane]; 940 } 941 } 942 943 static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host) 944 { 945 struct __ufs_qcom_bw_table bw_table; 946 947 bw_table = ufs_qcom_get_bw_table(host); 948 949 return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw); 950 } 951 952 static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_lanes) 953 { 954 u32 equalizer_val; 955 int ret, i; 956 957 /* Determine the equalizer value based on the gear */ 958 equalizer_val = (gear == 5) ? DEEMPHASIS_3_5_dB : NO_DEEMPHASIS; 959 960 for (i = 0; i < tx_lanes; i++) { 961 ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HS_EQUALIZER, i), 962 equalizer_val); 963 if (ret) 964 dev_err(hba->dev, "%s: failed equalizer lane %d\n", 965 __func__, i); 966 } 967 } 968 969 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, 970 enum ufs_notify_change_status status, 971 const struct ufs_pa_layer_attr *dev_max_params, 972 struct ufs_pa_layer_attr *dev_req_params) 973 { 974 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 975 struct ufs_host_params *host_params = &host->host_params; 976 int ret = 0; 977 978 if (!dev_req_params) { 979 pr_err("%s: incoming dev_req_params is NULL\n", __func__); 980 return -EINVAL; 981 } 982 983 switch (status) { 984 case PRE_CHANGE: 985 ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params); 986 if (ret) { 987 dev_err(hba->dev, "%s: failed to determine capabilities\n", 988 __func__); 989 return ret; 990 } 991 992 /* 993 * During UFS driver probe, always update the PHY gear to match the negotiated 994 * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled, 995 * the second init can program the optimal PHY settings. This allows one to start 996 * the first init with either the minimum or the maximum support gear. 997 */ 998 if (hba->ufshcd_state == UFSHCD_STATE_RESET) { 999 /* 1000 * Skip REINIT if the negotiated gear matches with the 1001 * initial phy_gear. Otherwise, update the phy_gear to 1002 * program the optimal gear setting during REINIT. 
			 */
			if (host->phy_gear == dev_req_params->gear_tx)
				hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
			else
				host->phy_gear = dev_req_params->gear_tx;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
		    ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);

		if (host->hw_ver.major >= 0x4) {
			ufshcd_dme_configure_adapt(hba,
						   dev_req_params->gear_tx,
						   PA_INITIAL_ADAPT);
		}

		if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING)
			ufs_qcom_set_tx_hs_equalizer(hba,
					dev_req_params->gear_tx, dev_req_params->lane_tx);

		break;
	case POST_CHANGE:
		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
		       dev_req_params, sizeof(*dev_req_params));

		ufs_qcom_icc_update_bw(host);

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
		    !ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		return err;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			      (pa_vs_config_reg1 | (1 << 12)));
}

static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TX_HSG1_SYNC_LENGTH),
				  PA_TX_HSG1_SYNC_LENGTH_VAL);
	if (err)
		dev_err(hba->dev, "Failed (%d) to set PA_TX_HSG1_SYNC_LENGTH\n", err);
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH)
		ufs_qcom_override_pa_tx_hsg1_sync_len(hba);

	return err;
}

/* UFS device-specific quirks */
static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_WDC,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH |
		   UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING },
	{}
};

static void ufs_qcom_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_qcom_dev_fixups);
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	return ufshci_version(2, 0);
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller may exhibit certain non-standard behaviours
 * (quirks) beyond what the UFSHCI specification defines. Advertise all such
 * quirks to the standard UFS host controller driver so that it takes them into
 * account.
1117 */ 1118 static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) 1119 { 1120 const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev); 1121 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 1122 1123 if (host->hw_ver.major == 0x2) 1124 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; 1125 1126 if (host->hw_ver.major > 0x3) 1127 hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; 1128 1129 if (drvdata && drvdata->quirks) 1130 hba->quirks |= drvdata->quirks; 1131 } 1132 1133 static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host) 1134 { 1135 struct ufs_host_params *host_params = &host->host_params; 1136 u32 val, dev_major; 1137 1138 /* 1139 * Default to powering up the PHY to the max gear possible, which is 1140 * backwards compatible with lower gears but not optimal from 1141 * a power usage point of view. After device negotiation, if the 1142 * gear is lower a reinit will be performed to program the PHY 1143 * to the ideal gear for this combo of controller and device. 1144 */ 1145 host->phy_gear = host_params->hs_tx_gear; 1146 1147 if (host->hw_ver.major < 0x4) { 1148 /* 1149 * These controllers only have one PHY init sequence, 1150 * let's power up the PHY using that (the minimum supported 1151 * gear, UFS_HS_G2). 1152 */ 1153 host->phy_gear = UFS_HS_G2; 1154 } else if (host->hw_ver.major >= 0x5) { 1155 val = ufshcd_readl(host->hba, REG_UFS_DEBUG_SPARE_CFG); 1156 dev_major = FIELD_GET(UFS_DEV_VER_MAJOR_MASK, val); 1157 1158 /* 1159 * Since the UFS device version is populated, let's remove the 1160 * REINIT quirk as the negotiated gear won't change during boot. 1161 * So there is no need to do reinit. 1162 */ 1163 if (dev_major != 0x0) 1164 host->hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; 1165 1166 /* 1167 * For UFS 3.1 device and older, power up the PHY using HS-G4 1168 * PHY gear to save power. 
		 */
		if (dev_major > 0x0 && dev_major < 0x4)
			host->phy_gear = UFS_HS_G4;
	}
}

static void ufs_qcom_parse_gear_limits(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_host_params *host_params = &host->host_params;
	u32 hs_gear_old = host_params->hs_tx_gear;

	ufshcd_parse_gear_limits(hba, host_params);
	if (host_params->hs_tx_gear != hs_gear_old) {
		host->phy_gear = host_params->hs_tx_gear;
	}
}

static void ufs_qcom_set_host_params(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_host_params *host_params = &host->host_params;

	ufshcd_init_host_params(host_params);

	/* This driver only supports symmetric gear setting i.e., hs_tx_gear == hs_rx_gear */
	host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba);
}

static void ufs_qcom_set_host_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major >= 0x5)
		host->caps |= UFS_QCOM_CAP_ICE_CONFIG;
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	ufs_qcom_set_host_caps(hba);
}

/**
 * ufs_qcom_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Some clocks come from the PHY, so they need to be managed together with the
 * controller clocks, which also gives better power savings. Hence keep the
 * phy_power_off/on calls in ufs_qcom_setup_clocks, so that the PHY's
 * regulators & clks can be turned on/off along with the UFS clocks.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy;
	int err;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
1243 */ 1244 if (!host) 1245 return 0; 1246 1247 phy = host->generic_phy; 1248 1249 switch (status) { 1250 case PRE_CHANGE: 1251 if (on) { 1252 ufs_qcom_icc_update_bw(host); 1253 if (ufs_qcom_is_link_hibern8(hba)) { 1254 err = ufs_qcom_enable_lane_clks(host); 1255 if (err) { 1256 dev_err(hba->dev, "enable lane clks failed, ret=%d\n", err); 1257 return err; 1258 } 1259 } 1260 } else { 1261 if (!ufs_qcom_is_link_active(hba)) { 1262 /* disable device ref_clk */ 1263 ufs_qcom_dev_ref_clk_ctrl(host, false); 1264 } 1265 1266 err = phy_power_off(phy); 1267 if (err) { 1268 dev_err(hba->dev, "phy power off failed, ret=%d\n", err); 1269 return err; 1270 } 1271 } 1272 break; 1273 case POST_CHANGE: 1274 if (on) { 1275 err = phy_power_on(phy); 1276 if (err) { 1277 dev_err(hba->dev, "phy power on failed, ret = %d\n", err); 1278 return err; 1279 } 1280 1281 /* enable the device ref clock for HS mode*/ 1282 if (ufshcd_is_hs_mode(&hba->pwr_info)) 1283 ufs_qcom_dev_ref_clk_ctrl(host, true); 1284 } else { 1285 if (ufs_qcom_is_link_hibern8(hba)) 1286 ufs_qcom_disable_lane_clks(host); 1287 1288 ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw, 1289 ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw); 1290 } 1291 break; 1292 } 1293 1294 return 0; 1295 } 1296 1297 static int 1298 ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) 1299 { 1300 struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev); 1301 1302 ufs_qcom_assert_reset(host->hba); 1303 /* provide 1ms delay to let the reset pulse propagate. */ 1304 usleep_range(1000, 1100); 1305 return 0; 1306 } 1307 1308 static int 1309 ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) 1310 { 1311 struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev); 1312 1313 ufs_qcom_deassert_reset(host->hba); 1314 1315 /* 1316 * after reset deassertion, phy will need all ref clocks, 1317 * voltage, current to settle down before starting serdes. 1318 */ 1319 usleep_range(1000, 1100); 1320 return 0; 1321 } 1322 1323 static const struct reset_control_ops ufs_qcom_reset_ops = { 1324 .assert = ufs_qcom_reset_assert, 1325 .deassert = ufs_qcom_reset_deassert, 1326 }; 1327 1328 static int ufs_qcom_icc_init(struct ufs_qcom_host *host) 1329 { 1330 struct device *dev = host->hba->dev; 1331 int ret; 1332 1333 host->icc_ddr = devm_of_icc_get(dev, "ufs-ddr"); 1334 if (IS_ERR(host->icc_ddr)) 1335 return dev_err_probe(dev, PTR_ERR(host->icc_ddr), 1336 "failed to acquire interconnect path\n"); 1337 1338 host->icc_cpu = devm_of_icc_get(dev, "cpu-ufs"); 1339 if (IS_ERR(host->icc_cpu)) 1340 return dev_err_probe(dev, PTR_ERR(host->icc_cpu), 1341 "failed to acquire interconnect path\n"); 1342 1343 /* 1344 * Set Maximum bandwidth vote before initializing the UFS controller and 1345 * device. Ideally, a minimal interconnect vote would suffice for the 1346 * initialization, but a max vote would allow faster initialization. 1347 */ 1348 ret = ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MAX][0][0].mem_bw, 1349 ufs_qcom_bw_table[MODE_MAX][0][0].cfg_bw); 1350 if (ret < 0) 1351 return dev_err_probe(dev, ret, "failed to set bandwidth request\n"); 1352 1353 return 0; 1354 } 1355 1356 /** 1357 * ufs_qcom_init - bind phy with controller 1358 * @hba: host controller instance 1359 * 1360 * Binds PHY with controller and powers up PHY enabling clocks 1361 * and regulators. 1362 * 1363 * Return: -EPROBE_DEFER if binding fails, returns negative error 1364 * on phy power up failure and returns zero on success. 
1365 */ 1366 static int ufs_qcom_init(struct ufs_hba *hba) 1367 { 1368 int err; 1369 struct device *dev = hba->dev; 1370 struct ufs_qcom_host *host; 1371 struct ufs_clk_info *clki; 1372 const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev); 1373 1374 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 1375 if (!host) 1376 return -ENOMEM; 1377 1378 /* Make a two way bind between the qcom host and the hba */ 1379 host->hba = hba; 1380 ufshcd_set_variant(hba, host); 1381 1382 /* Setup the optional reset control of HCI */ 1383 host->core_reset = devm_reset_control_get_optional(hba->dev, "rst"); 1384 if (IS_ERR(host->core_reset)) { 1385 err = dev_err_probe(dev, PTR_ERR(host->core_reset), 1386 "Failed to get reset control\n"); 1387 goto out_variant_clear; 1388 } 1389 1390 /* Fire up the reset controller. Failure here is non-fatal. */ 1391 host->rcdev.of_node = dev->of_node; 1392 host->rcdev.ops = &ufs_qcom_reset_ops; 1393 host->rcdev.owner = dev->driver->owner; 1394 host->rcdev.nr_resets = 1; 1395 err = devm_reset_controller_register(dev, &host->rcdev); 1396 if (err) 1397 dev_warn(dev, "Failed to register reset controller\n"); 1398 1399 if (!has_acpi_companion(dev)) { 1400 host->generic_phy = devm_phy_get(dev, "ufsphy"); 1401 if (IS_ERR(host->generic_phy)) { 1402 err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n"); 1403 goto out_variant_clear; 1404 } 1405 } 1406 1407 err = ufs_qcom_icc_init(host); 1408 if (err) 1409 goto out_variant_clear; 1410 1411 host->device_reset = devm_gpiod_get_optional(dev, "reset", 1412 GPIOD_OUT_HIGH); 1413 if (IS_ERR(host->device_reset)) { 1414 err = dev_err_probe(dev, PTR_ERR(host->device_reset), 1415 "Failed to acquire device reset gpio\n"); 1416 goto out_variant_clear; 1417 } 1418 1419 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, 1420 &host->hw_ver.minor, &host->hw_ver.step); 1421 1422 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1; 1423 host->dev_ref_clk_en_mask = BIT(26); 1424 1425 list_for_each_entry(clki, &hba->clk_list_head, list) { 1426 if (!strcmp(clki->name, "core_clk_unipro")) 1427 clki->keep_link_active = true; 1428 } 1429 1430 err = ufs_qcom_init_lane_clks(host); 1431 if (err) 1432 goto out_variant_clear; 1433 1434 ufs_qcom_set_caps(hba); 1435 ufs_qcom_advertise_quirks(hba); 1436 ufs_qcom_set_host_params(hba); 1437 ufs_qcom_set_phy_gear(host); 1438 ufs_qcom_parse_gear_limits(hba); 1439 1440 err = ufs_qcom_ice_init(host); 1441 if (err) 1442 goto out_variant_clear; 1443 1444 ufs_qcom_setup_clocks(hba, true, POST_CHANGE); 1445 1446 ufs_qcom_get_default_testbus_cfg(host); 1447 err = ufs_qcom_testbus_config(host); 1448 if (err) 1449 /* Failure is non-fatal */ 1450 dev_warn(dev, "%s: failed to configure the testbus %d\n", 1451 __func__, err); 1452 1453 if (drvdata && drvdata->no_phy_retention) 1454 hba->spm_lvl = UFS_PM_LVL_5; 1455 1456 return 0; 1457 1458 out_variant_clear: 1459 ufshcd_set_variant(hba, NULL); 1460 1461 return err; 1462 } 1463 1464 static void ufs_qcom_exit(struct ufs_hba *hba) 1465 { 1466 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 1467 1468 ufs_qcom_disable_lane_clks(host); 1469 phy_power_off(host->generic_phy); 1470 phy_exit(host->generic_phy); 1471 } 1472 1473 static int ufs_qcom_fw_managed_init(struct ufs_hba *hba) 1474 { 1475 struct device *dev = hba->dev; 1476 struct ufs_qcom_host *host; 1477 int err; 1478 1479 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 1480 if (!host) 1481 return -ENOMEM; 1482 1483 host->hba = hba; 1484 ufshcd_set_variant(hba, 
host);

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
					 &host->hw_ver.minor, &host->hw_ver.step);

	err = ufs_qcom_ice_init(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err)
		/* Failure is non-fatal */
		dev_warn(dev, "Failed to configure the testbus %d\n", err);

	hba->caps |= UFSHCD_CAP_WB_EN;

	ufs_qcom_advertise_quirks(hba);
	host->hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;

	hba->spm_lvl = hba->rpm_lvl = hba->pm_lvl_min = UFS_PM_LVL_5;

	ufs_qcom_set_host_params(hba);
	ufs_qcom_parse_gear_limits(hba);

	return 0;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
	return err;
}

static void ufs_qcom_fw_managed_exit(struct ufs_hba *hba)
{
	pm_runtime_put_sync(hba->dev);
}

/**
 * ufs_qcom_set_clk_40ns_cycles - Configure 40ns clk cycles
 *
 * @hba: host controller instance
 * @cycles_in_1us: No of cycles in 1us to be configured
 *
 * Return: error if the DME get/set configuration for 40ns fails,
 * zero on success.
 */
static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
					u32 cycles_in_1us)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	u32 cycles_in_40ns;
	u32 reg;
	int err;

	/*
	 * UFS host controller V4.0.0 onwards needs to program
	 * PA_VS_CORE_CLK_40NS_CYCLES attribute per programmed
	 * frequency of unipro core clk of UFS host controller.
	 */
	if (host->hw_ver.major < 4)
		return 0;

	/*
	 * The generic formula cycles_in_40ns = (freq_unipro / 25) is not
	 * applicable for all frequencies. For example, ceil(37.5 MHz / 25)
	 * gives 2, but ceil(403 MHz / 25) gives 17 whereas the hardware
	 * specification expects 16. Hence use the exact value mandated by
	 * the hardware specification for cycles_in_40ns instead of
	 * calculating it with the generic formula.
	 */
	switch (cycles_in_1us) {
	case UNIPRO_CORE_CLK_FREQ_403_MHZ:
		cycles_in_40ns = 16;
		break;
	case UNIPRO_CORE_CLK_FREQ_300_MHZ:
		cycles_in_40ns = 12;
		break;
	case UNIPRO_CORE_CLK_FREQ_201_5_MHZ:
		cycles_in_40ns = 8;
		break;
	case UNIPRO_CORE_CLK_FREQ_150_MHZ:
		cycles_in_40ns = 6;
		break;
	case UNIPRO_CORE_CLK_FREQ_100_MHZ:
		cycles_in_40ns = 4;
		break;
	case UNIPRO_CORE_CLK_FREQ_75_MHZ:
		cycles_in_40ns = 3;
		break;
	case UNIPRO_CORE_CLK_FREQ_37_5_MHZ:
		cycles_in_40ns = 2;
		break;
	default:
		dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n",
			cycles_in_1us);
		return -EINVAL;
	}

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), &reg);
	if (err)
		return err;

	reg &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK;
	reg |= cycles_in_40ns;

	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
}

static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	u32 cycles_in_1us = 0;
	u32 core_clk_ctrl_reg;
	unsigned long clk_freq;
	int err;

	if (hba->use_pm_opp && freq != ULONG_MAX) {
		clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
		if (clk_freq) {
			cycles_in_1us = ceil(clk_freq, HZ_PER_MHZ);
			goto set_core_clk_ctrl;
		}
	}

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) &&
		    !strcmp(clki->name, "core_clk_unipro")) {
			if (!clki->max_freq) {
				cycles_in_1us = 150; /* default for backwards compatibility */
				break;
			}

			if (freq == ULONG_MAX) {
				cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
				break;
			}

			if (is_scale_up)
				cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
			else
				cycles_in_1us = ceil(clk_get_rate(clki->clk), HZ_PER_MHZ);
			break;
		}
	}

set_core_clk_ctrl:
	err = ufshcd_dme_get(hba,
			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     &core_clk_ctrl_reg);
	if (err)
		return err;

	/* Bit mask is different for UFS host controller V4.0.0 onwards */
	if (host->hw_ver.major >= 4) {
		if (!FIELD_FIT(CLK_1US_CYCLES_MASK_V4, cycles_in_1us))
			return -ERANGE;
		core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4;
		core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us);
	} else {
		if (!FIELD_FIT(CLK_1US_CYCLES_MASK, cycles_in_1us))
			return -ERANGE;
		core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK;
		core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us);
	}

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	err = ufshcd_dme_set(hba,
			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     core_clk_ctrl_reg);
	if (err)
		return err;

	/* Configure unipro core clk 40ns attribute */
	return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long freq)
{
	int ret;

	ret = ufs_qcom_cfg_timers(hba, true, freq);
	if (ret) {
		dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
		return ret;
	}
	/* set unipro core clock attributes and clear clock divider */
	return
ufs_qcom_set_core_clk_ctrl(hba, true, freq); 1675 } 1676 1677 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba) 1678 { 1679 return 0; 1680 } 1681 1682 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) 1683 { 1684 int err; 1685 u32 core_clk_ctrl_reg; 1686 1687 err = ufshcd_dme_get(hba, 1688 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), 1689 &core_clk_ctrl_reg); 1690 1691 /* make sure CORE_CLK_DIV_EN is cleared */ 1692 if (!err && 1693 (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) { 1694 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; 1695 err = ufshcd_dme_set(hba, 1696 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), 1697 core_clk_ctrl_reg); 1698 } 1699 1700 return err; 1701 } 1702 1703 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq) 1704 { 1705 int ret; 1706 1707 ret = ufs_qcom_cfg_timers(hba, false, freq); 1708 if (ret) { 1709 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__); 1710 return ret; 1711 } 1712 /* set unipro core clock attributes and clear clock divider */ 1713 return ufs_qcom_set_core_clk_ctrl(hba, false, freq); 1714 } 1715 1716 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up, 1717 unsigned long target_freq, 1718 enum ufs_notify_change_status status) 1719 { 1720 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 1721 int err; 1722 1723 /* check the host controller state before sending hibern8 cmd */ 1724 if (!ufshcd_is_hba_active(hba)) 1725 return 0; 1726 1727 if (status == PRE_CHANGE) { 1728 err = ufshcd_uic_hibern8_enter(hba); 1729 if (err) 1730 return err; 1731 if (scale_up) 1732 err = ufs_qcom_clk_scale_up_pre_change(hba, target_freq); 1733 else 1734 err = ufs_qcom_clk_scale_down_pre_change(hba); 1735 1736 if (err) { 1737 ufshcd_uic_hibern8_exit(hba); 1738 return err; 1739 } 1740 } else { 1741 if (scale_up) 1742 err = ufs_qcom_clk_scale_up_post_change(hba); 1743 else 1744 err = ufs_qcom_clk_scale_down_post_change(hba, target_freq); 1745 1746 1747 if (err) { 1748 ufshcd_uic_hibern8_exit(hba); 1749 return err; 1750 } 1751 1752 ufs_qcom_icc_update_bw(host); 1753 ufshcd_uic_hibern8_exit(hba); 1754 } 1755 1756 return 0; 1757 } 1758 1759 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) 1760 { 1761 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 1762 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1); 1763 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); 1764 } 1765 1766 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) 1767 { 1768 /* provide a legal default configuration */ 1769 host->testbus.select_major = TSTBUS_UNIPRO; 1770 host->testbus.select_minor = 37; 1771 } 1772 1773 static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) 1774 { 1775 if (host->testbus.select_major >= TSTBUS_MAX) { 1776 dev_err(host->hba->dev, 1777 "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n", 1778 __func__, host->testbus.select_major); 1779 return false; 1780 } 1781 1782 return true; 1783 } 1784 1785 int ufs_qcom_testbus_config(struct ufs_qcom_host *host) 1786 { 1787 int reg; 1788 int offset; 1789 u32 mask = TEST_BUS_SUB_SEL_MASK; 1790 1791 if (!host) 1792 return -EINVAL; 1793 1794 if (!ufs_qcom_testbus_cfg_is_ok(host)) 1795 return -EPERM; 1796 1797 switch (host->testbus.select_major) { 1798 case TSTBUS_UAWM: 1799 reg = UFS_TEST_BUS_CTRL_0; 1800 offset = 24; 1801 break; 1802 case TSTBUS_UARM: 1803 reg = UFS_TEST_BUS_CTRL_0; 1804 offset = 16; 1805 break; 1806 case TSTBUS_TXUC: 1807 reg = UFS_TEST_BUS_CTRL_0; 1808 offset = 8; 1809 
break; 1810 case TSTBUS_RXUC: 1811 reg = UFS_TEST_BUS_CTRL_0; 1812 offset = 0; 1813 break; 1814 case TSTBUS_DFC: 1815 reg = UFS_TEST_BUS_CTRL_1; 1816 offset = 24; 1817 break; 1818 case TSTBUS_TRLUT: 1819 reg = UFS_TEST_BUS_CTRL_1; 1820 offset = 16; 1821 break; 1822 case TSTBUS_TMRLUT: 1823 reg = UFS_TEST_BUS_CTRL_1; 1824 offset = 8; 1825 break; 1826 case TSTBUS_OCSC: 1827 reg = UFS_TEST_BUS_CTRL_1; 1828 offset = 0; 1829 break; 1830 case TSTBUS_WRAPPER: 1831 reg = UFS_TEST_BUS_CTRL_2; 1832 offset = 16; 1833 break; 1834 case TSTBUS_COMBINED: 1835 reg = UFS_TEST_BUS_CTRL_2; 1836 offset = 8; 1837 break; 1838 case TSTBUS_UTP_HCI: 1839 reg = UFS_TEST_BUS_CTRL_2; 1840 offset = 0; 1841 break; 1842 case TSTBUS_UNIPRO: 1843 reg = UFS_UNIPRO_CFG; 1844 offset = 20; 1845 mask = 0xFFF; 1846 break; 1847 /* 1848 * No need for a default case, since 1849 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration 1850 * is legal 1851 */ 1852 } 1853 mask <<= offset; 1854 ufshcd_rmwl(host->hba, TEST_BUS_SEL, 1855 (u32)host->testbus.select_major << 19, 1856 REG_UFS_CFG1); 1857 ufshcd_rmwl(host->hba, mask, 1858 (u32)host->testbus.select_minor << offset, 1859 reg); 1860 ufs_qcom_enable_test_bus(host); 1861 1862 return 0; 1863 } 1864 1865 static void ufs_qcom_dump_testbus(struct ufs_hba *hba) 1866 { 1867 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 1868 int i, j, nminor = 0, testbus_len = 0; 1869 char *prefix; 1870 1871 u32 *testbus __free(kfree) = kmalloc_array(256, sizeof(u32), GFP_KERNEL); 1872 if (!testbus) 1873 return; 1874 1875 for (j = 0; j < TSTBUS_MAX; j++) { 1876 nminor = testbus_info[j].nminor; 1877 prefix = testbus_info[j].prefix; 1878 host->testbus.select_major = j; 1879 testbus_len = nminor * sizeof(u32); 1880 for (i = 0; i < nminor; i++) { 1881 host->testbus.select_minor = i; 1882 ufs_qcom_testbus_config(host); 1883 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS); 1884 } 1885 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 1886 16, 4, testbus, testbus_len, false); 1887 } 1888 } 1889 1890 static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, 1891 const char *prefix, void __iomem *base) 1892 { 1893 size_t pos; 1894 1895 if (offset % 4 != 0 || len % 4 != 0) 1896 return -EINVAL; 1897 1898 u32 *regs __free(kfree) = kzalloc(len, GFP_ATOMIC); 1899 if (!regs) 1900 return -ENOMEM; 1901 1902 for (pos = 0; pos < len; pos += 4) 1903 regs[pos / 4] = readl(base + offset + pos); 1904 1905 print_hex_dump(KERN_ERR, prefix, 1906 len > 4 ? 
DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, 1907 16, 4, regs, len, false); 1908 1909 return 0; 1910 } 1911 1912 static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba) 1913 { 1914 struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[0]; 1915 void __iomem *mcq_vs_base = hba->mcq_base + UFS_MEM_VS_BASE; 1916 1917 struct dump_info { 1918 void __iomem *base; 1919 size_t offset; 1920 size_t len; 1921 const char *prefix; 1922 }; 1923 1924 struct dump_info mcq_dumps[] = { 1925 {hba->mcq_base, 0x0, 256 * 4, "MCQ HCI-0 "}, 1926 {hba->mcq_base, 0x400, 256 * 4, "MCQ HCI-1 "}, 1927 {mcq_vs_base, 0x0, 5 * 4, "MCQ VS-0 "}, 1928 {opr->base, 0x0, 256 * 4, "MCQ SQD-0 "}, 1929 {opr->base, 0x400, 256 * 4, "MCQ SQD-1 "}, 1930 {opr->base, 0x800, 256 * 4, "MCQ SQD-2 "}, 1931 {opr->base, 0xc00, 256 * 4, "MCQ SQD-3 "}, 1932 {opr->base, 0x1000, 256 * 4, "MCQ SQD-4 "}, 1933 {opr->base, 0x1400, 256 * 4, "MCQ SQD-5 "}, 1934 {opr->base, 0x1800, 256 * 4, "MCQ SQD-6 "}, 1935 {opr->base, 0x1c00, 256 * 4, "MCQ SQD-7 "}, 1936 1937 }; 1938 1939 for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) { 1940 ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len, 1941 mcq_dumps[i].prefix, mcq_dumps[i].base); 1942 cond_resched(); 1943 } 1944 } 1945 1946 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) 1947 { 1948 u32 reg; 1949 struct ufs_qcom_host *host; 1950 1951 host = ufshcd_get_variant(hba); 1952 1953 dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT)); 1954 dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT)); 1955 1956 dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT)); 1957 dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT)); 1958 1959 dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n", 1960 ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT)); 1961 1962 ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, 1963 "HCI Vendor Specific Registers "); 1964 1965 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC); 1966 ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC "); 1967 1968 reg = ufshcd_readl(hba, REG_UFS_CFG1); 1969 reg |= UTP_DBG_RAMS_EN; 1970 ufshcd_writel(hba, reg, REG_UFS_CFG1); 1971 1972 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM); 1973 ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM "); 1974 1975 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM); 1976 ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM "); 1977 1978 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM); 1979 ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM "); 1980 1981 /* clear bit 17 - UTP_DBG_RAMS_EN */ 1982 ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1); 1983 1984 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM); 1985 ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM "); 1986 1987 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM); 1988 ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM "); 1989 1990 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC); 1991 ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC "); 1992 1993 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC); 1994 ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC "); 1995 1996 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC); 1997 ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC "); 1998 1999 reg = ufs_qcom_get_debug_reg_offset(host, 
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	u32 reg;
	struct ufs_qcom_host *host;

	host = ufshcd_get_variant(hba);

	dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT));
	dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT));

	dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT));
	dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT));

	dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n",
		ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT));

	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
			 "HCI Vendor Specific Registers ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
	ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");

	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UTP_DBG_RAMS_EN;
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
	ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
	ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
	ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");

	/* clear bit 17 - UTP_DBG_RAMS_EN */
	ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
	ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
	ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
	ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
	ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
	ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
	ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
	ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");

	if (hba->mcq_enabled) {
		reg = ufs_qcom_get_debug_reg_offset(host, UFS_RD_REG_MCQ);
		ufshcd_dump_regs(hba, reg, 64 * 4, "HCI MCQ Debug Registers ");
	}

	/* Ensure the dumps below run only in task context, as they make blocking calls. */
	if (in_task()) {
		/* Dump MCQ Host Vendor Specific Registers */
		if (hba->mcq_enabled)
			ufs_qcom_dump_mcq_hci_regs(hba);

		/* Voluntarily yield the CPU, since we are dumping a lot of data. */
		ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
		cond_resched();
		ufs_qcom_dump_testbus(hba);
	}
}

/**
 * ufs_qcom_device_reset() - toggle the (optional) device reset line
 * @hba: per-adapter instance
 *
 * Toggles the (optional) reset line to reset the attached device.
 */
static int ufs_qcom_device_reset(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/* reset gpio is optional */
	if (!host->device_reset)
		return -EOPNOTSUPP;

	/*
	 * The UFS device shall detect reset pulses of 1us, sleep for 10us to
	 * be on the safe side.
	 */
	ufs_qcom_device_reset_ctrl(hba, true);
	usleep_range(10, 15);

	ufs_qcom_device_reset_ctrl(hba, false);
	usleep_range(10, 15);

	return 0;
}

/**
 * ufs_qcom_fw_managed_device_reset() - reset the UFS device under the FW-managed design
 * @hba: pointer to UFS host bus adapter
 *
 * In the firmware-managed reset model, the power domain is powered on by genpd
 * before the UFS controller driver probes. For subsequent resets (such as
 * suspend/resume or recovery), the UFS driver must explicitly drop and
 * reacquire its PM runtime reference, so that the power domain, and with it
 * the attached device, is power cycled.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int ufs_qcom_fw_managed_device_reset(struct ufs_hba *hba)
{
	static bool is_boot = true;
	int err;

	/* Skip reset on cold boot; perform it on subsequent calls */
	if (is_boot) {
		is_boot = false;
		return 0;
	}

	pm_runtime_put_sync(hba->dev);
	err = pm_runtime_resume_and_get(hba->dev);
	if (err < 0) {
		dev_err(hba->dev, "PM runtime resume failed: %d\n", err);
		return err;
	}

	return 0;
}

static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
					  struct devfreq_dev_profile *p,
					  struct devfreq_simple_ondemand_data *d)
{
	p->polling_ms = 60;
	p->timer = DEVFREQ_TIMER_DELAYED;
	d->upthreshold = 70;
	d->downdifferential = 5;

	hba->clk_scaling.suspend_on_no_request = true;
}

static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
{
	struct platform_device *pdev = to_platform_device(hba->dev);
	struct resource *res;

	/* Map the MCQ configuration region */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mcq");
	if (!res) {
		dev_err(hba->dev, "MCQ resource not found in device tree\n");
		return -ENODEV;
	}

	hba->mcq_base = devm_ioremap_resource(hba->dev, res);
	if (IS_ERR(hba->mcq_base)) {
		dev_err(hba->dev, "Failed to map MCQ region: %ld\n",
			PTR_ERR(hba->mcq_base));
		return PTR_ERR(hba->mcq_base);
	}

	return 0;
}

static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
{
	struct ufshcd_mcq_opr_info_t *opr;
	int i;
	u32 doorbell_offsets[OPR_MAX];

	/*
	 * Configure doorbell address offsets in MCQ configuration registers.
	 * These values are offsets relative to mmio_base (UFS_HCI_BASE).
	 *
	 * Memory layout:
	 * - mmio_base = UFS_HCI_BASE
	 * - mcq_base = MCQ_CONFIG_BASE = mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200)
	 * - Doorbell registers are at:
	 *   mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) + UFS_QCOM_MCQ_SQD_OFFSET,
	 *   which is also mcq_base + UFS_QCOM_MCQ_SQD_OFFSET
	 */

	doorbell_offsets[OPR_SQD] = UFS_QCOM_SQD_ADDR_OFFSET;
	doorbell_offsets[OPR_SQIS] = UFS_QCOM_SQIS_ADDR_OFFSET;
	doorbell_offsets[OPR_CQD] = UFS_QCOM_CQD_ADDR_OFFSET;
	doorbell_offsets[OPR_CQIS] = UFS_QCOM_CQIS_ADDR_OFFSET;
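	/*
	 * Worked example with made-up values: if UFS_QCOM_MCQ_CONFIG_OFFSET
	 * were 0x8000 and UFS_QCOM_SQD_ADDR_OFFSET were 0x8100, the loop below
	 * would set opr->base for OPR_SQD to mcq_base + 0x100, i.e. the same
	 * register that lives at mmio_base + 0x8100.
	 */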
	/*
	 * Configure MCQ operation registers.
	 *
	 * The doorbell registers are physically located within the MCQ region:
	 * - doorbell_physical_addr = mmio_base + doorbell_offset
	 * - doorbell_physical_addr = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
	 */
	for (i = 0; i < OPR_MAX; i++) {
		opr = &hba->mcq_opr[i];
		opr->offset = doorbell_offsets[i];	/* Offset relative to mmio_base */
		opr->stride = UFS_QCOM_MCQ_STRIDE;	/* 256 bytes between queues */

		/*
		 * Calculate the actual doorbell base address within the MCQ region:
		 * base = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
		 */
		opr->base = hba->mcq_base + (opr->offset - UFS_QCOM_MCQ_CONFIG_OFFSET);
	}

	return 0;
}

static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
{
	/* Qualcomm HC supports up to 64 active commands (MAC) */
	return MAX_SUPP_MAC;
}

static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
					unsigned long *ocqs)
{
	/* Read from the MCQ vendor-specific register in the MCQ region */
	*ocqs = readl(hba->mcq_base + UFS_MEM_CQIS_VS);

	return 0;
}

static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufshcd_mcq_config_esi(hba, msg);
}

struct ufs_qcom_irq {
	unsigned int irq;
	unsigned int idx;
	struct ufs_hba *hba;
};
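/*
 * ESI handler: each MSI vector is bound to exactly one completion queue, so
 * the handler acknowledges CQIS for its queue index and then polls that queue
 * for completed CQEs.
 */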
static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
{
	struct ufs_qcom_irq *qi = data;
	struct ufs_hba *hba = qi->hba;
	struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];

	ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
	ufshcd_mcq_poll_cqe_lock(hba, hwq);

	return IRQ_HANDLED;
}

static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int nr_irqs, ret;

	if (host->esi_enabled)
		return 0;

	/*
	 * 1. We only handle CQs as of now.
	 * 2. Poll queues do not need ESI.
	 */
	nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];

	ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
						      ufs_qcom_write_msi_msg);
	if (ret) {
		dev_warn(hba->dev, "Platform MSI not supported or failed, continuing without ESI\n");
		/* Not fatal: the host keeps using the legacy (non-ESI) interrupt path */
		return ret;
	}

	struct ufs_qcom_irq *qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);

	if (!qi) {
		platform_device_msi_free_irqs_all(hba->dev);
		return -ENOMEM;
	}

	for (int idx = 0; idx < nr_irqs; idx++) {
		qi[idx].irq = msi_get_virq(hba->dev, idx);
		qi[idx].idx = idx;
		qi[idx].hba = hba;

		ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
				       IRQF_SHARED, "qcom-mcq-esi", qi + idx);
		if (ret) {
			dev_err(hba->dev, "%s: Failed to request IRQ for %d, err = %d\n",
				__func__, qi[idx].irq, ret);
			/* Free previously requested IRQs */
			for (int j = 0; j < idx; j++)
				devm_free_irq(hba->dev, qi[j].irq, qi + j);
			platform_device_msi_free_irqs_all(hba->dev);
			devm_kfree(hba->dev, qi);
			return ret;
		}
	}

	if (host->hw_ver.major >= 6) {
		ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
			    REG_UFS_CFG3);
	}
	ufshcd_mcq_enable_esi(hba);
	host->esi_enabled = true;
	return 0;
}

static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
						   unsigned long freq, char *name)
{
	struct ufs_clk_info *clki;
	struct dev_pm_opp *opp;
	unsigned long clk_freq;
	int idx = 0;
	bool found = false;

	opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true);
	if (IS_ERR(opp)) {
		dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq);
		return 0;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, name)) {
			found = true;
			break;
		}

		idx++;
	}

	if (!found) {
		dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name);
		dev_pm_opp_put(opp);
		return 0;
	}

	clk_freq = dev_pm_opp_get_freq_indexed(opp, idx);

	dev_pm_opp_put(opp);

	return clk_freq;
}
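/*
 * Map a devfreq OPP frequency to a UFS gear by looking up the core_clk_unipro
 * rate associated with that OPP. The result is clamped so it never exceeds
 * hba->max_pwr_info.info.gear_rx.
 */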
static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
	u32 gear = UFS_HS_DONT_CHANGE;
	unsigned long unipro_freq;

	if (!hba->use_pm_opp)
		return gear;

	unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
	switch (unipro_freq) {
	case 403000000:
		gear = UFS_HS_G5;
		break;
	case 300000000:
		gear = UFS_HS_G4;
		break;
	case 201500000:
		gear = UFS_HS_G3;
		break;
	case 150000000:
	case 100000000:
		gear = UFS_HS_G2;
		break;
	case 75000000:
	case 37500000:
		gear = UFS_HS_G1;
		break;
	default:
		dev_err(hba->dev, "%s: Unsupported unipro clock freq: %lu\n",
			__func__, unipro_freq);
		return UFS_HS_DONT_CHANGE;
	}

	return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
}

/*
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name			= "qcom",
	.init			= ufs_qcom_init,
	.exit			= ufs_qcom_exit,
	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks		= ufs_qcom_setup_clocks,
	.hce_enable_notify	= ufs_qcom_hce_enable_notify,
	.link_startup_notify	= ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.apply_dev_quirks	= ufs_qcom_apply_dev_quirks,
	.fixup_dev_quirks	= ufs_qcom_fixup_dev_quirks,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
	.device_reset		= ufs_qcom_device_reset,
	.config_scaling_param	= ufs_qcom_config_scaling_param,
	.mcq_config_resource	= ufs_qcom_mcq_config_resource,
	.get_hba_mac		= ufs_qcom_get_hba_mac,
	.op_runtime_config	= ufs_qcom_op_runtime_config,
	.get_outstanding_cqs	= ufs_qcom_get_outstanding_cqs,
	.config_esi		= ufs_qcom_config_esi,
	.freq_to_gear_speed	= ufs_qcom_freq_to_gear_speed,
};

static const struct ufs_hba_variant_ops ufs_hba_qcom_sa8255p_vops = {
	.name			= "qcom-sa8255p",
	.init			= ufs_qcom_fw_managed_init,
	.exit			= ufs_qcom_fw_managed_exit,
	.hce_enable_notify	= ufs_qcom_fw_managed_hce_enable_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.apply_dev_quirks	= ufs_qcom_apply_dev_quirks,
	.fixup_dev_quirks	= ufs_qcom_fixup_dev_quirks,
	.suspend		= ufs_qcom_fw_managed_suspend,
	.resume			= ufs_qcom_fw_managed_resume,
	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
	.device_reset		= ufs_qcom_fw_managed_device_reset,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Return: zero for success and non-zero for failure.
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	const struct ufs_hba_variant_ops *vops;
	const struct ufs_qcom_drvdata *drvdata = device_get_match_data(dev);

	if (drvdata && drvdata->vops)
		vops = drvdata->vops;
	else
		vops = &ufs_hba_qcom_vops;

	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, vops);
	if (err)
		return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n");

	return 0;
}

/**
 * ufs_qcom_remove - remove the UFS host controller instance
 * @pdev: pointer to platform device handle
 *
 * Removes the platform glue and, if ESI was enabled, frees the MSI vectors
 * that were allocated for it.
 */
static void ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufshcd_pltfrm_remove(pdev);
	if (host->esi_enabled)
		platform_device_msi_free_irqs_all(hba->dev);
}

static const struct ufs_qcom_drvdata ufs_qcom_sm8550_drvdata = {
	.quirks = UFSHCD_QUIRK_BROKEN_LSDBS_CAP,
	.no_phy_retention = true,
};

static const struct ufs_qcom_drvdata ufs_qcom_sa8255p_drvdata = {
	.vops = &ufs_hba_qcom_sa8255p_vops,
};

static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
	{ .compatible = "qcom,ufshc" },
	{ .compatible = "qcom,sm8550-ufshc", .data = &ufs_qcom_sm8550_drvdata },
	{ .compatible = "qcom,sm8650-ufshc", .data = &ufs_qcom_sm8550_drvdata },
	{ .compatible = "qcom,sa8255p-ufshc", .data = &ufs_qcom_sa8255p_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
	{ "QCOM24A5" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif

static const struct dev_pm_ops ufs_qcom_pm_ops = {
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
#ifdef CONFIG_PM_SLEEP
	.suspend	 = ufshcd_system_suspend,
	.resume		 = ufshcd_system_resume,
	.freeze		 = ufshcd_system_freeze,
	.restore	 = ufshcd_system_restore,
	.thaw		 = ufshcd_system_thaw,
#endif
};

static struct platform_driver ufs_qcom_pltform = {
	.probe	= ufs_qcom_probe,
	.remove	= ufs_qcom_remove,
	.driver	= {
		.name	= "ufshcd-qcom",
		.pm	= &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
		.acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_DESCRIPTION("Qualcomm UFS host controller driver");
MODULE_LICENSE("GPL v2");