1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
4 */
5
6 #include <linux/acpi.h>
7 #include <linux/clk.h>
8 #include <linux/delay.h>
9 #include <linux/devfreq.h>
10 #include <linux/gpio/consumer.h>
11 #include <linux/interconnect.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/phy/phy.h>
15 #include <linux/platform_device.h>
16 #include <linux/reset-controller.h>
17 #include <linux/time.h>
18 #include <linux/unaligned.h>
19 #include <linux/units.h>
20
21 #include <soc/qcom/ice.h>
22
23 #include <ufs/ufshcd.h>
24 #include <ufs/ufshci.h>
25 #include <ufs/ufs_quirks.h>
26 #include <ufs/unipro.h>
27 #include "ufshcd-pltfrm.h"
28 #include "ufs-qcom.h"
29
30 #define MCQ_QCFGPTR_MASK GENMASK(7, 0)
31 #define MCQ_QCFGPTR_UNIT 0x200
32 #define MCQ_SQATTR_OFFSET(c) \
33 ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
34 #define MCQ_QCFG_SIZE 0x40
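/*
 * Worked example of the layout these macros encode: if bits 23:16 of the MCQ
 * capability value read 0x10, the queue attribute window starts at
 * 0x10 * 0x200 = 0x2000 from the base of the ufs_mem region (see
 * ufs_qcom_mcq_config_resource()), and each hardware queue owns a 0x40-byte
 * config block within it.
 */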
35
36 enum {
37 TSTBUS_UAWM,
38 TSTBUS_UARM,
39 TSTBUS_TXUC,
40 TSTBUS_RXUC,
41 TSTBUS_DFC,
42 TSTBUS_TRLUT,
43 TSTBUS_TMRLUT,
44 TSTBUS_OCSC,
45 TSTBUS_UTP_HCI,
46 TSTBUS_COMBINED,
47 TSTBUS_WRAPPER,
48 TSTBUS_UNIPRO,
49 TSTBUS_MAX,
50 };
51
52 #define QCOM_UFS_MAX_GEAR 5
53 #define QCOM_UFS_MAX_LANE 2
54
55 enum {
56 MODE_MIN,
57 MODE_PWM,
58 MODE_HS_RA,
59 MODE_HS_RB,
60 MODE_MAX,
61 };
62
63 static const struct __ufs_qcom_bw_table {
64 u32 mem_bw;
65 u32 cfg_bw;
66 } ufs_qcom_bw_table[MODE_MAX + 1][QCOM_UFS_MAX_GEAR + 1][QCOM_UFS_MAX_LANE + 1] = {
67 [MODE_MIN][0][0] = { 0, 0 }, /* Bandwidth values in KB/s */
68 [MODE_PWM][UFS_PWM_G1][UFS_LANE_1] = { 922, 1000 },
69 [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
70 [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
71 [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
72 [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752, 1000 },
73 [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
74 [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
75 [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
76 [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
77 [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504, 1000 },
78 [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
79 [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
80 [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
81 [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
82 [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
83 [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
84 [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
85 [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
86 [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
87 [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
88 [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
89 [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
90 [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
91 [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
92 [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
93 [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
94 [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
95 [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
96 [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
97 [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
98 [MODE_MAX][0][0] = { 7643136, 819200 },
99 };
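/*
 * The table is indexed as [mode][gear][lanes]. mem_bw is the UFS<->DDR
 * interconnect vote and cfg_bw the CPU<->UFS config-path vote, both in KB/s
 * (see ufs_qcom_icc_set_bw()). ufs_qcom_get_bw_table() picks an entry from
 * the negotiated power mode parameters at runtime.
 */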
100
101 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
102 static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
103
104 static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
105 {
106 return container_of(rcd, struct ufs_qcom_host, rcdev);
107 }
108
109 #ifdef CONFIG_SCSI_UFS_CRYPTO
110 /**
111 * ufs_qcom_config_ice_allocator() - ICE core allocator configuration
112 *
113 * @host: pointer to qcom specific variant structure.
114 */
115 static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host)
116 {
117 struct ufs_hba *hba = host->hba;
118 static const uint8_t val[4] = { NUM_RX_R1W0, NUM_TX_R0W1, NUM_RX_R1W1, NUM_TX_R1W1 };
119 u32 config;
120
121 if (!(host->caps & UFS_QCOM_CAP_ICE_CONFIG) ||
122 !(host->hba->caps & UFSHCD_CAP_CRYPTO))
123 return;
124
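/*
 * Pack the four allocator counts into a single 32-bit value; with
 * get_unaligned_le32(), val[0] (NUM_RX_R1W0) ends up in the least
 * significant byte of 'config'.
 */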
125 config = get_unaligned_le32(val);
126
127 ufshcd_writel(hba, ICE_ALLOCATOR_TYPE, REG_UFS_MEM_ICE_CONFIG);
128 ufshcd_writel(hba, config, REG_UFS_MEM_ICE_NUM_CORE);
129 }
130
131 static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
132 {
133 if (host->hba->caps & UFSHCD_CAP_CRYPTO)
134 qcom_ice_enable(host->ice);
135 }
136
137 static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops; /* forward decl */
138
139 static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
140 {
141 struct ufs_hba *hba = host->hba;
142 struct blk_crypto_profile *profile = &hba->crypto_profile;
143 struct device *dev = hba->dev;
144 struct qcom_ice *ice;
145 union ufs_crypto_capabilities caps;
146 union ufs_crypto_cap_entry cap;
147 int err;
148 int i;
149
150 ice = devm_of_qcom_ice_get(dev);
151 if (ice == ERR_PTR(-EOPNOTSUPP)) {
152 dev_warn(dev, "Disabling inline encryption support\n");
153 ice = NULL;
154 }
155
156 if (IS_ERR_OR_NULL(ice))
157 return PTR_ERR_OR_ZERO(ice);
158
159 host->ice = ice;
160
161 /* Initialize the blk_crypto_profile */
162
163 caps.reg_val = cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP));
164
165 /* The number of keyslots supported is (CFGC+1) */
166 err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1);
167 if (err)
168 return err;
169
170 profile->ll_ops = ufs_qcom_crypto_ops;
171 profile->max_dun_bytes_supported = 8;
172 profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
173 profile->dev = dev;
174
175 /*
176 * Currently this driver only supports AES-256-XTS. All known versions
177 * of ICE support it, but to be safe make sure it is really declared in
178 * the crypto capability registers. The crypto capability registers
179 * also give the supported data unit size(s).
180 */
181 for (i = 0; i < caps.num_crypto_cap; i++) {
182 cap.reg_val = cpu_to_le32(ufshcd_readl(hba,
183 REG_UFS_CRYPTOCAP +
184 i * sizeof(__le32)));
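/*
 * Each set bit n in sdus_mask advertises support for a data unit size of
 * 512 << n bytes (per the UFSHCI crypto capability layout), so multiplying
 * the mask by 512 yields the byte-granularity bitmask blk-crypto expects.
 */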
185 if (cap.algorithm_id == UFS_CRYPTO_ALG_AES_XTS &&
186 cap.key_size == UFS_CRYPTO_KEY_SIZE_256)
187 profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
188 cap.sdus_mask * 512;
189 }
190
191 hba->caps |= UFSHCD_CAP_CRYPTO;
192 hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE;
193 return 0;
194 }
195
196 static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
197 {
198 if (host->hba->caps & UFSHCD_CAP_CRYPTO)
199 return qcom_ice_resume(host->ice);
200
201 return 0;
202 }
203
204 static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
205 {
206 if (host->hba->caps & UFSHCD_CAP_CRYPTO)
207 return qcom_ice_suspend(host->ice);
208
209 return 0;
210 }
211
212 static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
213 const struct blk_crypto_key *key,
214 unsigned int slot)
215 {
216 struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
217 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
218 int err;
219
220 /* Only AES-256-XTS has been tested so far. */
221 if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
222 return -EOPNOTSUPP;
223
224 ufshcd_hold(hba);
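/*
 * blk-crypto expresses the data unit size in bytes, whereas the ICE
 * interface expects it in 512-byte units, hence the division below.
 */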
225 err = qcom_ice_program_key(host->ice,
226 QCOM_ICE_CRYPTO_ALG_AES_XTS,
227 QCOM_ICE_CRYPTO_KEY_SIZE_256,
228 key->bytes,
229 key->crypto_cfg.data_unit_size / 512,
230 slot);
231 ufshcd_release(hba);
232 return err;
233 }
234
235 static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile,
236 const struct blk_crypto_key *key,
237 unsigned int slot)
238 {
239 struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
240 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
241 int err;
242
243 ufshcd_hold(hba);
244 err = qcom_ice_evict_key(host->ice, slot);
245 ufshcd_release(hba);
246 return err;
247 }
248
249 static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = {
250 .keyslot_program = ufs_qcom_ice_keyslot_program,
251 .keyslot_evict = ufs_qcom_ice_keyslot_evict,
252 };
253
254 #else
255
256 static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
257 {
258 }
259
260 static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
261 {
262 return 0;
263 }
264
265 static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
266 {
267 return 0;
268 }
269
270 static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
271 {
272 return 0;
273 }
274
275 static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host)
276 {
277 }
278
279 #endif
280
281 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
282 {
283 if (!host->is_lane_clks_enabled)
284 return;
285
286 clk_bulk_disable_unprepare(host->num_clks, host->clks);
287
288 host->is_lane_clks_enabled = false;
289 }
290
291 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
292 {
293 int err;
294
295 err = clk_bulk_prepare_enable(host->num_clks, host->clks);
296 if (err)
297 return err;
298
299 host->is_lane_clks_enabled = true;
300
301 return 0;
302 }
303
304 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
305 {
306 int err;
307 struct device *dev = host->hba->dev;
308
309 if (has_acpi_companion(dev))
310 return 0;
311
312 err = devm_clk_bulk_get_all(dev, &host->clks);
313 if (err <= 0)
314 return err;
315
316 host->num_clks = err;
317
318 return 0;
319 }
320
321 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
322 {
323 int err;
324 u32 tx_fsm_val;
325 unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
326
327 do {
328 err = ufshcd_dme_get(hba,
329 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
330 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
331 &tx_fsm_val);
332 if (err || tx_fsm_val == TX_FSM_HIBERN8)
333 break;
334
335 /* sleep for max. 200us */
336 usleep_range(100, 200);
337 } while (time_before(jiffies, timeout));
338
339 /*
340 * we might have scheduled out for long during polling so
341 * check the state again.
342 */
343 if (time_after(jiffies, timeout))
344 err = ufshcd_dme_get(hba,
345 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
346 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
347 &tx_fsm_val);
348
349 if (err) {
350 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
351 __func__, err);
352 } else if (tx_fsm_val != TX_FSM_HIBERN8) {
353 err = tx_fsm_val;
354 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
355 __func__, err);
356 }
357
358 return err;
359 }
360
361 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
362 {
363 ufshcd_rmwl(host->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);
364
365 if (host->hw_ver.major >= 0x05)
366 ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
367 }
368
369 /*
370 * ufs_qcom_host_reset - reset host controller and PHY
371 */
372 static int ufs_qcom_host_reset(struct ufs_hba *hba)
373 {
374 int ret;
375 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
376 bool reenable_intr;
377
378 if (!host->core_reset)
379 return 0;
380
381 reenable_intr = hba->is_irq_enabled;
382 ufshcd_disable_irq(hba);
383
384 ret = reset_control_assert(host->core_reset);
385 if (ret) {
386 dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
387 __func__, ret);
388 return ret;
389 }
390
391 /*
392 * The hardware requirement for delay between assert/deassert
393 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
394 * ~125us (4/32768). To be on the safe side add 200us delay.
395 */
396 usleep_range(200, 210);
397
398 ret = reset_control_deassert(host->core_reset);
399 if (ret) {
400 dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
401 __func__, ret);
402 return ret;
403 }
404
405 usleep_range(1000, 1100);
406
407 if (reenable_intr)
408 ufshcd_enable_irq(hba);
409
410 return 0;
411 }
412
413 static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
414 {
415 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
416
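/*
 * From host controller v4 onwards the maximum supported HS gear is
 * advertised in REG_UFS_PARAM0, so read it instead of assuming HS-G3.
 */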
417 if (host->hw_ver.major >= 0x4)
418 return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
419
420 /* Default is HS-G3 */
421 return UFS_HS_G3;
422 }
423
424 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
425 {
426 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
427 struct ufs_host_params *host_params = &host->host_params;
428 struct phy *phy = host->generic_phy;
429 enum phy_mode mode;
430 int ret;
431
432 /*
433 * HW ver 5 can only support up to HS-G5 Rate-A due to HW limitations.
434 * If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A,
435 * so that the subsequent power mode change shall stick to Rate-A.
436 */
437 if (host->hw_ver.major == 0x5) {
438 if (host->phy_gear == UFS_HS_G5)
439 host_params->hs_rate = PA_HS_MODE_A;
440 else
441 host_params->hs_rate = PA_HS_MODE_B;
442 }
443
444 mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A;
445
446 /* Reset UFS Host Controller and PHY */
447 ret = ufs_qcom_host_reset(hba);
448 if (ret)
449 return ret;
450
451 if (phy->power_count) {
452 phy_power_off(phy);
453 phy_exit(phy);
454 }
455
456 /* phy initialization - calibrate the phy */
457 ret = phy_init(phy);
458 if (ret) {
459 dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
460 __func__, ret);
461 return ret;
462 }
463
464 ret = phy_set_mode_ext(phy, mode, host->phy_gear);
465 if (ret)
466 goto out_disable_phy;
467
468 /* power on phy - start serdes and phy's power and clocks */
469 ret = phy_power_on(phy);
470 if (ret) {
471 dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
472 __func__, ret);
473 goto out_disable_phy;
474 }
475
476 ufs_qcom_select_unipro_mode(host);
477
478 return 0;
479
480 out_disable_phy:
481 phy_exit(phy);
482
483 return ret;
484 }
485
486 /*
487 * The UTP controller has a number of internal clock gating cells (CGCs).
488 * Internal hardware sub-modules within the UTP controller control the CGCs.
489 * Hardware CGCs disable the clock to inactivate UTP sub-modules that are not
490 * involved in a specific operation. UTP controller CGCs are disabled by
491 * default, and this function enables them (after every UFS link startup) to
492 * reduce power leakage.
493 */
494 static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
495 {
496 ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
497 REG_UFS_CFG2);
498
499 /* Ensure that HW clock gating is enabled before next operations */
500 ufshcd_readl(hba, REG_UFS_CFG2);
501 }
502
503 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
504 enum ufs_notify_change_status status)
505 {
506 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
507 int err;
508
509 switch (status) {
510 case PRE_CHANGE:
511 err = ufs_qcom_power_up_sequence(hba);
512 if (err)
513 return err;
514
515 /*
516 * The PHY PLL output is the source of tx/rx lane symbol
517 * clocks, hence, enable the lane clocks only after PHY
518 * is initialized.
519 */
520 err = ufs_qcom_enable_lane_clks(host);
521 break;
522 case POST_CHANGE:
523 /* check if UFS PHY moved from DISABLED to HIBERN8 */
524 err = ufs_qcom_check_hibern8(hba);
525 ufs_qcom_enable_hw_clk_gating(hba);
526 ufs_qcom_ice_enable(host);
527 ufs_qcom_config_ice_allocator(host);
528 break;
529 default:
530 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
531 err = -EINVAL;
532 break;
533 }
534 return err;
535 }
536
537 /**
538 * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers
539 *
540 * @hba: host controller instance
541 * @is_pre_scale_up: true if called before the clocks are scaled up.
542 * Return: zero for success and non-zero in case of a failure.
543 */
544 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
545 {
546 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
547 struct ufs_clk_info *clki;
548 unsigned long core_clk_rate = 0;
549 u32 core_clk_cycles_per_us;
550
551 /*
552 * UTP controller uses SYS1CLK_1US_REG register for Interrupt
553 * Aggregation logic.
554 * It is mandatory to write SYS1CLK_1US_REG register on UFS host
555 * controller V4.0.0 onwards.
556 */
557 if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
558 return 0;
559
560 list_for_each_entry(clki, &hba->clk_list_head, list) {
561 if (!strcmp(clki->name, "core_clk")) {
562 if (is_pre_scale_up)
563 core_clk_rate = clki->max_freq;
564 else
565 core_clk_rate = clk_get_rate(clki->clk);
566 break;
567 }
568
569 }
570
571 /* If frequency is smaller than 1MHz, set to 1MHz */
572 if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
573 core_clk_rate = DEFAULT_CLK_RATE_HZ;
574
575 core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
576 if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
577 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
578 /*
579 * make sure above write gets applied before we return from
580 * this function.
581 */
582 ufshcd_readl(hba, REG_UFS_SYS1CLK_1US);
583 }
584
585 return 0;
586 }
587
588 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
589 enum ufs_notify_change_status status)
590 {
591 int err = 0;
592
593 switch (status) {
594 case PRE_CHANGE:
595 if (ufs_qcom_cfg_timers(hba, false)) {
596 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
597 __func__);
598 return -EINVAL;
599 }
600
601 err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
602 if (err)
603 dev_err(hba->dev, "cfg core clk ctrl failed\n");
604 /*
605 * Some UFS devices (and maybe the host) have issues if LCC is
606 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
607 * before link startup which will make sure that both host
608 * and device TX LCC are disabled once link startup is
609 * completed.
610 */
611 err = ufshcd_disable_host_tx_lcc(hba);
612
613 break;
614 default:
615 break;
616 }
617
618 return err;
619 }
620
621 static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
622 {
623 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
624
625 /* reset gpio is optional */
626 if (!host->device_reset)
627 return;
628
629 gpiod_set_value_cansleep(host->device_reset, asserted);
630 }
631
632 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
633 enum ufs_notify_change_status status)
634 {
635 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
636 struct phy *phy = host->generic_phy;
637
638 if (status == PRE_CHANGE)
639 return 0;
640
641 if (ufs_qcom_is_link_off(hba)) {
642 /*
643 * Disable the tx/rx lane symbol clocks before PHY is
644 * powered down as the PLL source should be disabled
645 * after downstream clocks are disabled.
646 */
647 ufs_qcom_disable_lane_clks(host);
648 phy_power_off(phy);
649
650 /* reset the connected UFS device during power down */
651 ufs_qcom_device_reset_ctrl(hba, true);
652
653 } else if (!ufs_qcom_is_link_active(hba)) {
654 ufs_qcom_disable_lane_clks(host);
655 }
656
657 return ufs_qcom_ice_suspend(host);
658 }
659
660 static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
661 {
662 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
663 struct phy *phy = host->generic_phy;
664 int err;
665
666 if (ufs_qcom_is_link_off(hba)) {
667 err = phy_power_on(phy);
668 if (err) {
669 dev_err(hba->dev, "%s: failed PHY power on: %d\n",
670 __func__, err);
671 return err;
672 }
673
674 err = ufs_qcom_enable_lane_clks(host);
675 if (err)
676 return err;
677
678 } else if (!ufs_qcom_is_link_active(hba)) {
679 err = ufs_qcom_enable_lane_clks(host);
680 if (err)
681 return err;
682 }
683
684 return ufs_qcom_ice_resume(host);
685 }
686
687 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
688 {
689 if (host->dev_ref_clk_ctrl_mmio &&
690 (enable ^ host->is_dev_ref_clk_enabled)) {
691 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
692
693 if (enable)
694 temp |= host->dev_ref_clk_en_mask;
695 else
696 temp &= ~host->dev_ref_clk_en_mask;
697
698 /*
699 * If we are here to disable this clock it might be immediately
700 * after entering into hibern8 in which case we need to make
701 * sure that device ref_clk is active for specific time after
702 * hibern8 enter.
703 */
704 if (!enable) {
705 unsigned long gating_wait;
706
707 gating_wait = host->hba->dev_info.clk_gating_wait_us;
708 if (!gating_wait) {
709 udelay(1);
710 } else {
711 /*
712 * bRefClkGatingWaitTime defines the minimum
713 * time for which the reference clock is
714 * required by the device during the transition from
715 * HS-MODE to LS-MODE or HIBERN8 state. Give it
716 * more delay to be on the safe side.
717 */
718 gating_wait += 10;
719 usleep_range(gating_wait, gating_wait + 10);
720 }
721 }
722
723 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
724
725 /*
726 * Make sure the write to ref_clk reaches the destination and
727 * not stored in a Write Buffer (WB).
728 */
729 readl(host->dev_ref_clk_ctrl_mmio);
730
731 /*
732 * If we call hibern8 exit after this, we need to make sure that
733 * device ref_clk is stable for at least 1us before the hibern8
734 * exit command.
735 */
736 if (enable)
737 udelay(1);
738
739 host->is_dev_ref_clk_enabled = enable;
740 }
741 }
742
743 static int ufs_qcom_icc_set_bw(struct ufs_qcom_host *host, u32 mem_bw, u32 cfg_bw)
744 {
745 struct device *dev = host->hba->dev;
746 int ret;
747
748 ret = icc_set_bw(host->icc_ddr, 0, mem_bw);
749 if (ret < 0) {
750 dev_err(dev, "failed to set bandwidth request: %d\n", ret);
751 return ret;
752 }
753
754 ret = icc_set_bw(host->icc_cpu, 0, cfg_bw);
755 if (ret < 0) {
756 dev_err(dev, "failed to set bandwidth request: %d\n", ret);
757 return ret;
758 }
759
760 return 0;
761 }
762
763 static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *host)
764 {
765 struct ufs_pa_layer_attr *p = &host->dev_req_params;
766 int gear = max_t(u32, p->gear_rx, p->gear_tx);
767 int lane = max_t(u32, p->lane_rx, p->lane_tx);
768
769 if (WARN_ONCE(gear > QCOM_UFS_MAX_GEAR,
770 "ICC scaling for UFS Gear (%d) not supported. Using Gear (%d) bandwidth\n",
771 gear, QCOM_UFS_MAX_GEAR))
772 gear = QCOM_UFS_MAX_GEAR;
773
774 if (WARN_ONCE(lane > QCOM_UFS_MAX_LANE,
775 "ICC scaling for UFS Lane (%d) not supported. Using Lane (%d) bandwidth\n",
776 lane, QCOM_UFS_MAX_LANE))
777 lane = QCOM_UFS_MAX_LANE;
778
779 if (ufshcd_is_hs_mode(p)) {
780 if (p->hs_rate == PA_HS_MODE_B)
781 return ufs_qcom_bw_table[MODE_HS_RB][gear][lane];
782 else
783 return ufs_qcom_bw_table[MODE_HS_RA][gear][lane];
784 } else {
785 return ufs_qcom_bw_table[MODE_PWM][gear][lane];
786 }
787 }
788
789 static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
790 {
791 struct __ufs_qcom_bw_table bw_table;
792
793 bw_table = ufs_qcom_get_bw_table(host);
794
795 return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
796 }
797
798 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
799 enum ufs_notify_change_status status,
800 const struct ufs_pa_layer_attr *dev_max_params,
801 struct ufs_pa_layer_attr *dev_req_params)
802 {
803 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
804 struct ufs_host_params *host_params = &host->host_params;
805 int ret = 0;
806
807 if (!dev_req_params) {
808 pr_err("%s: incoming dev_req_params is NULL\n", __func__);
809 return -EINVAL;
810 }
811
812 switch (status) {
813 case PRE_CHANGE:
814 ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params);
815 if (ret) {
816 dev_err(hba->dev, "%s: failed to determine capabilities\n",
817 __func__);
818 return ret;
819 }
820
821 /*
822 * During UFS driver probe, always update the PHY gear to match the negotiated
823 * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled,
824 * the second init can program the optimal PHY settings. This allows one to start
825 * the first init with either the minimum or the maximum supported gear.
826 */
827 if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
828 /*
829 * Skip REINIT if the negotiated gear matches with the
830 * initial phy_gear. Otherwise, update the phy_gear to
831 * program the optimal gear setting during REINIT.
832 */
833 if (host->phy_gear == dev_req_params->gear_tx)
834 hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
835 else
836 host->phy_gear = dev_req_params->gear_tx;
837 }
838
839 /* enable the device ref clock before changing to HS mode */
840 if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
841 ufshcd_is_hs_mode(dev_req_params))
842 ufs_qcom_dev_ref_clk_ctrl(host, true);
843
844 if (host->hw_ver.major >= 0x4) {
845 ufshcd_dme_configure_adapt(hba,
846 dev_req_params->gear_tx,
847 PA_INITIAL_ADAPT);
848 }
849 break;
850 case POST_CHANGE:
851 if (ufs_qcom_cfg_timers(hba, false)) {
852 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
853 __func__);
854 /*
855 * we return error code at the end of the routine,
856 * but continue to configure UFS_PHY_TX_LANE_ENABLE
857 * and bus voting as usual
858 */
859 ret = -EINVAL;
860 }
861
862 /* cache the power mode parameters to use internally */
863 memcpy(&host->dev_req_params,
864 dev_req_params, sizeof(*dev_req_params));
865
866 ufs_qcom_icc_update_bw(host);
867
868 /* disable the device ref clock if entered PWM mode */
869 if (ufshcd_is_hs_mode(&hba->pwr_info) &&
870 !ufshcd_is_hs_mode(dev_req_params))
871 ufs_qcom_dev_ref_clk_ctrl(host, false);
872 break;
873 default:
874 ret = -EINVAL;
875 break;
876 }
877
878 return ret;
879 }
880
881 static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
882 {
883 int err;
884 u32 pa_vs_config_reg1;
885
886 err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
887 &pa_vs_config_reg1);
888 if (err)
889 return err;
890
891 /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
892 return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
893 (pa_vs_config_reg1 | (1 << 12)));
894 }
895
896 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
897 {
898 int err = 0;
899
900 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
901 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
902
903 return err;
904 }
905
906 /* UFS device-specific quirks */
907 static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
908 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
909 .model = UFS_ANY_MODEL,
910 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
911 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
912 .model = UFS_ANY_MODEL,
913 .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM },
914 { .wmanufacturerid = UFS_VENDOR_WDC,
915 .model = UFS_ANY_MODEL,
916 .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
917 {}
918 };
919
920 static void ufs_qcom_fixup_dev_quirks(struct ufs_hba *hba)
921 {
922 ufshcd_fixup_dev_quirks(hba, ufs_qcom_dev_fixups);
923 }
924
925 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
926 {
927 return ufshci_version(2, 0);
928 }
929
930 /**
931 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
932 * @hba: host controller instance
933 *
934 * The QCOM UFS host controller might have some non-standard behaviours (quirks)
935 * compared to what is specified by the UFSHCI specification. Advertise all such
936 * quirks to the standard UFS host controller driver so that it takes them into
937 * account.
938 */
939 static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
940 {
941 const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev);
942 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
943
944 if (host->hw_ver.major == 0x2)
945 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
946
947 if (host->hw_ver.major > 0x3)
948 hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
949
950 if (drvdata && drvdata->quirks)
951 hba->quirks |= drvdata->quirks;
952 }
953
954 static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
955 {
956 struct ufs_host_params *host_params = &host->host_params;
957 u32 val, dev_major;
958
959 /*
960 * Default to powering up the PHY to the max gear possible, which is
961 * backwards compatible with lower gears but not optimal from
962 * a power usage point of view. After device negotiation, if the
963 * gear is lower, a reinit will be performed to program the PHY
964 * to the ideal gear for this combo of controller and device.
965 */
966 host->phy_gear = host_params->hs_tx_gear;
967
968 if (host->hw_ver.major < 0x4) {
969 /*
970 * These controllers only have one PHY init sequence,
971 * let's power up the PHY using that (the minimum supported
972 * gear, UFS_HS_G2).
973 */
974 host->phy_gear = UFS_HS_G2;
975 } else if (host->hw_ver.major >= 0x5) {
976 val = ufshcd_readl(host->hba, REG_UFS_DEBUG_SPARE_CFG);
977 dev_major = FIELD_GET(UFS_DEV_VER_MAJOR_MASK, val);
978
979 /*
980 * Since the UFS device version is populated, let's remove the
981 * REINIT quirk as the negotiated gear won't change during boot.
982 * So there is no need to do reinit.
983 */
984 if (dev_major != 0x0)
985 host->hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
986
987 /*
988 * For UFS 3.1 device and older, power up the PHY using HS-G4
989 * PHY gear to save power.
990 */
991 if (dev_major > 0x0 && dev_major < 0x4)
992 host->phy_gear = UFS_HS_G4;
993 }
994 }
995
996 static void ufs_qcom_set_host_params(struct ufs_hba *hba)
997 {
998 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
999 struct ufs_host_params *host_params = &host->host_params;
1000
1001 ufshcd_init_host_params(host_params);
1002
1003 /* This driver only supports symmetric gear setting i.e., hs_tx_gear == hs_rx_gear */
1004 host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba);
1005 }
1006
1007 static void ufs_qcom_set_host_caps(struct ufs_hba *hba)
1008 {
1009 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1010
1011 if (host->hw_ver.major >= 0x5)
1012 host->caps |= UFS_QCOM_CAP_ICE_CONFIG;
1013 }
1014
1015 static void ufs_qcom_set_caps(struct ufs_hba *hba)
1016 {
1017 hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1018 hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
1019 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
1020 hba->caps |= UFSHCD_CAP_WB_EN;
1021 hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
1022 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
1023
1024 ufs_qcom_set_host_caps(hba);
1025 }
1026
1027 /**
1028 * ufs_qcom_setup_clocks - enable/disable clocks
1029 * @hba: host controller instance
1030 * @on: If true, enable clocks else disable them.
1031 * @status: PRE_CHANGE or POST_CHANGE notify
1032 *
1033 * Return: 0 on success, non-zero on failure.
1034 */
1035 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
1036 enum ufs_notify_change_status status)
1037 {
1038 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1039
1040 /*
1041 * In case ufs_qcom_init() is not yet done, simply ignore.
1042 * This ufs_qcom_setup_clocks() shall be called from
1043 * ufs_qcom_init() after init is done.
1044 */
1045 if (!host)
1046 return 0;
1047
1048 switch (status) {
1049 case PRE_CHANGE:
1050 if (on) {
1051 ufs_qcom_icc_update_bw(host);
1052 } else {
1053 if (!ufs_qcom_is_link_active(hba)) {
1054 /* disable device ref_clk */
1055 ufs_qcom_dev_ref_clk_ctrl(host, false);
1056 }
1057 }
1058 break;
1059 case POST_CHANGE:
1060 if (on) {
1061 /* enable the device ref clock for HS mode */
1062 if (ufshcd_is_hs_mode(&hba->pwr_info))
1063 ufs_qcom_dev_ref_clk_ctrl(host, true);
1064 } else {
1065 ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
1066 ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
1067 }
1068 break;
1069 }
1070
1071 return 0;
1072 }
1073
1074 static int
1075 ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
1076 {
1077 struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
1078
1079 ufs_qcom_assert_reset(host->hba);
1080 /* provide 1ms delay to let the reset pulse propagate. */
1081 usleep_range(1000, 1100);
1082 return 0;
1083 }
1084
1085 static int
1086 ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
1087 {
1088 struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
1089
1090 ufs_qcom_deassert_reset(host->hba);
1091
1092 /*
1093 * after reset deassertion, phy will need all ref clocks,
1094 * voltage, current to settle down before starting serdes.
1095 */
1096 usleep_range(1000, 1100);
1097 return 0;
1098 }
1099
1100 static const struct reset_control_ops ufs_qcom_reset_ops = {
1101 .assert = ufs_qcom_reset_assert,
1102 .deassert = ufs_qcom_reset_deassert,
1103 };
1104
1105 static int ufs_qcom_icc_init(struct ufs_qcom_host *host)
1106 {
1107 struct device *dev = host->hba->dev;
1108 int ret;
1109
1110 host->icc_ddr = devm_of_icc_get(dev, "ufs-ddr");
1111 if (IS_ERR(host->icc_ddr))
1112 return dev_err_probe(dev, PTR_ERR(host->icc_ddr),
1113 "failed to acquire interconnect path\n");
1114
1115 host->icc_cpu = devm_of_icc_get(dev, "cpu-ufs");
1116 if (IS_ERR(host->icc_cpu))
1117 return dev_err_probe(dev, PTR_ERR(host->icc_cpu),
1118 "failed to acquire interconnect path\n");
1119
1120 /*
1121 * Set Maximum bandwidth vote before initializing the UFS controller and
1122 * device. Ideally, a minimal interconnect vote would suffice for the
1123 * initialization, but a max vote would allow faster initialization.
1124 */
1125 ret = ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MAX][0][0].mem_bw,
1126 ufs_qcom_bw_table[MODE_MAX][0][0].cfg_bw);
1127 if (ret < 0)
1128 return dev_err_probe(dev, ret, "failed to set bandwidth request\n");
1129
1130 return 0;
1131 }
1132
1133 /**
1134 * ufs_qcom_init - bind phy with controller
1135 * @hba: host controller instance
1136 *
1137 * Binds PHY with controller and powers up PHY enabling clocks
1138 * and regulators.
1139 *
1140 * Return: -EPROBE_DEFER if binding fails, a negative error code on PHY
1141 * power up failure, and zero on success.
1142 */
1143 static int ufs_qcom_init(struct ufs_hba *hba)
1144 {
1145 int err;
1146 struct device *dev = hba->dev;
1147 struct ufs_qcom_host *host;
1148 struct ufs_clk_info *clki;
1149 const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev);
1150
1151 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
1152 if (!host)
1153 return -ENOMEM;
1154
1155 /* Make a two way bind between the qcom host and the hba */
1156 host->hba = hba;
1157 ufshcd_set_variant(hba, host);
1158
1159 /* Setup the optional reset control of HCI */
1160 host->core_reset = devm_reset_control_get_optional(hba->dev, "rst");
1161 if (IS_ERR(host->core_reset)) {
1162 err = dev_err_probe(dev, PTR_ERR(host->core_reset),
1163 "Failed to get reset control\n");
1164 goto out_variant_clear;
1165 }
1166
1167 /* Fire up the reset controller. Failure here is non-fatal. */
1168 host->rcdev.of_node = dev->of_node;
1169 host->rcdev.ops = &ufs_qcom_reset_ops;
1170 host->rcdev.owner = dev->driver->owner;
1171 host->rcdev.nr_resets = 1;
1172 err = devm_reset_controller_register(dev, &host->rcdev);
1173 if (err)
1174 dev_warn(dev, "Failed to register reset controller\n");
1175
1176 if (!has_acpi_companion(dev)) {
1177 host->generic_phy = devm_phy_get(dev, "ufsphy");
1178 if (IS_ERR(host->generic_phy)) {
1179 err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n");
1180 goto out_variant_clear;
1181 }
1182 }
1183
1184 err = ufs_qcom_icc_init(host);
1185 if (err)
1186 goto out_variant_clear;
1187
1188 host->device_reset = devm_gpiod_get_optional(dev, "reset",
1189 GPIOD_OUT_HIGH);
1190 if (IS_ERR(host->device_reset)) {
1191 err = dev_err_probe(dev, PTR_ERR(host->device_reset),
1192 "Failed to acquire device reset gpio\n");
1193 goto out_variant_clear;
1194 }
1195
1196 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
1197 &host->hw_ver.minor, &host->hw_ver.step);
1198
1199 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
1200 host->dev_ref_clk_en_mask = BIT(26);
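/*
 * The device reference clock enable bit lives in bit 26 of UFS_CFG1 on
 * these controllers; ufs_qcom_dev_ref_clk_ctrl() toggles it around HS mode
 * changes and hibern8 transitions.
 */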
1201
1202 list_for_each_entry(clki, &hba->clk_list_head, list) {
1203 if (!strcmp(clki->name, "core_clk_unipro"))
1204 clki->keep_link_active = true;
1205 }
1206
1207 err = ufs_qcom_init_lane_clks(host);
1208 if (err)
1209 goto out_variant_clear;
1210
1211 ufs_qcom_set_caps(hba);
1212 ufs_qcom_advertise_quirks(hba);
1213 ufs_qcom_set_host_params(hba);
1214 ufs_qcom_set_phy_gear(host);
1215
1216 err = ufs_qcom_ice_init(host);
1217 if (err)
1218 goto out_variant_clear;
1219
1220 ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
1221
1222 ufs_qcom_get_default_testbus_cfg(host);
1223 err = ufs_qcom_testbus_config(host);
1224 if (err)
1225 /* Failure is non-fatal */
1226 dev_warn(dev, "%s: failed to configure the testbus %d\n",
1227 __func__, err);
1228
1229 if (drvdata && drvdata->no_phy_retention)
1230 hba->spm_lvl = UFS_PM_LVL_5;
1231
1232 return 0;
1233
1234 out_variant_clear:
1235 ufshcd_set_variant(hba, NULL);
1236
1237 return err;
1238 }
1239
1240 static void ufs_qcom_exit(struct ufs_hba *hba)
1241 {
1242 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1243
1244 ufs_qcom_disable_lane_clks(host);
1245 phy_power_off(host->generic_phy);
1246 phy_exit(host->generic_phy);
1247 }
1248
1249 /**
1250 * ufs_qcom_set_clk_40ns_cycles - Configure 40ns clk cycles
1251 *
1252 * @hba: host controller instance
1253 * @cycles_in_1us: No of cycles in 1us to be configured
1254 *
1255 * Returns an error if the DME get/set of the 40ns configuration fails,
1256 * and zero on success.
1257 */
1258 static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
1259 u32 cycles_in_1us)
1260 {
1261 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1262 u32 cycles_in_40ns;
1263 u32 reg;
1264 int err;
1265
1266 /*
1267 * UFS host controller V4.0.0 onwards needs to program
1268 * PA_VS_CORE_CLK_40NS_CYCLES attribute per programmed
1269 * frequency of unipro core clk of UFS host controller.
1270 */
1271 if (host->hw_ver.major < 4)
1272 return 0;
1273
1274 /*
1275 * The generic formula cycles_in_40ns = (freq_unipro / 25) is not
1276 * applicable for all frequencies. For example, ceil(37.5 MHz / 25) is
1277 * 2 and ceil(403 MHz / 25) is 17, whereas the hardware
1278 * specification expects 16. Hence use the exact values mandated by the
1279 * hardware spec for cycles_in_40ns instead of computing them with the
1280 * generic formula.
1281 */
1282 switch (cycles_in_1us) {
1283 case UNIPRO_CORE_CLK_FREQ_403_MHZ:
1284 cycles_in_40ns = 16;
1285 break;
1286 case UNIPRO_CORE_CLK_FREQ_300_MHZ:
1287 cycles_in_40ns = 12;
1288 break;
1289 case UNIPRO_CORE_CLK_FREQ_201_5_MHZ:
1290 cycles_in_40ns = 8;
1291 break;
1292 case UNIPRO_CORE_CLK_FREQ_150_MHZ:
1293 cycles_in_40ns = 6;
1294 break;
1295 case UNIPRO_CORE_CLK_FREQ_100_MHZ:
1296 cycles_in_40ns = 4;
1297 break;
1298 case UNIPRO_CORE_CLK_FREQ_75_MHZ:
1299 cycles_in_40ns = 3;
1300 break;
1301 case UNIPRO_CORE_CLK_FREQ_37_5_MHZ:
1302 cycles_in_40ns = 2;
1303 break;
1304 default:
1305 dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n",
1306 cycles_in_1us);
1307 return -EINVAL;
1308 }
1309
1310 err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), &reg);
1311 if (err)
1312 return err;
1313
1314 reg &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK;
1315 reg |= cycles_in_40ns;
1316
1317 return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
1318 }
1319
1320 static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
1321 {
1322 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1323 struct list_head *head = &hba->clk_list_head;
1324 struct ufs_clk_info *clki;
1325 u32 cycles_in_1us = 0;
1326 u32 core_clk_ctrl_reg;
1327 int err;
1328
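/*
 * cycles_in_1us is the unipro core clock frequency expressed in MHz, rounded
 * up: e.g. a 300 MHz core_clk_unipro gives ceil(300000000 / 1000000) = 300
 * cycles per microsecond.
 */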
1329 list_for_each_entry(clki, head, list) {
1330 if (!IS_ERR_OR_NULL(clki->clk) &&
1331 !strcmp(clki->name, "core_clk_unipro")) {
1332 if (!clki->max_freq)
1333 cycles_in_1us = 150; /* default for backwards compatibility */
1334 else if (freq == ULONG_MAX)
1335 cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
1336 else
1337 cycles_in_1us = ceil(freq, HZ_PER_MHZ);
1338
1339 break;
1340 }
1341 }
1342
1343 err = ufshcd_dme_get(hba,
1344 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1345 &core_clk_ctrl_reg);
1346 if (err)
1347 return err;
1348
1349 /* Bit mask is different for UFS host controller V4.0.0 onwards */
1350 if (host->hw_ver.major >= 4) {
1351 if (!FIELD_FIT(CLK_1US_CYCLES_MASK_V4, cycles_in_1us))
1352 return -ERANGE;
1353 core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4;
1354 core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us);
1355 } else {
1356 if (!FIELD_FIT(CLK_1US_CYCLES_MASK, cycles_in_1us))
1357 return -ERANGE;
1358 core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK;
1359 core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us);
1360 }
1361
1362 /* Clear CORE_CLK_DIV_EN */
1363 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1364
1365 err = ufshcd_dme_set(hba,
1366 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1367 core_clk_ctrl_reg);
1368 if (err)
1369 return err;
1370
1371 /* Configure unipro core clk 40ns attribute */
1372 return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
1373 }
1374
1375 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long freq)
1376 {
1377 int ret;
1378
1379 ret = ufs_qcom_cfg_timers(hba, true);
1380 if (ret) {
1381 dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
1382 return ret;
1383 }
1384 /* set unipro core clock attributes and clear clock divider */
1385 return ufs_qcom_set_core_clk_ctrl(hba, freq);
1386 }
1387
1388 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
1389 {
1390 return 0;
1391 }
1392
1393 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
1394 {
1395 int err;
1396 u32 core_clk_ctrl_reg;
1397
1398 err = ufshcd_dme_get(hba,
1399 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1400 &core_clk_ctrl_reg);
1401
1402 /* make sure CORE_CLK_DIV_EN is cleared */
1403 if (!err &&
1404 (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
1405 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1406 err = ufshcd_dme_set(hba,
1407 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1408 core_clk_ctrl_reg);
1409 }
1410
1411 return err;
1412 }
1413
1414 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
1415 {
1416 /* set unipro core clock attributes and clear clock divider */
1417 return ufs_qcom_set_core_clk_ctrl(hba, freq);
1418 }
1419
1420 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
1421 unsigned long target_freq,
1422 enum ufs_notify_change_status status)
1423 {
1424 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1425 int err;
1426
1427 /* check the host controller state before sending hibern8 cmd */
1428 if (!ufshcd_is_hba_active(hba))
1429 return 0;
1430
1431 if (status == PRE_CHANGE) {
1432 err = ufshcd_uic_hibern8_enter(hba);
1433 if (err)
1434 return err;
1435 if (scale_up)
1436 err = ufs_qcom_clk_scale_up_pre_change(hba, target_freq);
1437 else
1438 err = ufs_qcom_clk_scale_down_pre_change(hba);
1439
1440 if (err) {
1441 ufshcd_uic_hibern8_exit(hba);
1442 return err;
1443 }
1444 } else {
1445 if (scale_up)
1446 err = ufs_qcom_clk_scale_up_post_change(hba);
1447 else
1448 err = ufs_qcom_clk_scale_down_post_change(hba, target_freq);
1449
1450
1451 if (err) {
1452 ufshcd_uic_hibern8_exit(hba);
1453 return err;
1454 }
1455
1456 ufs_qcom_icc_update_bw(host);
1457 ufshcd_uic_hibern8_exit(hba);
1458 }
1459
1460 return 0;
1461 }
1462
1463 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
1464 {
1465 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
1466 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
1467 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
1468 }
1469
1470 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
1471 {
1472 /* provide a legal default configuration */
1473 host->testbus.select_major = TSTBUS_UNIPRO;
1474 host->testbus.select_minor = 37;
1475 }
1476
1477 static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
1478 {
1479 if (host->testbus.select_major >= TSTBUS_MAX) {
1480 dev_err(host->hba->dev,
1481 "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
1482 __func__, host->testbus.select_major);
1483 return false;
1484 }
1485
1486 return true;
1487 }
1488
1489 int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1490 {
1491 int reg;
1492 int offset;
1493 u32 mask = TEST_BUS_SUB_SEL_MASK;
1494
1495 if (!host)
1496 return -EINVAL;
1497
1498 if (!ufs_qcom_testbus_cfg_is_ok(host))
1499 return -EPERM;
1500
1501 switch (host->testbus.select_major) {
1502 case TSTBUS_UAWM:
1503 reg = UFS_TEST_BUS_CTRL_0;
1504 offset = 24;
1505 break;
1506 case TSTBUS_UARM:
1507 reg = UFS_TEST_BUS_CTRL_0;
1508 offset = 16;
1509 break;
1510 case TSTBUS_TXUC:
1511 reg = UFS_TEST_BUS_CTRL_0;
1512 offset = 8;
1513 break;
1514 case TSTBUS_RXUC:
1515 reg = UFS_TEST_BUS_CTRL_0;
1516 offset = 0;
1517 break;
1518 case TSTBUS_DFC:
1519 reg = UFS_TEST_BUS_CTRL_1;
1520 offset = 24;
1521 break;
1522 case TSTBUS_TRLUT:
1523 reg = UFS_TEST_BUS_CTRL_1;
1524 offset = 16;
1525 break;
1526 case TSTBUS_TMRLUT:
1527 reg = UFS_TEST_BUS_CTRL_1;
1528 offset = 8;
1529 break;
1530 case TSTBUS_OCSC:
1531 reg = UFS_TEST_BUS_CTRL_1;
1532 offset = 0;
1533 break;
1534 case TSTBUS_WRAPPER:
1535 reg = UFS_TEST_BUS_CTRL_2;
1536 offset = 16;
1537 break;
1538 case TSTBUS_COMBINED:
1539 reg = UFS_TEST_BUS_CTRL_2;
1540 offset = 8;
1541 break;
1542 case TSTBUS_UTP_HCI:
1543 reg = UFS_TEST_BUS_CTRL_2;
1544 offset = 0;
1545 break;
1546 case TSTBUS_UNIPRO:
1547 reg = UFS_UNIPRO_CFG;
1548 offset = 20;
1549 mask = 0xFFF;
1550 break;
1551 /*
1552 * No need for a default case, since
1553 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
1554 * is legal
1555 */
1556 }
1557 mask <<= offset;
1558 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
1559 (u32)host->testbus.select_major << 19,
1560 REG_UFS_CFG1);
1561 ufshcd_rmwl(host->hba, mask,
1562 (u32)host->testbus.select_minor << offset,
1563 reg);
1564 ufs_qcom_enable_test_bus(host);
1565
1566 return 0;
1567 }
1568
1569 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1570 {
1571 u32 reg;
1572 struct ufs_qcom_host *host;
1573
1574 host = ufshcd_get_variant(hba);
1575
1576 ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
1577 "HCI Vendor Specific Registers ");
1578
1579 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
1580 ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");
1581
1582 reg = ufshcd_readl(hba, REG_UFS_CFG1);
1583 reg |= UTP_DBG_RAMS_EN;
1584 ufshcd_writel(hba, reg, REG_UFS_CFG1);
1585
1586 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
1587 ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");
1588
1589 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
1590 ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");
1591
1592 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
1593 ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");
1594
1595 /* clear bit 17 - UTP_DBG_RAMS_EN */
1596 ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
1597
1598 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
1599 ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");
1600
1601 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
1602 ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");
1603
1604 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
1605 ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");
1606
1607 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
1608 ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");
1609
1610 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
1611 ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");
1612
1613 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
1614 ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");
1615
1616 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
1617 ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
1618 }
1619
1620 /**
1621 * ufs_qcom_device_reset() - toggle the (optional) device reset line
1622 * @hba: per-adapter instance
1623 *
1624 * Toggles the (optional) reset line to reset the attached device.
1625 */
1626 static int ufs_qcom_device_reset(struct ufs_hba *hba)
1627 {
1628 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1629
1630 /* reset gpio is optional */
1631 if (!host->device_reset)
1632 return -EOPNOTSUPP;
1633
1634 /*
1635 * The UFS device shall detect reset pulses of 1us, sleep for 10us to
1636 * be on the safe side.
1637 */
1638 ufs_qcom_device_reset_ctrl(hba, true);
1639 usleep_range(10, 15);
1640
1641 ufs_qcom_device_reset_ctrl(hba, false);
1642 usleep_range(10, 15);
1643
1644 return 0;
1645 }
1646
1647 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
1648 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
1649 struct devfreq_dev_profile *p,
1650 struct devfreq_simple_ondemand_data *d)
1651 {
1652 p->polling_ms = 60;
1653 p->timer = DEVFREQ_TIMER_DELAYED;
1654 d->upthreshold = 70;
1655 d->downdifferential = 5;
1656
1657 hba->clk_scaling.suspend_on_no_request = true;
1658 }
1659 #else
1660 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
1661 struct devfreq_dev_profile *p,
1662 struct devfreq_simple_ondemand_data *data)
1663 {
1664 }
1665 #endif
1666
1667 /* Resources */
1668 static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
1669 {.name = "ufs_mem",},
1670 {.name = "mcq",},
1671 /* Submission Queue DAO */
1672 {.name = "mcq_sqd",},
1673 /* Submission Queue Interrupt Status */
1674 {.name = "mcq_sqis",},
1675 /* Completion Queue DAO */
1676 {.name = "mcq_cqd",},
1677 /* Completion Queue Interrupt Status */
1678 {.name = "mcq_cqis",},
1679 /* MCQ vendor specific */
1680 {.name = "mcq_vs",},
1681 };
1682
1683 static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
1684 {
1685 struct platform_device *pdev = to_platform_device(hba->dev);
1686 struct ufshcd_res_info *res;
1687 struct resource *res_mem, *res_mcq;
1688 int i, ret;
1689
1690 memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
1691
1692 for (i = 0; i < RES_MAX; i++) {
1693 res = &hba->res[i];
1694 res->resource = platform_get_resource_byname(pdev,
1695 IORESOURCE_MEM,
1696 res->name);
1697 if (!res->resource) {
1698 dev_info(hba->dev, "Resource %s not provided\n", res->name);
1699 if (i == RES_UFS)
1700 return -ENODEV;
1701 continue;
1702 } else if (i == RES_UFS) {
1703 res_mem = res->resource;
1704 res->base = hba->mmio_base;
1705 continue;
1706 }
1707
1708 res->base = devm_ioremap_resource(hba->dev, res->resource);
1709 if (IS_ERR(res->base)) {
1710 dev_err(hba->dev, "Failed to map res %s, err=%d\n",
1711 res->name, (int)PTR_ERR(res->base));
1712 ret = PTR_ERR(res->base);
1713 res->base = NULL;
1714 return ret;
1715 }
1716 }
1717
1718 /* MCQ resource provided in DT */
1719 res = &hba->res[RES_MCQ];
1720 /* Bail if MCQ resource is provided */
1721 if (res->base)
1722 goto out;
1723
1724 /* Explicitly allocate MCQ resource from ufs_mem */
1725 res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
1726 if (!res_mcq)
1727 return -ENOMEM;
1728
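/*
 * Carve the MCQ window out of the ufs_mem region: it starts at the SQ
 * attribute offset advertised in the MCQ capability register and spans one
 * MCQ_QCFG_SIZE (0x40 byte) config block per hardware queue.
 */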
1729 res_mcq->start = res_mem->start +
1730 MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
1731 res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
1732 res_mcq->flags = res_mem->flags;
1733 res_mcq->name = "mcq";
1734
1735 ret = insert_resource(&iomem_resource, res_mcq);
1736 if (ret) {
1737 dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
1738 ret);
1739 return ret;
1740 }
1741
1742 res->base = devm_ioremap_resource(hba->dev, res_mcq);
1743 if (IS_ERR(res->base)) {
1744 dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
1745 (int)PTR_ERR(res->base));
1746 ret = PTR_ERR(res->base);
1747 goto ioremap_err;
1748 }
1749
1750 out:
1751 hba->mcq_base = res->base;
1752 return 0;
1753 ioremap_err:
1754 res->base = NULL;
1755 remove_resource(res_mcq);
1756 return ret;
1757 }
1758
1759 static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
1760 {
1761 struct ufshcd_res_info *mem_res, *sqdao_res;
1762 struct ufshcd_mcq_opr_info_t *opr;
1763 int i;
1764
1765 mem_res = &hba->res[RES_UFS];
1766 sqdao_res = &hba->res[RES_MCQ_SQD];
1767
1768 if (!mem_res->base || !sqdao_res->base)
1769 return -EINVAL;
1770
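/*
 * The MCQ operation regions (SQ/CQ doorbell and interrupt status) are laid
 * out 0x40 apart starting at the sqdao resource, with per-queue registers
 * strided 0x100 apart.
 */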
1771 for (i = 0; i < OPR_MAX; i++) {
1772 opr = &hba->mcq_opr[i];
1773 opr->offset = sqdao_res->resource->start -
1774 mem_res->resource->start + 0x40 * i;
1775 opr->stride = 0x100;
1776 opr->base = sqdao_res->base + 0x40 * i;
1777 }
1778
1779 return 0;
1780 }
1781
1782 static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
1783 {
1784 /* Qualcomm HC supports up to 64 */
1785 return MAX_SUPP_MAC;
1786 }
1787
1788 static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
1789 unsigned long *ocqs)
1790 {
1791 struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
1792
1793 if (!mcq_vs_res->base)
1794 return -EINVAL;
1795
1796 *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
1797
1798 return 0;
1799 }
1800
1801 static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
1802 {
1803 struct device *dev = msi_desc_to_dev(desc);
1804 struct ufs_hba *hba = dev_get_drvdata(dev);
1805
1806 ufshcd_mcq_config_esi(hba, msg);
1807 }
1808
1809 struct ufs_qcom_irq {
1810 unsigned int irq;
1811 unsigned int idx;
1812 struct ufs_hba *hba;
1813 };
1814
1815 static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
1816 {
1817 struct ufs_qcom_irq *qi = data;
1818 struct ufs_hba *hba = qi->hba;
1819 struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
1820
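/* Acknowledge the CQ interrupt for this queue, then drain its completions. */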
1821 ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
1822 ufshcd_mcq_poll_cqe_lock(hba, hwq);
1823
1824 return IRQ_HANDLED;
1825 }
1826
1827 static int ufs_qcom_config_esi(struct ufs_hba *hba)
1828 {
1829 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1830 struct ufs_qcom_irq *qi;
1831 int nr_irqs, ret;
1832
1833 if (host->esi_enabled)
1834 return 0;
1835
1836 /*
1837 * 1. We only handle CQs as of now.
1838 * 2. Poll queues do not need ESI.
1839 */
1840 nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
1841 qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
1842 if (!qi)
1843 return -ENOMEM;
1844
1845 ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
1846 ufs_qcom_write_msi_msg);
1847 if (ret) {
1848 dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
1849 goto cleanup;
1850 }
1851
1852 for (int idx = 0; idx < nr_irqs; idx++) {
1853 qi[idx].irq = msi_get_virq(hba->dev, idx);
1854 qi[idx].idx = idx;
1855 qi[idx].hba = hba;
1856
1857 ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
1858 IRQF_SHARED, "qcom-mcq-esi", qi + idx);
1859 if (ret) {
1860 dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
1861 __func__, qi[idx].irq, ret);
1862 qi[idx].irq = 0;
1863 goto cleanup;
1864 }
1865 }
1866
1867 if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
1868 host->hw_ver.step == 0) {
1869 ufshcd_rmwl(hba, ESI_VEC_MASK,
1870 FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
1871 REG_UFS_CFG3);
1872 }
1873 ufshcd_mcq_enable_esi(hba);
1874 host->esi_enabled = true;
1875 return 0;
1876
1877 cleanup:
1878 for (int idx = 0; qi[idx].irq; idx++)
1879 devm_free_irq(hba->dev, qi[idx].irq, qi + idx);
1880 platform_device_msi_free_irqs_all(hba->dev);
1881 devm_kfree(hba->dev, qi);
1882 return ret;
1883 }
1884
1885 static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
1886 {
1887 u32 gear = 0;
1888
1889 switch (freq) {
1890 case 403000000:
1891 gear = UFS_HS_G5;
1892 break;
1893 case 300000000:
1894 gear = UFS_HS_G4;
1895 break;
1896 case 201500000:
1897 gear = UFS_HS_G3;
1898 break;
1899 case 150000000:
1900 case 100000000:
1901 gear = UFS_HS_G2;
1902 break;
1903 case 75000000:
1904 case 37500000:
1905 gear = UFS_HS_G1;
1906 break;
1907 default:
1908 dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
1909 break;
1910 }
1911
1912 return gear;
1913 }
1914
1915 /*
1916 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
1917 *
1918 * The variant operations configure the necessary controller and PHY
1919 * handshake during initialization.
1920 */
1921 static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1922 .name = "qcom",
1923 .init = ufs_qcom_init,
1924 .exit = ufs_qcom_exit,
1925 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
1926 .clk_scale_notify = ufs_qcom_clk_scale_notify,
1927 .setup_clocks = ufs_qcom_setup_clocks,
1928 .hce_enable_notify = ufs_qcom_hce_enable_notify,
1929 .link_startup_notify = ufs_qcom_link_startup_notify,
1930 .pwr_change_notify = ufs_qcom_pwr_change_notify,
1931 .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
1932 .fixup_dev_quirks = ufs_qcom_fixup_dev_quirks,
1933 .suspend = ufs_qcom_suspend,
1934 .resume = ufs_qcom_resume,
1935 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
1936 .device_reset = ufs_qcom_device_reset,
1937 .config_scaling_param = ufs_qcom_config_scaling_param,
1938 .mcq_config_resource = ufs_qcom_mcq_config_resource,
1939 .get_hba_mac = ufs_qcom_get_hba_mac,
1940 .op_runtime_config = ufs_qcom_op_runtime_config,
1941 .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
1942 .config_esi = ufs_qcom_config_esi,
1943 .freq_to_gear_speed = ufs_qcom_freq_to_gear_speed,
1944 };
1945
1946 /**
1947 * ufs_qcom_probe - probe routine of the driver
1948 * @pdev: pointer to Platform device handle
1949 *
1950 * Return: zero for success and non-zero for failure.
1951 */
1952 static int ufs_qcom_probe(struct platform_device *pdev)
1953 {
1954 int err;
1955 struct device *dev = &pdev->dev;
1956
1957 /* Perform generic probe */
1958 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
1959 if (err)
1960 return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n");
1961
1962 return 0;
1963 }
1964
1965 /**
1966 * ufs_qcom_remove - set driver_data of the device to NULL
1967 * @pdev: pointer to platform device handle
1970 */
1971 static void ufs_qcom_remove(struct platform_device *pdev)
1972 {
1973 struct ufs_hba *hba = platform_get_drvdata(pdev);
1974 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1975
1976 ufshcd_pltfrm_remove(pdev);
1977 if (host->esi_enabled)
1978 platform_device_msi_free_irqs_all(hba->dev);
1979 }
1980
1981 static const struct ufs_qcom_drvdata ufs_qcom_sm8550_drvdata = {
1982 .quirks = UFSHCD_QUIRK_BROKEN_LSDBS_CAP,
1983 .no_phy_retention = true,
1984 };
1985
1986 static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
1987 { .compatible = "qcom,ufshc" },
1988 { .compatible = "qcom,sm8550-ufshc", .data = &ufs_qcom_sm8550_drvdata },
1989 { .compatible = "qcom,sm8650-ufshc", .data = &ufs_qcom_sm8550_drvdata },
1990 {},
1991 };
1992 MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
1993
1994 #ifdef CONFIG_ACPI
1995 static const struct acpi_device_id ufs_qcom_acpi_match[] = {
1996 { "QCOM24A5" },
1997 { },
1998 };
1999 MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
2000 #endif
2001
2002 static const struct dev_pm_ops ufs_qcom_pm_ops = {
2003 SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
2004 .prepare = ufshcd_suspend_prepare,
2005 .complete = ufshcd_resume_complete,
2006 #ifdef CONFIG_PM_SLEEP
2007 .suspend = ufshcd_system_suspend,
2008 .resume = ufshcd_system_resume,
2009 .freeze = ufshcd_system_freeze,
2010 .restore = ufshcd_system_restore,
2011 .thaw = ufshcd_system_thaw,
2012 #endif
2013 };
2014
2015 static struct platform_driver ufs_qcom_pltform = {
2016 .probe = ufs_qcom_probe,
2017 .remove = ufs_qcom_remove,
2018 .driver = {
2019 .name = "ufshcd-qcom",
2020 .pm = &ufs_qcom_pm_ops,
2021 .of_match_table = of_match_ptr(ufs_qcom_of_match),
2022 .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
2023 },
2024 };
2025 module_platform_driver(ufs_qcom_pltform);
2026
2027 MODULE_DESCRIPTION("Qualcomm UFS host controller driver");
2028 MODULE_LICENSE("GPL v2");
2029