// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"

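/*
 * Device quirk table: entries are matched against the attached device's
 * wManufacturerID and model string when ufs_mtk_fixup_dev_quirks() below
 * hands this table to ufshcd_fixup_dev_quirks(). The
 * UFS_ANY_VENDOR/UFS_ANY_MODEL entry applies its quirks to every device.
 */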
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "H9HQ21AFAMZDAR",
	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
	{}
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};

/*
 * Details of UIC Errors
 */
static const char *const ufs_uic_err_str[] = {
	"PHY Adapter Layer",
	"Data Link Layer",
	"Network Link Layer",
	"Transport Link Layer",
	"DME"
};

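/*
 * The per-bit string tables below are indexed by bit position of the
 * corresponding UIC error register and printed, one line per set bit,
 * by ufs_mtk_event_notify().
 */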
static const char *const ufs_uic_pa_err_str[] = {
	"PHY error on Lane 0",
	"PHY error on Lane 1",
	"PHY error on Lane 2",
	"PHY error on Lane 3",
	"Generic PHY Adapter Error. This should be the LINERESET indication"
};

static const char *const ufs_uic_dl_err_str[] = {
	"NAC_RECEIVED",
	"TCx_REPLAY_TIMER_EXPIRED",
	"AFCx_REQUEST_TIMER_EXPIRED",
	"FCx_PROTECTION_TIMER_EXPIRED",
	"CRC_ERROR",
	"RX_BUFFER_OVERFLOW",
	"MAX_FRAME_LENGTH_EXCEEDED",
	"WRONG_SEQUENCE_NUMBER",
	"AFC_FRAME_SYNTAX_ERROR",
	"NAC_FRAME_SYNTAX_ERROR",
	"EOF_SYNTAX_ERROR",
	"FRAME_SYNTAX_ERROR",
	"BAD_CTRL_SYMBOL_TYPE",
	"PA_INIT_ERROR",
	"PA_ERROR_IND_RECEIVED",
	"PA_INIT"
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}

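/*
 * Assert the HCI, crypto and UniPro resets together, hold them for at
 * least 100 us, then release them in the reverse order of assertion so
 * the whole host IP comes out of reset cleanly.
 */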
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}

		/*
		 * Turn on CLK_CG early to bypass the abnormal ERR_CHK
		 * signal and prevent a host hang.
		 */
		ufshcd_writel(hba,
			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
			      REG_UFS_XOUFS_CTRL);
	}

	return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case, return -EPROBE_DEFER so probing is
		 * retried after the phy driver is ready.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			__func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow an unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}

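/*
 * Reference clock request/release handshake: write REFCLK_REQUEST or
 * REFCLK_RELEASE to REG_UFS_REFCLK_CTRL, then poll until the ack bit
 * mirrors the request bit (the ">> 1" below implies ack sits one bit
 * above request), giving up after REFCLK_REQ_TIMEOUT_US. Firmware is
 * notified through the ufs_mtk_ref_clk_notify() SMC hook before and
 * after the transition.
 */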
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

	if (on) {
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until the ack bit equals the req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}

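/*
 * Poll the link state machine exposed through the debug probe register
 * (low 5 bits of REG_UFS_PROBE after ufs_mtk_dbg_sel()). If the machine
 * is seen anywhere between hibern8 enter and exit, keep polling until it
 * returns to the idle (VS_HCE_BASE) state or retry_ms expires.
 */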
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
			    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a short settling time before the first check */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		sm = val & 0x1f;

		/*
		 * If the state machine is between hibern8 enter and
		 * hibern8 exit, wait until it returns to the idle state.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	if (val == state)
		return 0;

	return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

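/*
 * Crypto-engine boost: raise vcore to the DT-provided floor first, then
 * reparent the crypt clock mux to its high-performance source; unboost
 * reverses the order (low-power parent first, then drop the vcore
 * request). The mux is kept prepared/enabled across the reparent.
 */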
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

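/*
 * Optional host capabilities are declared as boolean DT properties under
 * the host node. An illustrative (not authoritative) fragment:
 *
 *	ufshci@11270000 {
 *		compatible = "mediatek,mt8183-ufshci";
 *		mediatek,ufs-boost-crypt;
 *		mediatek,ufs-support-va09;
 *	};
 *
 * The node name and address above are made up for the example; only the
 * property names come from the parsing below.
 */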
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (!host || !host->pm_qos_init)
		return;

	cpu_latency_qos_update_request(&host->pm_qos_req,
				       boost ? 0 : PM_QOS_DEFAULT_VALUE);
}

static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
	ufs_mtk_boost_crypt(hba, scale_up);
	ufs_mtk_boost_pm_qos(hba, scale_up);
}

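/*
 * Full power on/off ordering: on the way up, power the mphy first, then
 * request the reference clock, then boost performance; the way down is
 * the exact mirror. Perf scaling is handled here only when clk scaling
 * is not supported (otherwise clk_scale_notify drives it).
 */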
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (on) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
	} else {
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
		phy_power_off(host->mphy);
	}
}

/**
 * ufs_mtk_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore this call.
	 * ufs_mtk_setup_clocks() will be called again from ufs_mtk_init()
	 * once initialization is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate the ref-clk and power off the mphy if the
			 * link is in OFF state, or in Hibern8 entered by
			 * either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_pwr_ctrl(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_pwr_ctrl(hba, true);
	}

	return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix the HCI version for platforms that report
			 * an incorrect one.
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

/**
 * ufs_mtk_init_clocks - Init mtk driver private clocks
 *
 * @hba: per adapter instance
 */
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki, *clki_tmp;

	/*
	 * Find private clocks and store them in struct ufs_mtk_clk.
	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from the list
	 * to avoid them being switched on/off in clock gating.
	 */
	list_for_each_entry_safe(clki, clki_tmp, head, list) {
		if (!strcmp(clki->name, "ufs_sel")) {
			host->mclk.ufs_sel_clki = clki;
		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
			host->mclk.ufs_sel_max_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
			host->mclk.ufs_sel_min_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		}
	}

	if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
	    !mclk->ufs_sel_min_clki) {
		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
		dev_info(hba->dev,
			 "%s: Clk-scaling not ready. Feature disabled.",
			 __func__);
	}
}

#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct device_node *np = hba->dev->of_node;
	struct device *dev = hba->dev;
	char vcc_name[MAX_VCC_NAME];
	struct arm_smccc_res res;
	int err, ver;

	if (hba->vreg_info.vcc)
		return 0;

	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
		ufs_mtk_get_vcc_num(res);
		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
		else
			return -ENODEV;
	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	} else {
		return 0;
	}

	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
	if (err)
		return err;

	err = ufshcd_get_vreg(dev, info->vcc);
	if (err)
		return err;

	err = regulator_enable(info->vcc->reg);
	if (!err) {
		info->vcc->enabled = true;
		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
	}

	return err;
}

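/*
 * Keep the VCCQ rail the attached device actually uses always-on and
 * release the unused one: UFS 3.x devices (wspecversion >= 0x0300) run
 * from VCCQ, older devices from VCCQ2. The rail assignment follows the
 * usual UFS spec convention; the code below only keys off wspecversion.
 */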
static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct ufs_vreg **vreg_on, **vreg_off;

	if (hba->dev_info.wspecversion >= 0x0300) {
		vreg_on = &info->vccq;
		vreg_off = &info->vccq2;
	} else {
		vreg_on = &info->vccq2;
		vreg_off = &info->vccq;
	}

	if (*vreg_on)
		(*vreg_on)->always_on = true;

	if (*vreg_off) {
		regulator_disable((*vreg_off)->reg);
		devm_kfree(hba->dev, (*vreg_off)->name);
		devm_kfree(hba->dev, *vreg_off);
		*vreg_off = NULL;
	}
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;

	/* Enable clk scaling */
	hba->caps |= UFSHCD_CAP_CLK_SCALING;

	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	ufs_mtk_init_clocks(hba);

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() and thus
	 * the phy clock setup is skipped there.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	/* Initialize pm-qos request */
	cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
	host->pm_qos_init = true;

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

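/*
 * Decide whether the power-mode change should go through FASTAUTO
 * first: only when the platform capability is set, the HS rate is
 * actually changing, and each direction either requests FAST_MODE or
 * targets at least HS-G4. Otherwise the regular PMC path is taken.
 */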
static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (!ufs_mtk_is_pmc_via_fastauto(hba))
		return false;

	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
		return false;

	if (dev_req_params->pwr_tx != FAST_MODE &&
	    dev_req_params->gear_tx < UFS_HS_G4)
		return false;

	if (dev_req_params->pwr_rx != FAST_MODE &&
	    dev_req_params->gear_rx < UFS_HS_G4)
		return false;

	return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G5;
	host_cap.hs_tx_gear = UFS_HS_G5;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			       dev_req_params->lane_tx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			       dev_req_params->lane_rx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       dev_req_params->hs_rate);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       PA_NO_ADAPT);

		ret = ufshcd_uic_change_pwr_mode(hba,
					FASTAUTO_MODE << 4 | FASTAUTO_MODE);

		if (ret) {
			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
				__func__, ret);
		}
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
					   dev_req_params->gear_tx,
					   PA_INITIAL_ADAPT);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * If the UIC command failed, forcibly stay in non-LPM
		 * mode so that the default hba_enable_delay_us value is
		 * used when re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

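/*
 * Derive the clock-gating delay from the auto-hibern8 idle timer so
 * that gating only kicks in after the link has had a chance to enter
 * hibern8: the AH8 timer value plus a 5 ms margin (10 + 5 ms when AH8
 * is unused). The timer field is treated as milliseconds here, which
 * matches the ms scale programmed in ufs_mtk_post_link().
 */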
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
	}
}

static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be programmed into the controller during hba probe */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect a
	 * positive or negative RST_n pulse of 1 us or longer.
	 *
	 * To be on the safe side, keep the reset low for at least 10 us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	/* Disable the UniPro reset-confirm feature */
	ufshcd_writel(hba,
		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
		      REG_UFS_XOUFS_CTRL);

	err = ufs_mtk_unipro_set_lpm(hba, true);
	if (err) {
		/* Resume UniPro state for the following error recovery */
		ufs_mtk_unipro_set_lpm(hba, false);
		return err;
	}

	return 0;
}

static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct ufs_vreg *vccqx = NULL;

	if (hba->vreg_info.vccq)
		vccqx = hba->vreg_info.vccq;
	else
		vccqx = hba->vreg_info.vccq2;

	regulator_set_mode(vccqx->reg,
			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct arm_smccc_res res;

	ufs_mtk_device_pwr_ctrl(!lpm,
				(unsigned long)hba->dev_info.wspecversion,
				res);
}

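/*
 * Regulator low-power ordering: entering LPM drops VCCQ/VCCQ2 to idle
 * mode before the VSx firmware call; leaving LPM reverses that,
 * restoring VSx first. LPM is skipped entirely while the device is
 * still active or while VCC is still powered.
 */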
static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
		return;

	/* Skip if VCC is assumed always-on */
	if (!hba->vreg_info.vcc)
		return;

	/* Bypass LPM when device is still active */
	if (lpm && ufshcd_is_ufs_dev_active(hba))
		return;

	/* Bypass LPM if VCC is enabled */
	if (lpm && hba->vreg_info.vcc->enabled)
		return;

	if (lpm) {
		ufs_mtk_vccqx_set_lpm(hba, lpm);
		ufs_mtk_vsx_set_lpm(hba, lpm);
	} else {
		ufs_mtk_vsx_set_lpm(hba, lpm);
		ufs_mtk_vccqx_set_lpm(hba, lpm);
	}
}

static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	int ret;

	/* disable auto-hibern8 */
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* wait for the host to return to idle state once auto-hibern8 is off */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
	enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (!ufshcd_is_auto_hibern8_supported(hba))
			return 0;
		ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error is returned here, to prevent
		 * ufshcd_suspend() from re-enabling regulators while
		 * the vreg is still in low-power mode.
		 */
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

	return 0;
fail:
	/*
	 * Forcibly set the link to the off state to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for a complete host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		ufs_mtk_dev_vreg_set_lpm(hba, false);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	/* Dump ufshci registers 0x140 ~ 0x14C */
	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
			 "XOUFS Ctrl (0x140): ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* Dump ufshci registers 0x2200 ~ 0x22AC */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl (0x2200): ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
	}

	/*
	 * Decide the waiting time before gating the reference clock and
	 * after ungating it according to each vendor's requirement.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
	else
		ufs_mtk_setup_ref_clk_wait_us(hba,
					      REFCLK_DEFAULT_WAIT_US);
	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC is kept always-on, thus no delay is needed
		 * during regulator operations.
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}

	ufs_mtk_vreg_fix_vcc(hba);
	ufs_mtk_vreg_fix_vccqx(hba);
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;
	unsigned long reg;
	u8 bit;

	trace_ufs_mtk_event(evt, val);

	/* Print details of UIC Errors */
	if (evt <= UFS_EVT_DME_ERR) {
		dev_info(hba->dev,
			 "Host UIC Error Code (%s): %08x\n",
			 ufs_uic_err_str[evt], val);
		reg = val;
	}

	if (evt == UFS_EVT_PA_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
	}

	if (evt == UFS_EVT_DL_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
	}
}

static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
				struct devfreq_dev_profile *profile,
				struct devfreq_simple_ondemand_data *data)
{
	/* Customize the min gear in clk scaling */
	hba->clk_scaling.min_gear = UFS_HS_G4;

	hba->vps->devfreq_profile.polling_ms = 200;
	hba->vps->ondemand_data.upthreshold = 50;
	hba->vps->ondemand_data.downdifferential = 20;
}

/**
 * ufs_mtk_clk_scale - Internal clk scaling operation
 *
 * The MTK platform supports clk scaling by switching the parent of
 * ufs_sel (a mux). ufs_sel feeds ufs_ck downstream, which clocks the
 * UFS hardware directly. The max and min clock rates of ufs_sel defined
 * in the dts should match the rates of "ufs_sel_max_src" and
 * "ufs_sel_min_src" respectively. This prevents changing the rate of a
 * pll clock that is shared between modules.
 *
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 */
static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
	int ret = 0;

	ret = clk_prepare_enable(clki->clk);
	if (ret) {
		dev_info(hba->dev,
			 "clk_prepare_enable() fail, ret: %d\n", ret);
		return;
	}

	if (scale_up) {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
		clki->curr_freq = clki->max_freq;
	} else {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
		clki->curr_freq = clki->min_freq;
	}

	if (ret) {
		dev_info(hba->dev,
			 "Failed to set ufs_sel_clki, ret: %d\n", ret);
	}

	clk_disable_unprepare(clki->clk);

	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}

static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
				    enum ufs_notify_change_status status)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return 0;

	if (status == PRE_CHANGE) {
		/* Switch parent before clk_set_rate() */
		ufs_mtk_clk_scale(hba, scale_up);
	} else {
		/* Request interrupt latency QoS accordingly */
		ufs_mtk_scale_perf(hba, scale_up);
	}

	return 0;
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
	.config_scaling_param = ufs_mtk_config_scaling_param,
	.clk_scale_notify    = ufs_mtk_clk_scale_notify,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
		DL_FLAG_AUTOPROBE_CONSUMER);
	put_device(&reset_pdev->dev);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_system_resume(dev);
}
#endif

static int ufs_mtk_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;

	ret = ufshcd_runtime_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_runtime_resume(dev);
}

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
				ufs_mtk_system_resume)
	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
			   ufs_mtk_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);