xref: /linux/drivers/ufs/host/ufs-mediatek.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *	Stanley Chu <stanley.chu@mediatek.com>
6  *	Peter Wang <peter.wang@mediatek.com>
7  */
8 
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_device.h>
17 #include <linux/of_platform.h>
18 #include <linux/phy/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/regulator/consumer.h>
21 #include <linux/reset.h>
22 
23 #include <ufs/ufshcd.h>
24 #include "ufshcd-pltfrm.h"
25 #include <ufs/ufs_quirks.h>
26 #include <ufs/unipro.h>
27 
28 #include "ufs-mediatek.h"
29 #include "ufs-mediatek-sip.h"
30 
31 static int  ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
32 static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
33 
34 #define CREATE_TRACE_POINTS
35 #include "ufs-mediatek-trace.h"
36 #undef CREATE_TRACE_POINTS
37 
38 #define MAX_SUPP_MAC 64
39 #define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
40 
41 static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
42 	{ .wmanufacturerid = UFS_ANY_VENDOR,
43 	  .model = UFS_ANY_MODEL,
44 	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
45 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
46 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
47 	  .model = "H9HQ21AFAMZDAR",
48 	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
49 	{}
50 };
51 
52 static const struct of_device_id ufs_mtk_of_match[] = {
53 	{ .compatible = "mediatek,mt8183-ufshci" },
54 	{ .compatible = "mediatek,mt8195-ufshci" },
55 	{},
56 };
57 MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
58 
59 /*
60  * Details of UIC Errors
61  */
62 static const char *const ufs_uic_err_str[] = {
63 	"PHY Adapter Layer",
64 	"Data Link Layer",
65 	"Network Link Layer",
66 	"Transport Link Layer",
67 	"DME"
68 };
69 
70 static const char *const ufs_uic_pa_err_str[] = {
71 	"PHY error on Lane 0",
72 	"PHY error on Lane 1",
73 	"PHY error on Lane 2",
74 	"PHY error on Lane 3",
75 	"Generic PHY Adapter Error. This should be the LINERESET indication"
76 };
77 
78 static const char *const ufs_uic_dl_err_str[] = {
79 	"NAC_RECEIVED",
80 	"TCx_REPLAY_TIMER_EXPIRED",
81 	"AFCx_REQUEST_TIMER_EXPIRED",
82 	"FCx_PROTECTION_TIMER_EXPIRED",
83 	"CRC_ERROR",
84 	"RX_BUFFER_OVERFLOW",
85 	"MAX_FRAME_LENGTH_EXCEEDED",
86 	"WRONG_SEQUENCE_NUMBER",
87 	"AFC_FRAME_SYNTAX_ERROR",
88 	"NAC_FRAME_SYNTAX_ERROR",
89 	"EOF_SYNTAX_ERROR",
90 	"FRAME_SYNTAX_ERROR",
91 	"BAD_CTRL_SYMBOL_TYPE",
92 	"PA_INIT_ERROR",
93 	"PA_ERROR_IND_RECEIVED",
94 	"PA_INIT"
95 };
96 
97 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
98 {
99 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
100 
101 	return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
102 }
103 
104 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
105 {
106 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
107 
108 	return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL;
109 }
110 
111 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
112 {
113 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
114 
115 	return host->caps & UFS_MTK_CAP_BROKEN_VCC;
116 }
117 
118 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
119 {
120 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
121 
122 	return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO;
123 }
124 
125 static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
126 {
127 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
128 
129 	return host->caps & UFS_MTK_CAP_TX_SKEW_FIX;
130 }
131 
132 static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
133 {
134 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
135 
136 	return host->caps & UFS_MTK_CAP_RTFF_MTCMOS;
137 }
138 
139 static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
140 {
141 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
142 
143 	return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM;
144 }
145 
146 static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba)
147 {
148 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
149 	struct ufs_mtk_clk *mclk = &host->mclk;
150 
151 	return mclk->ufs_sel_clki &&
152 		mclk->ufs_sel_max_clki &&
153 		mclk->ufs_sel_min_clki;
154 }
155 
156 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
157 {
158 	u32 tmp;
159 
160 	if (enable) {
161 		ufshcd_dme_get(hba,
162 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
163 		tmp = tmp |
164 		      (1 << RX_SYMBOL_CLK_GATE_EN) |
165 		      (1 << SYS_CLK_GATE_EN) |
166 		      (1 << TX_CLK_GATE_EN);
167 		ufshcd_dme_set(hba,
168 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
169 
170 		ufshcd_dme_get(hba,
171 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
172 		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
173 		ufshcd_dme_set(hba,
174 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
175 	} else {
176 		ufshcd_dme_get(hba,
177 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
178 		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
179 			      (1 << SYS_CLK_GATE_EN) |
180 			      (1 << TX_CLK_GATE_EN));
181 		ufshcd_dme_set(hba,
182 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
183 
184 		ufshcd_dme_get(hba,
185 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
186 		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
187 		ufshcd_dme_set(hba,
188 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
189 	}
190 }
191 
192 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
193 {
194 	struct arm_smccc_res res;
195 
196 	ufs_mtk_crypto_ctrl(res, 1);
197 	if (res.a0) {
198 		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
199 			 __func__, res.a0);
200 		hba->caps &= ~UFSHCD_CAP_CRYPTO;
201 	}
202 }
203 
204 static void ufs_mtk_host_reset(struct ufs_hba *hba)
205 {
206 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
207 	struct arm_smccc_res res;
208 
209 	reset_control_assert(host->hci_reset);
210 	reset_control_assert(host->crypto_reset);
211 	reset_control_assert(host->unipro_reset);
212 	reset_control_assert(host->mphy_reset);
213 
214 	usleep_range(100, 110);
215 
216 	reset_control_deassert(host->unipro_reset);
217 	reset_control_deassert(host->crypto_reset);
218 	reset_control_deassert(host->hci_reset);
219 	reset_control_deassert(host->mphy_reset);
220 
221 	/* restore mphy setting aftre mphy reset */
222 	if (host->mphy_reset)
223 		ufs_mtk_mphy_ctrl(UFS_MPHY_RESTORE, res);
224 }
225 
226 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
227 				       struct reset_control **rc,
228 				       char *str)
229 {
230 	*rc = devm_reset_control_get(hba->dev, str);
231 	if (IS_ERR(*rc)) {
232 		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
233 			 str, PTR_ERR(*rc));
234 		*rc = NULL;
235 	}
236 }
237 
238 static void ufs_mtk_init_reset(struct ufs_hba *hba)
239 {
240 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
241 
242 	ufs_mtk_init_reset_control(hba, &host->hci_reset,
243 				   "hci_rst");
244 	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
245 				   "unipro_rst");
246 	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
247 				   "crypto_rst");
248 	ufs_mtk_init_reset_control(hba, &host->mphy_reset,
249 				   "mphy_rst");
250 }
251 
252 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
253 				     enum ufs_notify_change_status status)
254 {
255 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
256 
257 	if (status == PRE_CHANGE) {
258 		if (host->unipro_lpm) {
259 			hba->vps->hba_enable_delay_us = 0;
260 		} else {
261 			hba->vps->hba_enable_delay_us = 600;
262 			ufs_mtk_host_reset(hba);
263 		}
264 
265 		if (hba->caps & UFSHCD_CAP_CRYPTO)
266 			ufs_mtk_crypto_enable(hba);
267 
268 		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
269 			ufshcd_writel(hba, 0,
270 				      REG_AUTO_HIBERNATE_IDLE_TIMER);
271 			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
272 			hba->ahit = 0;
273 		}
274 
275 		/*
276 		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
277 		 * to prevent host hang issue
278 		 */
279 		ufshcd_writel(hba,
280 			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
281 			      REG_UFS_XOUFS_CTRL);
282 
283 		/* DDR_EN setting */
284 		if (host->ip_ver >= IP_VER_MT6989) {
285 			ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
286 				0x453000, REG_UFS_MMIO_OPT_CTRL_0);
287 		}
288 
289 	}
290 
291 	return 0;
292 }
293 
294 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
295 {
296 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
297 	struct device *dev = hba->dev;
298 	struct device_node *np = dev->of_node;
299 	int err = 0;
300 
301 	host->mphy = devm_of_phy_get_by_index(dev, np, 0);
302 
303 	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
304 		/*
305 		 * UFS driver might be probed before the phy driver does.
306 		 * In that case we would like to return EPROBE_DEFER code.
307 		 */
308 		err = -EPROBE_DEFER;
309 		dev_info(dev,
310 			 "%s: required phy hasn't probed yet. err = %d\n",
311 			__func__, err);
312 	} else if (IS_ERR(host->mphy)) {
313 		err = PTR_ERR(host->mphy);
314 		if (err != -ENODEV) {
315 			dev_info(dev, "%s: PHY get failed %d\n", __func__,
316 				 err);
317 		}
318 	}
319 
320 	if (err)
321 		host->mphy = NULL;
322 	/*
323 	 * Allow unbound mphy because not every platform needs specific
324 	 * mphy control.
325 	 */
326 	if (err == -ENODEV)
327 		err = 0;
328 
329 	return err;
330 }
331 
332 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
333 {
334 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
335 	struct arm_smccc_res res;
336 	ktime_t timeout, time_checked;
337 	u32 value;
338 
339 	if (host->ref_clk_enabled == on)
340 		return 0;
341 
342 	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
343 
344 	if (on) {
345 		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
346 	} else {
347 		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
348 		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
349 	}
350 
351 	/* Wait for ack */
352 	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
353 	do {
354 		time_checked = ktime_get();
355 		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
356 
357 		/* Wait until ack bit equals to req bit */
358 		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
359 			goto out;
360 
361 		usleep_range(100, 200);
362 	} while (ktime_before(time_checked, timeout));
363 
364 	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
365 
366 	/*
367 	 * If clock on timeout, assume clock is off, notify tfa do clock
368 	 * off setting.(keep DIFN disable, release resource)
369 	 * If clock off timeout, assume clock will off finally,
370 	 * set ref_clk_enabled directly.(keep DIFN disable, keep resource)
371 	 */
372 	if (on)
373 		ufs_mtk_ref_clk_notify(false, POST_CHANGE, res);
374 	else
375 		host->ref_clk_enabled = false;
376 
377 	return -ETIMEDOUT;
378 
379 out:
380 	host->ref_clk_enabled = on;
381 	if (on)
382 		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
383 
384 	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
385 
386 	return 0;
387 }
388 
389 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
390 					  u16 gating_us)
391 {
392 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
393 
394 	if (hba->dev_info.clk_gating_wait_us) {
395 		host->ref_clk_gating_wait_us =
396 			hba->dev_info.clk_gating_wait_us;
397 	} else {
398 		host->ref_clk_gating_wait_us = gating_us;
399 	}
400 
401 	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
402 }
403 
404 static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
405 {
406 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
407 
408 	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
409 		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
410 		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
411 		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
412 		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
413 		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
414 	} else {
415 		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
416 	}
417 }
418 
419 static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
420 			    unsigned long retry_ms)
421 {
422 	u64 timeout, time_checked;
423 	u32 val, sm;
424 	bool wait_idle;
425 
426 	/* cannot use plain ktime_get() in suspend */
427 	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
428 
429 	/* wait a specific time after check base */
430 	udelay(10);
431 	wait_idle = false;
432 
433 	do {
434 		time_checked = ktime_get_mono_fast_ns();
435 		ufs_mtk_dbg_sel(hba);
436 		val = ufshcd_readl(hba, REG_UFS_PROBE);
437 
438 		sm = val & 0x1f;
439 
440 		/*
441 		 * if state is in H8 enter and H8 enter confirm
442 		 * wait until return to idle state.
443 		 */
444 		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
445 			wait_idle = true;
446 			udelay(50);
447 			continue;
448 		} else if (!wait_idle)
449 			break;
450 
451 		if (wait_idle && (sm == VS_HCE_BASE))
452 			break;
453 	} while (time_checked < timeout);
454 
455 	if (wait_idle && sm != VS_HCE_BASE) {
456 		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
457 		return -ETIMEDOUT;
458 	}
459 
460 	return 0;
461 }
462 
463 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
464 				   unsigned long max_wait_ms)
465 {
466 	ktime_t timeout, time_checked;
467 	u32 val;
468 
469 	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
470 	do {
471 		time_checked = ktime_get();
472 		ufs_mtk_dbg_sel(hba);
473 		val = ufshcd_readl(hba, REG_UFS_PROBE);
474 		val = val >> 28;
475 
476 		if (val == state)
477 			return 0;
478 
479 		/* Sleep for max. 200us */
480 		usleep_range(100, 200);
481 	} while (ktime_before(time_checked, timeout));
482 
483 	return -ETIMEDOUT;
484 }
485 
486 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
487 {
488 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
489 	struct phy *mphy = host->mphy;
490 	struct arm_smccc_res res;
491 	int ret = 0;
492 
493 	if (!mphy || !(on ^ host->mphy_powered_on))
494 		return 0;
495 
496 	if (on) {
497 		if (ufs_mtk_is_va09_supported(hba)) {
498 			ret = regulator_enable(host->reg_va09);
499 			if (ret < 0)
500 				goto out;
501 			/* wait 200 us to stablize VA09 */
502 			usleep_range(200, 210);
503 			ufs_mtk_va09_pwr_ctrl(res, 1);
504 		}
505 		phy_power_on(mphy);
506 	} else {
507 		phy_power_off(mphy);
508 		if (ufs_mtk_is_va09_supported(hba)) {
509 			ufs_mtk_va09_pwr_ctrl(res, 0);
510 			ret = regulator_disable(host->reg_va09);
511 		}
512 	}
513 out:
514 	if (ret) {
515 		dev_info(hba->dev,
516 			 "failed to %s va09: %d\n",
517 			 on ? "enable" : "disable",
518 			 ret);
519 	} else {
520 		host->mphy_powered_on = on;
521 	}
522 
523 	return ret;
524 }
525 
526 static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
527 				struct clk **clk_out)
528 {
529 	struct clk *clk;
530 	int err = 0;
531 
532 	clk = devm_clk_get(dev, name);
533 	if (IS_ERR(clk))
534 		err = PTR_ERR(clk);
535 	else
536 		*clk_out = clk;
537 
538 	return err;
539 }
540 
541 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
542 {
543 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
544 	struct ufs_mtk_crypt_cfg *cfg;
545 	struct regulator *reg;
546 	int volt, ret;
547 
548 	if (!ufs_mtk_is_boost_crypt_enabled(hba))
549 		return;
550 
551 	cfg = host->crypt;
552 	volt = cfg->vcore_volt;
553 	reg = cfg->reg_vcore;
554 
555 	ret = clk_prepare_enable(cfg->clk_crypt_mux);
556 	if (ret) {
557 		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
558 			 ret);
559 		return;
560 	}
561 
562 	if (boost) {
563 		ret = regulator_set_voltage(reg, volt, INT_MAX);
564 		if (ret) {
565 			dev_info(hba->dev,
566 				 "failed to set vcore to %d\n", volt);
567 			goto out;
568 		}
569 
570 		ret = clk_set_parent(cfg->clk_crypt_mux,
571 				     cfg->clk_crypt_perf);
572 		if (ret) {
573 			dev_info(hba->dev,
574 				 "failed to set clk_crypt_perf\n");
575 			regulator_set_voltage(reg, 0, INT_MAX);
576 			goto out;
577 		}
578 	} else {
579 		ret = clk_set_parent(cfg->clk_crypt_mux,
580 				     cfg->clk_crypt_lp);
581 		if (ret) {
582 			dev_info(hba->dev,
583 				 "failed to set clk_crypt_lp\n");
584 			goto out;
585 		}
586 
587 		ret = regulator_set_voltage(reg, 0, INT_MAX);
588 		if (ret) {
589 			dev_info(hba->dev,
590 				 "failed to set vcore to MIN\n");
591 		}
592 	}
593 out:
594 	clk_disable_unprepare(cfg->clk_crypt_mux);
595 }
596 
597 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
598 				 struct clk **clk)
599 {
600 	int ret;
601 
602 	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
603 	if (ret) {
604 		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
605 			 name, ret);
606 	}
607 
608 	return ret;
609 }
610 
611 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
612 {
613 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
614 	struct ufs_mtk_crypt_cfg *cfg;
615 	struct device *dev = hba->dev;
616 	struct regulator *reg;
617 	u32 volt;
618 
619 	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
620 				   GFP_KERNEL);
621 	if (!host->crypt)
622 		goto disable_caps;
623 
624 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
625 	if (IS_ERR(reg)) {
626 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
627 			 PTR_ERR(reg));
628 		goto disable_caps;
629 	}
630 
631 	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
632 				 &volt)) {
633 		dev_info(dev, "failed to get boost-crypt-vcore-min");
634 		goto disable_caps;
635 	}
636 
637 	cfg = host->crypt;
638 	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
639 				  &cfg->clk_crypt_mux))
640 		goto disable_caps;
641 
642 	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
643 				  &cfg->clk_crypt_lp))
644 		goto disable_caps;
645 
646 	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
647 				  &cfg->clk_crypt_perf))
648 		goto disable_caps;
649 
650 	cfg->reg_vcore = reg;
651 	cfg->vcore_volt = volt;
652 	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
653 
654 disable_caps:
655 	return;
656 }
657 
658 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
659 {
660 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
661 
662 	host->reg_va09 = regulator_get(hba->dev, "va09");
663 	if (IS_ERR(host->reg_va09))
664 		dev_info(hba->dev, "failed to get va09");
665 	else
666 		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
667 }
668 
669 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
670 {
671 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
672 	struct device_node *np = hba->dev->of_node;
673 
674 	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
675 		ufs_mtk_init_boost_crypt(hba);
676 
677 	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
678 		ufs_mtk_init_va09_pwr_ctrl(hba);
679 
680 	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
681 		host->caps |= UFS_MTK_CAP_DISABLE_AH8;
682 
683 	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
684 		host->caps |= UFS_MTK_CAP_BROKEN_VCC;
685 
686 	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
687 		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
688 
689 	if (of_property_read_bool(np, "mediatek,ufs-tx-skew-fix"))
690 		host->caps |= UFS_MTK_CAP_TX_SKEW_FIX;
691 
692 	if (of_property_read_bool(np, "mediatek,ufs-disable-mcq"))
693 		host->caps |= UFS_MTK_CAP_DISABLE_MCQ;
694 
695 	if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
696 		host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
697 
698 	if (of_property_read_bool(np, "mediatek,ufs-broken-rtc"))
699 		host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC;
700 
701 	dev_info(hba->dev, "caps: 0x%x", host->caps);
702 }
703 
704 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
705 {
706 	ufs_mtk_boost_crypt(hba, scale_up);
707 }
708 
709 static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
710 {
711 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
712 
713 	if (on) {
714 		phy_power_on(host->mphy);
715 		ufs_mtk_setup_ref_clk(hba, on);
716 		if (!ufshcd_is_clkscaling_supported(hba))
717 			ufs_mtk_scale_perf(hba, on);
718 	} else {
719 		if (!ufshcd_is_clkscaling_supported(hba))
720 			ufs_mtk_scale_perf(hba, on);
721 		ufs_mtk_setup_ref_clk(hba, on);
722 		phy_power_off(host->mphy);
723 	}
724 }
725 
726 static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
727 {
728 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
729 	u32 irq, i;
730 
731 	if (!hba->mcq_enabled)
732 		return;
733 
734 	if (host->mcq_nr_intr == 0)
735 		return;
736 
737 	for (i = 0; i < host->mcq_nr_intr; i++) {
738 		irq = host->mcq_intr_info[i].irq;
739 		disable_irq(irq);
740 	}
741 	host->is_mcq_intr_enabled = false;
742 }
743 
744 static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
745 {
746 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
747 	u32 irq, i;
748 
749 	if (!hba->mcq_enabled)
750 		return;
751 
752 	if (host->mcq_nr_intr == 0)
753 		return;
754 
755 	if (host->is_mcq_intr_enabled == true)
756 		return;
757 
758 	for (i = 0; i < host->mcq_nr_intr; i++) {
759 		irq = host->mcq_intr_info[i].irq;
760 		enable_irq(irq);
761 	}
762 	host->is_mcq_intr_enabled = true;
763 }
764 
765 /**
766  * ufs_mtk_setup_clocks - enables/disable clocks
767  * @hba: host controller instance
768  * @on: If true, enable clocks else disable them.
769  * @status: PRE_CHANGE or POST_CHANGE notify
770  *
771  * Return: 0 on success, non-zero on failure.
772  */
773 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
774 				enum ufs_notify_change_status status)
775 {
776 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
777 	bool clk_pwr_off = false;
778 	int ret = 0;
779 
780 	/*
781 	 * In case ufs_mtk_init() is not yet done, simply ignore.
782 	 * This ufs_mtk_setup_clocks() shall be called from
783 	 * ufs_mtk_init() after init is done.
784 	 */
785 	if (!host)
786 		return 0;
787 
788 	if (!on && status == PRE_CHANGE) {
789 		if (ufshcd_is_link_off(hba)) {
790 			clk_pwr_off = true;
791 		} else if (ufshcd_is_link_hibern8(hba) ||
792 			 (!ufshcd_can_hibern8_during_gating(hba) &&
793 			 ufshcd_is_auto_hibern8_enabled(hba))) {
794 			/*
795 			 * Gate ref-clk and poweroff mphy if link state is in
796 			 * OFF or Hibern8 by either Auto-Hibern8 or
797 			 * ufshcd_link_state_transition().
798 			 */
799 			ret = ufs_mtk_wait_link_state(hba,
800 						      VS_LINK_HIBERN8,
801 						      15);
802 			if (!ret)
803 				clk_pwr_off = true;
804 		}
805 
806 		if (clk_pwr_off) {
807 			ufs_mtk_pwr_ctrl(hba, false);
808 		} else {
809 			dev_warn(hba->dev, "Clock is not turned off, hba->ahit = 0x%x, AHIT = 0x%x\n",
810 				hba->ahit,
811 				ufshcd_readl(hba,
812 					REG_AUTO_HIBERNATE_IDLE_TIMER));
813 		}
814 		ufs_mtk_mcq_disable_irq(hba);
815 	} else if (on && status == POST_CHANGE) {
816 		ufs_mtk_pwr_ctrl(hba, true);
817 		ufs_mtk_mcq_enable_irq(hba);
818 	}
819 
820 	return ret;
821 }
822 
823 static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
824 {
825 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
826 	struct blk_mq_tag_set *tag_set = &hba->host->tag_set;
827 	struct blk_mq_queue_map	*map = &tag_set->map[HCTX_TYPE_DEFAULT];
828 	unsigned int nr = map->nr_queues;
829 	unsigned int q_index;
830 
831 	q_index = map->mq_map[cpu];
832 	if (q_index >= nr) {
833 		dev_err(hba->dev, "hwq index %d exceed %d\n",
834 			q_index, nr);
835 		return MTK_MCQ_INVALID_IRQ;
836 	}
837 
838 	return host->mcq_intr_info[q_index].irq;
839 }
840 
841 static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu)
842 {
843 	unsigned int irq, _cpu;
844 	int ret;
845 
846 	irq = ufs_mtk_mcq_get_irq(hba, cpu);
847 	if (irq == MTK_MCQ_INVALID_IRQ) {
848 		dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu);
849 		return;
850 	}
851 
852 	/* force migrate irq of cpu0 to cpu3 */
853 	_cpu = (cpu == 0) ? 3 : cpu;
854 	ret = irq_set_affinity(irq, cpumask_of(_cpu));
855 	if (ret) {
856 		dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n",
857 			irq, _cpu);
858 		return;
859 	}
860 	dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu);
861 }
862 
863 static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver)
864 {
865 	bool is_legacy = false;
866 
867 	switch (hw_ip_ver) {
868 	case IP_LEGACY_VER_MT6893:
869 	case IP_LEGACY_VER_MT6781:
870 		/* can add other legacy chipset ID here accordingly */
871 		is_legacy = true;
872 		break;
873 	default:
874 		break;
875 	}
876 	dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy);
877 
878 	return is_legacy;
879 }
880 
881 /*
882  * HW version format has been changed from 01MMmmmm to 1MMMmmmm, since
883  * project MT6878. In order to perform correct version comparison,
884  * version number is changed by SW for the following projects.
885  * IP_VER_MT6983	0x00360000 to 0x10360000
886  * IP_VER_MT6897	0x01440000 to 0x10440000
887  * IP_VER_MT6989	0x01450000 to 0x10450000
888  * IP_VER_MT6991	0x01460000 to 0x10460000
889  */
890 static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba)
891 {
892 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
893 	u32 hw_ip_ver;
894 
895 	hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
896 
897 	if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) ||
898 	    ((hw_ip_ver & (0xFF << 24)) == 0)) {
899 		hw_ip_ver &= ~(0xFF << 24);
900 		hw_ip_ver |= (0x1 << 28);
901 	}
902 
903 	host->ip_ver = hw_ip_ver;
904 
905 	host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver);
906 }
907 
908 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
909 {
910 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
911 	int ret, ver = 0;
912 
913 	if (host->hw_ver.major)
914 		return;
915 
916 	/* Set default (minimum) version anyway */
917 	host->hw_ver.major = 2;
918 
919 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
920 	if (!ret) {
921 		if (ver >= UFS_UNIPRO_VER_1_8) {
922 			host->hw_ver.major = 3;
923 			/*
924 			 * Fix HCI version for some platforms with
925 			 * incorrect version
926 			 */
927 			if (hba->ufs_version < ufshci_version(3, 0))
928 				hba->ufs_version = ufshci_version(3, 0);
929 		}
930 	}
931 }
932 
933 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
934 {
935 	return hba->ufs_version;
936 }
937 
938 /**
939  * ufs_mtk_init_clocks - Init mtk driver private clocks
940  *
941  * @hba: per adapter instance
942  */
943 static void ufs_mtk_init_clocks(struct ufs_hba *hba)
944 {
945 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
946 	struct list_head *head = &hba->clk_list_head;
947 	struct ufs_clk_info *clki, *clki_tmp;
948 	struct device *dev = hba->dev;
949 	struct regulator *reg;
950 	u32 volt;
951 
952 	/*
953 	 * Find private clocks and store them in struct ufs_mtk_clk.
954 	 * Remove "ufs_sel_min_src" and "ufs_sel_min_src" from list to avoid
955 	 * being switched on/off in clock gating.
956 	 */
957 	list_for_each_entry_safe(clki, clki_tmp, head, list) {
958 		if (!strcmp(clki->name, "ufs_sel")) {
959 			host->mclk.ufs_sel_clki = clki;
960 		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
961 			host->mclk.ufs_sel_max_clki = clki;
962 			clk_disable_unprepare(clki->clk);
963 			list_del(&clki->list);
964 		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
965 			host->mclk.ufs_sel_min_clki = clki;
966 			clk_disable_unprepare(clki->clk);
967 			list_del(&clki->list);
968 		} else if (!strcmp(clki->name, "ufs_fde")) {
969 			host->mclk.ufs_fde_clki = clki;
970 		} else if (!strcmp(clki->name, "ufs_fde_max_src")) {
971 			host->mclk.ufs_fde_max_clki = clki;
972 			clk_disable_unprepare(clki->clk);
973 			list_del(&clki->list);
974 		} else if (!strcmp(clki->name, "ufs_fde_min_src")) {
975 			host->mclk.ufs_fde_min_clki = clki;
976 			clk_disable_unprepare(clki->clk);
977 			list_del(&clki->list);
978 		}
979 	}
980 
981 	list_for_each_entry(clki, head, list) {
982 		dev_info(hba->dev, "clk \"%s\" present", clki->name);
983 	}
984 
985 	if (!ufs_mtk_is_clk_scale_ready(hba)) {
986 		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
987 		dev_info(hba->dev,
988 			 "%s: Clk-scaling not ready. Feature disabled.",
989 			 __func__);
990 		return;
991 	}
992 
993 	/*
994 	 * Default get vcore if dts have these settings.
995 	 * No matter clock scaling support or not. (may disable by customer)
996 	 */
997 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
998 	if (IS_ERR(reg)) {
999 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
1000 			 PTR_ERR(reg));
1001 		return;
1002 	}
1003 
1004 	if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min",
1005 				 &volt)) {
1006 		dev_info(dev, "failed to get clk-scale-up-vcore-min");
1007 		return;
1008 	}
1009 
1010 	host->mclk.reg_vcore = reg;
1011 	host->mclk.vcore_volt = volt;
1012 
1013 	/* If default boot is max gear, request vcore */
1014 	if (reg && volt && host->clk_scale_up) {
1015 		if (regulator_set_voltage(reg, volt, INT_MAX)) {
1016 			dev_info(hba->dev,
1017 				"Failed to set vcore to %d\n", volt);
1018 		}
1019 	}
1020 }
1021 
1022 #define MAX_VCC_NAME 30
1023 static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
1024 {
1025 	struct ufs_vreg_info *info = &hba->vreg_info;
1026 	struct device_node *np = hba->dev->of_node;
1027 	struct device *dev = hba->dev;
1028 	char vcc_name[MAX_VCC_NAME];
1029 	struct arm_smccc_res res;
1030 	int err, ver;
1031 
1032 	if (info->vcc)
1033 		return 0;
1034 
1035 	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
1036 		ufs_mtk_get_vcc_num(res);
1037 		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
1038 			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
1039 		else
1040 			return -ENODEV;
1041 	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
1042 		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
1043 		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
1044 	} else {
1045 		return 0;
1046 	}
1047 
1048 	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
1049 	if (err)
1050 		return err;
1051 
1052 	err = ufshcd_get_vreg(dev, info->vcc);
1053 	if (err)
1054 		return err;
1055 
1056 	err = regulator_enable(info->vcc->reg);
1057 	if (!err) {
1058 		info->vcc->enabled = true;
1059 		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
1060 	}
1061 
1062 	return err;
1063 }
1064 
1065 static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
1066 {
1067 	struct ufs_vreg_info *info = &hba->vreg_info;
1068 	struct ufs_vreg **vreg_on, **vreg_off;
1069 
1070 	if (hba->dev_info.wspecversion >= 0x0300) {
1071 		vreg_on = &info->vccq;
1072 		vreg_off = &info->vccq2;
1073 	} else {
1074 		vreg_on = &info->vccq2;
1075 		vreg_off = &info->vccq;
1076 	}
1077 
1078 	if (*vreg_on)
1079 		(*vreg_on)->always_on = true;
1080 
1081 	if (*vreg_off) {
1082 		regulator_disable((*vreg_off)->reg);
1083 		devm_kfree(hba->dev, (*vreg_off)->name);
1084 		devm_kfree(hba->dev, *vreg_off);
1085 		*vreg_off = NULL;
1086 	}
1087 }
1088 
1089 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
1090 {
1091 	unsigned long flags;
1092 	u32 ah_ms = 10;
1093 	u32 ah_scale, ah_timer;
1094 	u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000};
1095 
1096 	if (ufshcd_is_clkgating_allowed(hba)) {
1097 		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) {
1098 			ah_scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK,
1099 					  hba->ahit);
1100 			ah_timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
1101 					  hba->ahit);
1102 			if (ah_scale <= 5)
1103 				ah_ms = ah_timer * scale_us[ah_scale] / 1000;
1104 		}
1105 
1106 		spin_lock_irqsave(hba->host->host_lock, flags);
1107 		hba->clk_gating.delay_ms = max(ah_ms, 10U);
1108 		spin_unlock_irqrestore(hba->host->host_lock, flags);
1109 	}
1110 }
1111 
1112 /* Convert microseconds to Auto-Hibernate Idle Timer register value */
1113 static u32 ufs_mtk_us_to_ahit(unsigned int timer)
1114 {
1115 	unsigned int scale;
1116 
1117 	for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
1118 		timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
1119 
1120 	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
1121 	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
1122 }
1123 
1124 static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
1125 {
1126 	unsigned int us;
1127 
1128 	if (ufshcd_is_auto_hibern8_supported(hba)) {
1129 		switch (hba->dev_info.wmanufacturerid) {
1130 		case UFS_VENDOR_SAMSUNG:
1131 			/* configure auto-hibern8 timer to 3.5 ms */
1132 			us = 3500;
1133 			break;
1134 
1135 		case UFS_VENDOR_MICRON:
1136 			/* configure auto-hibern8 timer to 2 ms */
1137 			us = 2000;
1138 			break;
1139 
1140 		default:
1141 			/* configure auto-hibern8 timer to 1 ms */
1142 			us = 1000;
1143 			break;
1144 		}
1145 
1146 		hba->ahit = ufs_mtk_us_to_ahit(us);
1147 	}
1148 
1149 	ufs_mtk_setup_clk_gating(hba);
1150 }
1151 
1152 static void ufs_mtk_fix_clock_scaling(struct ufs_hba *hba)
1153 {
1154 	/* UFS version is below 4.0, clock scaling is not necessary */
1155 	if ((hba->dev_info.wspecversion < 0x0400)  &&
1156 		ufs_mtk_is_clk_scale_ready(hba)) {
1157 		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
1158 
1159 		_ufs_mtk_clk_scale(hba, false);
1160 	}
1161 }
1162 
1163 static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
1164 {
1165 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1166 	struct platform_device *pdev;
1167 	int i;
1168 	int irq;
1169 
1170 	host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
1171 	pdev = container_of(hba->dev, struct platform_device, dev);
1172 
1173 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
1174 		goto failed;
1175 
1176 	for (i = 0; i < host->mcq_nr_intr; i++) {
1177 		/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
1178 		irq = platform_get_irq(pdev, i + 1);
1179 		if (irq < 0) {
1180 			host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
1181 			goto failed;
1182 		}
1183 		host->mcq_intr_info[i].hba = hba;
1184 		host->mcq_intr_info[i].irq = irq;
1185 		dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
1186 	}
1187 
1188 	return;
1189 failed:
1190        /* invalidate irq info */
1191 	for (i = 0; i < host->mcq_nr_intr; i++)
1192 		host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
1193 
1194 	host->mcq_nr_intr = 0;
1195 }
1196 
1197 /**
1198  * ufs_mtk_init - find other essential mmio bases
1199  * @hba: host controller instance
1200  *
1201  * Binds PHY with controller and powers up PHY enabling clocks
1202  * and regulators.
1203  *
1204  * Return: -EPROBE_DEFER if binding fails, returns negative error
1205  * on phy power up failure and returns zero on success.
1206  */
1207 static int ufs_mtk_init(struct ufs_hba *hba)
1208 {
1209 	const struct of_device_id *id;
1210 	struct device *dev = hba->dev;
1211 	struct ufs_mtk_host *host;
1212 	struct Scsi_Host *shost = hba->host;
1213 	int err = 0;
1214 	struct arm_smccc_res res;
1215 
1216 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
1217 	if (!host) {
1218 		err = -ENOMEM;
1219 		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
1220 		goto out;
1221 	}
1222 
1223 	host->hba = hba;
1224 	ufshcd_set_variant(hba, host);
1225 
1226 	id = of_match_device(ufs_mtk_of_match, dev);
1227 	if (!id) {
1228 		err = -EINVAL;
1229 		goto out;
1230 	}
1231 
1232 	/* Initialize host capability */
1233 	ufs_mtk_init_host_caps(hba);
1234 
1235 	ufs_mtk_init_mcq_irq(hba);
1236 
1237 	err = ufs_mtk_bind_mphy(hba);
1238 	if (err)
1239 		goto out_variant_clear;
1240 
1241 	ufs_mtk_init_reset(hba);
1242 
1243 	/* backup mphy setting if mphy can reset */
1244 	if (host->mphy_reset)
1245 		ufs_mtk_mphy_ctrl(UFS_MPHY_BACKUP, res);
1246 
1247 	/* Enable runtime autosuspend */
1248 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
1249 
1250 	/* Enable clock-gating */
1251 	hba->caps |= UFSHCD_CAP_CLK_GATING;
1252 
1253 	/* Enable inline encryption */
1254 	hba->caps |= UFSHCD_CAP_CRYPTO;
1255 
1256 	/* Enable WriteBooster */
1257 	hba->caps |= UFSHCD_CAP_WB_EN;
1258 
1259 	/* Enable clk scaling*/
1260 	hba->caps |= UFSHCD_CAP_CLK_SCALING;
1261 	host->clk_scale_up = true; /* default is max freq */
1262 
1263 	/* Set runtime pm delay to replace default */
1264 	shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
1265 
1266 	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
1267 
1268 	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
1269 	if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC)
1270 		hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
1271 
1272 	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
1273 
1274 	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
1275 		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1276 
1277 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
1278 		hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP;
1279 
1280 	ufs_mtk_init_clocks(hba);
1281 
1282 	/*
1283 	 * ufshcd_vops_init() is invoked after
1284 	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
1285 	 * phy clock setup is skipped.
1286 	 *
1287 	 * Enable phy clocks specifically here.
1288 	 */
1289 	ufs_mtk_mphy_power_on(hba, true);
1290 
1291 	if (ufs_mtk_is_rtff_mtcmos(hba)) {
1292 		/* First Restore here, to avoid backup unexpected value */
1293 		ufs_mtk_mtcmos_ctrl(false, res);
1294 
1295 		/* Power on to init */
1296 		ufs_mtk_mtcmos_ctrl(true, res);
1297 	}
1298 
1299 	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
1300 
1301 	ufs_mtk_get_hw_ip_version(hba);
1302 
1303 	goto out;
1304 
1305 out_variant_clear:
1306 	ufshcd_set_variant(hba, NULL);
1307 out:
1308 	return err;
1309 }
1310 
1311 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
1312 				     struct ufs_pa_layer_attr *dev_req_params)
1313 {
1314 	if (!ufs_mtk_is_pmc_via_fastauto(hba))
1315 		return false;
1316 
1317 	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
1318 		return false;
1319 
1320 	if (dev_req_params->pwr_tx != FAST_MODE &&
1321 	    dev_req_params->gear_tx < UFS_HS_G4)
1322 		return false;
1323 
1324 	if (dev_req_params->pwr_rx != FAST_MODE &&
1325 	    dev_req_params->gear_rx < UFS_HS_G4)
1326 		return false;
1327 
1328 	if (dev_req_params->pwr_tx == SLOW_MODE ||
1329 	    dev_req_params->pwr_rx == SLOW_MODE)
1330 		return false;
1331 
1332 	return true;
1333 }
1334 
1335 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
1336 				const struct ufs_pa_layer_attr *dev_max_params,
1337 				struct ufs_pa_layer_attr *dev_req_params)
1338 {
1339 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1340 	struct ufs_host_params host_params;
1341 	int ret;
1342 
1343 	ufshcd_init_host_params(&host_params);
1344 	host_params.hs_rx_gear = UFS_HS_G5;
1345 	host_params.hs_tx_gear = UFS_HS_G5;
1346 
1347 	if (dev_max_params->pwr_rx == SLOW_MODE ||
1348 	    dev_max_params->pwr_tx == SLOW_MODE)
1349 		host_params.desired_working_mode = UFS_PWM_MODE;
1350 
1351 	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
1352 	if (ret) {
1353 		pr_info("%s: failed to determine capabilities\n",
1354 			__func__);
1355 	}
1356 
1357 	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
1358 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
1359 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
1360 
1361 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
1362 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
1363 
1364 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
1365 			       dev_req_params->lane_tx);
1366 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
1367 			       dev_req_params->lane_rx);
1368 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
1369 			       dev_req_params->hs_rate);
1370 
1371 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
1372 			       PA_NO_ADAPT);
1373 
1374 		if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
1375 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
1376 					DL_FC0ProtectionTimeOutVal_Default);
1377 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
1378 					DL_TC0ReplayTimeOutVal_Default);
1379 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
1380 					DL_AFC0ReqTimeOutVal_Default);
1381 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
1382 					DL_FC1ProtectionTimeOutVal_Default);
1383 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
1384 					DL_TC1ReplayTimeOutVal_Default);
1385 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
1386 					DL_AFC1ReqTimeOutVal_Default);
1387 
1388 			ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
1389 					DL_FC0ProtectionTimeOutVal_Default);
1390 			ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
1391 					DL_TC0ReplayTimeOutVal_Default);
1392 			ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
1393 					DL_AFC0ReqTimeOutVal_Default);
1394 		}
1395 
1396 		ret = ufshcd_uic_change_pwr_mode(hba,
1397 					FASTAUTO_MODE << 4 | FASTAUTO_MODE);
1398 
1399 		if (ret) {
1400 			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
1401 				__func__, ret);
1402 		}
1403 	}
1404 
1405 	/* if already configured to the requested pwr_mode, skip adapt */
1406 	if (dev_req_params->gear_rx == hba->pwr_info.gear_rx &&
1407 	    dev_req_params->gear_tx == hba->pwr_info.gear_tx &&
1408 	    dev_req_params->lane_rx == hba->pwr_info.lane_rx &&
1409 	    dev_req_params->lane_tx == hba->pwr_info.lane_tx &&
1410 	    dev_req_params->pwr_rx == hba->pwr_info.pwr_rx &&
1411 	    dev_req_params->pwr_tx == hba->pwr_info.pwr_tx &&
1412 	    dev_req_params->hs_rate == hba->pwr_info.hs_rate) {
1413 		return ret;
1414 	}
1415 
1416 	if (dev_req_params->pwr_rx == FAST_MODE ||
1417 	    dev_req_params->pwr_rx == FASTAUTO_MODE) {
1418 		if (host->hw_ver.major >= 3) {
1419 			ret = ufshcd_dme_configure_adapt(hba,
1420 						   dev_req_params->gear_tx,
1421 						   PA_INITIAL_ADAPT);
1422 		} else {
1423 			ret = ufshcd_dme_configure_adapt(hba,
1424 				   dev_req_params->gear_tx,
1425 				   PA_NO_ADAPT);
1426 		}
1427 	} else {
1428 		ret = ufshcd_dme_configure_adapt(hba,
1429 			   dev_req_params->gear_tx,
1430 			   PA_NO_ADAPT);
1431 	}
1432 
1433 	return ret;
1434 }
1435 
1436 static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
1437 {
1438 	int ret;
1439 
1440 	/* disable auto-hibern8 */
1441 	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
1442 
1443 	/* wait host return to idle state when auto-hibern8 off */
1444 	ret = ufs_mtk_wait_idle_state(hba, 5);
1445 	if (ret)
1446 		goto out;
1447 
1448 	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1449 
1450 out:
1451 	if (ret) {
1452 		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
1453 
1454 		ufshcd_force_error_recovery(hba);
1455 
1456 		/* trigger error handler and break suspend */
1457 		ret = -EBUSY;
1458 	}
1459 
1460 	return ret;
1461 }
1462 
1463 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
1464 				enum ufs_notify_change_status stage,
1465 				const struct ufs_pa_layer_attr *dev_max_params,
1466 				struct ufs_pa_layer_attr *dev_req_params)
1467 {
1468 	int ret = 0;
1469 	static u32 reg;
1470 
1471 	switch (stage) {
1472 	case PRE_CHANGE:
1473 		if (ufshcd_is_auto_hibern8_supported(hba)) {
1474 			reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
1475 			ufs_mtk_auto_hibern8_disable(hba);
1476 		}
1477 		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
1478 					     dev_req_params);
1479 		break;
1480 	case POST_CHANGE:
1481 		if (ufshcd_is_auto_hibern8_supported(hba))
1482 			ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
1483 		break;
1484 	default:
1485 		ret = -EINVAL;
1486 		break;
1487 	}
1488 
1489 	return ret;
1490 }
1491 
1492 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
1493 {
1494 	int ret;
1495 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1496 
1497 	ret = ufshcd_dme_set(hba,
1498 			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
1499 			     lpm ? 1 : 0);
1500 	if (!ret || !lpm) {
1501 		/*
1502 		 * Forcibly set as non-LPM mode if UIC commands is failed
1503 		 * to use default hba_enable_delay_us value for re-enabling
1504 		 * the host.
1505 		 */
1506 		host->unipro_lpm = lpm;
1507 	}
1508 
1509 	return ret;
1510 }
1511 
1512 static int ufs_mtk_pre_link(struct ufs_hba *hba)
1513 {
1514 	int ret;
1515 	u32 tmp;
1516 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1517 
1518 	ufs_mtk_get_controller_version(hba);
1519 
1520 	ret = ufs_mtk_unipro_set_lpm(hba, false);
1521 	if (ret)
1522 		return ret;
1523 
1524 	/*
1525 	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
1526 	 * to make sure that both host and device TX LCC are disabled
1527 	 * once link startup is completed.
1528 	 */
1529 	ret = ufshcd_disable_host_tx_lcc(hba);
1530 	if (ret)
1531 		return ret;
1532 
1533 	/* disable deep stall */
1534 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
1535 	if (ret)
1536 		return ret;
1537 
1538 	tmp &= ~(1 << 6);
1539 
1540 	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
1541 
1542 	/* Enable the 1144 functions setting */
1543 	if (host->ip_ver == IP_VER_MT6989) {
1544 		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
1545 		if (ret)
1546 			return ret;
1547 
1548 		tmp |= 0x10;
1549 		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
1550 	}
1551 
1552 	return ret;
1553 }
1554 
1555 static void ufs_mtk_post_link(struct ufs_hba *hba)
1556 {
1557 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1558 	u32 tmp;
1559 
1560 	/* fix device PA_INIT no adapt */
1561 	if (host->ip_ver >= IP_VER_MT6899) {
1562 		ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
1563 		tmp |= 0x100;
1564 		ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
1565 	}
1566 
1567 	/* enable unipro clock gating feature */
1568 	ufs_mtk_cfg_unipro_cg(hba, true);
1569 }
1570 
1571 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
1572 				       enum ufs_notify_change_status stage)
1573 {
1574 	int ret = 0;
1575 
1576 	switch (stage) {
1577 	case PRE_CHANGE:
1578 		ret = ufs_mtk_pre_link(hba);
1579 		break;
1580 	case POST_CHANGE:
1581 		ufs_mtk_post_link(hba);
1582 		break;
1583 	default:
1584 		ret = -EINVAL;
1585 		break;
1586 	}
1587 
1588 	return ret;
1589 }
1590 
1591 static int ufs_mtk_device_reset(struct ufs_hba *hba)
1592 {
1593 	struct arm_smccc_res res;
1594 
1595 	ufs_mtk_device_reset_ctrl(0, res);
1596 
1597 	/* disable hba in middle of device reset */
1598 	ufshcd_hba_stop(hba);
1599 
1600 	/*
1601 	 * The reset signal is active low. UFS devices shall detect
1602 	 * more than or equal to 1us of positive or negative RST_n
1603 	 * pulse width.
1604 	 *
1605 	 * To be on safe side, keep the reset low for at least 10us.
1606 	 */
1607 	usleep_range(10, 15);
1608 
1609 	ufs_mtk_device_reset_ctrl(1, res);
1610 
1611 	/* Some devices may need time to respond to rst_n */
1612 	usleep_range(10000, 15000);
1613 
1614 	dev_info(hba->dev, "device reset done\n");
1615 
1616 	return 0;
1617 }
1618 
1619 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
1620 {
1621 	int err;
1622 
1623 	err = ufshcd_hba_enable(hba);
1624 	if (err)
1625 		return err;
1626 
1627 	err = ufs_mtk_unipro_set_lpm(hba, false);
1628 	if (err)
1629 		return err;
1630 
1631 	err = ufshcd_uic_hibern8_exit(hba);
1632 	if (err)
1633 		return err;
1634 
1635 	/* Check link state to make sure exit h8 success */
1636 	err = ufs_mtk_wait_idle_state(hba, 5);
1637 	if (err) {
1638 		dev_warn(hba->dev, "wait idle fail, err=%d\n", err);
1639 		return err;
1640 	}
1641 	err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1642 	if (err) {
1643 		dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
1644 		return err;
1645 	}
1646 	ufshcd_set_link_active(hba);
1647 
1648 	err = ufshcd_make_hba_operational(hba);
1649 	if (err)
1650 		return err;
1651 
1652 	if (hba->mcq_enabled) {
1653 		ufs_mtk_config_mcq(hba, false);
1654 		ufshcd_mcq_make_queues_operational(hba);
1655 		ufshcd_mcq_config_mac(hba, hba->nutrs);
1656 		ufshcd_mcq_enable(hba);
1657 	}
1658 
1659 	return 0;
1660 }
1661 
1662 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
1663 {
1664 	int err;
1665 
1666 	/* Disable reset confirm feature by UniPro */
1667 	ufshcd_writel(hba,
1668 		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
1669 		      REG_UFS_XOUFS_CTRL);
1670 
1671 	err = ufs_mtk_unipro_set_lpm(hba, true);
1672 	if (err) {
1673 		/* Resume UniPro state for following error recovery */
1674 		ufs_mtk_unipro_set_lpm(hba, false);
1675 		return err;
1676 	}
1677 
1678 	return 0;
1679 }
1680 
1681 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
1682 {
1683 	struct ufs_vreg *vccqx = NULL;
1684 
1685 	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
1686 		return;
1687 
1688 	if (hba->vreg_info.vccq)
1689 		vccqx = hba->vreg_info.vccq;
1690 	else
1691 		vccqx = hba->vreg_info.vccq2;
1692 
1693 	regulator_set_mode(vccqx->reg,
1694 			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
1695 }
1696 
1697 static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
1698 {
1699 	struct arm_smccc_res res;
1700 
1701 	ufs_mtk_device_pwr_ctrl(!lpm,
1702 				(unsigned long)hba->dev_info.wspecversion,
1703 				res);
1704 }
1705 
1706 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
1707 {
1708 	bool skip_vccqx = false;
1709 
1710 	/* Prevent entering LPM when device is still active */
1711 	if (lpm && ufshcd_is_ufs_dev_active(hba))
1712 		return;
1713 
1714 	/* Skip vccqx lpm control and control vsx only */
1715 	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
1716 		skip_vccqx = true;
1717 
1718 	/* VCC is always-on, control vsx only */
1719 	if (!hba->vreg_info.vcc)
1720 		skip_vccqx = true;
1721 
1722 	/* Broken vcc keep vcc always on, most case control vsx only */
1723 	if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) {
1724 		/* Some device vccqx/vsx can enter lpm */
1725 		if (ufs_mtk_is_allow_vccqx_lpm(hba))
1726 			skip_vccqx = false;
1727 		else /* control vsx only */
1728 			skip_vccqx = true;
1729 	}
1730 
1731 	if (lpm) {
1732 		if (!skip_vccqx)
1733 			ufs_mtk_vccqx_set_lpm(hba, lpm);
1734 		ufs_mtk_vsx_set_lpm(hba, lpm);
1735 	} else {
1736 		ufs_mtk_vsx_set_lpm(hba, lpm);
1737 		if (!skip_vccqx)
1738 			ufs_mtk_vccqx_set_lpm(hba, lpm);
1739 	}
1740 }
1741 
1742 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
1743 	enum ufs_notify_change_status status)
1744 {
1745 	int err;
1746 	struct arm_smccc_res res;
1747 
1748 	if (status == PRE_CHANGE) {
1749 		if (ufshcd_is_auto_hibern8_supported(hba))
1750 			return ufs_mtk_auto_hibern8_disable(hba);
1751 		return 0;
1752 	}
1753 
1754 	if (ufshcd_is_link_hibern8(hba)) {
1755 		err = ufs_mtk_link_set_lpm(hba);
1756 		if (err)
1757 			goto fail;
1758 	}
1759 
1760 	if (!ufshcd_is_link_active(hba)) {
1761 		/*
1762 		 * Make sure no error will be returned to prevent
1763 		 * ufshcd_suspend() re-enabling regulators while vreg is still
1764 		 * in low-power mode.
1765 		 */
1766 		err = ufs_mtk_mphy_power_on(hba, false);
1767 		if (err)
1768 			goto fail;
1769 	}
1770 
1771 	if (ufshcd_is_link_off(hba))
1772 		ufs_mtk_device_reset_ctrl(0, res);
1773 
1774 	ufs_mtk_sram_pwr_ctrl(false, res);
1775 
1776 	return 0;
1777 fail:
1778 	/*
1779 	 * Set link as off state enforcedly to trigger
1780 	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
1781 	 * for completed host reset.
1782 	 */
1783 	ufshcd_set_link_off(hba);
1784 	return -EAGAIN;
1785 }
1786 
1787 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1788 {
1789 	int err;
1790 	struct arm_smccc_res res;
1791 
1792 	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1793 		ufs_mtk_dev_vreg_set_lpm(hba, false);
1794 
1795 	ufs_mtk_sram_pwr_ctrl(true, res);
1796 
1797 	err = ufs_mtk_mphy_power_on(hba, true);
1798 	if (err)
1799 		goto fail;
1800 
1801 	if (ufshcd_is_link_hibern8(hba)) {
1802 		err = ufs_mtk_link_set_hpm(hba);
1803 		if (err)
1804 			goto fail;
1805 	}
1806 
1807 	return 0;
1808 
1809 fail:
1810 	/*
1811 	 * Check if the platform (parent) device has resumed, and ensure that
1812 	 * power, clock, and MTCMOS are all turned on.
1813 	 */
1814 	err = ufshcd_link_recovery(hba);
1815 	if (err) {
1816 		dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n",
1817 			hba->dev->power.request,
1818 			hba->dev->power.runtime_status,
1819 			hba->dev->power.runtime_error);
1820 	}
1821 
1822 	return 0; /* Cannot return a failure, otherwise, the I/O will hang. */
1823 }
1824 
1825 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
1826 {
1827 	/* Dump ufshci register 0x140 ~ 0x14C */
1828 	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
1829 			 "XOUFS Ctrl (0x140): ");
1830 
1831 	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
1832 
1833 	/* Dump ufshci register 0x2200 ~ 0x22AC */
1834 	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
1835 			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
1836 			 "MPHY Ctrl (0x2200): ");
1837 
1838 	/* Direct debugging information to REG_MTK_PROBE */
1839 	ufs_mtk_dbg_sel(hba);
1840 	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
1841 }
1842 
1843 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1844 {
1845 	struct ufs_dev_info *dev_info = &hba->dev_info;
1846 	u16 mid = dev_info->wmanufacturerid;
1847 	unsigned int cpu;
1848 
1849 	if (hba->mcq_enabled) {
1850 		/* Iterate all cpus to set affinity for mcq irqs */
1851 		for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1852 			ufs_mtk_mcq_set_irq_affinity(hba, cpu);
1853 	}
1854 
1855 	if (mid == UFS_VENDOR_SAMSUNG) {
1856 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1857 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
1858 	} else if (mid == UFS_VENDOR_MICRON) {
1859 		/* Only for the host which have TX skew issue */
1860 		if (ufs_mtk_is_tx_skew_fix(hba) &&
1861 			(STR_PRFX_EQUAL("MT128GBCAV2U31", dev_info->model) ||
1862 			STR_PRFX_EQUAL("MT256GBCAV4U31", dev_info->model) ||
1863 			STR_PRFX_EQUAL("MT512GBCAV8U31", dev_info->model) ||
1864 			STR_PRFX_EQUAL("MT256GBEAX4U40", dev_info->model) ||
1865 			STR_PRFX_EQUAL("MT512GAYAX4U40", dev_info->model) ||
1866 			STR_PRFX_EQUAL("MT001TAYAX8U40", dev_info->model))) {
1867 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8);
1868 		}
1869 	}
1870 
1871 	/*
1872 	 * Decide waiting time before gating reference clock and
1873 	 * after ungating reference clock according to vendors'
1874 	 * requirements.
1875 	 */
1876 	if (mid == UFS_VENDOR_SAMSUNG)
1877 		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
1878 	else if (mid == UFS_VENDOR_SKHYNIX)
1879 		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
1880 	else if (mid == UFS_VENDOR_TOSHIBA)
1881 		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
1882 	else
1883 		ufs_mtk_setup_ref_clk_wait_us(hba,
1884 					      REFCLK_DEFAULT_WAIT_US);
1885 	return 0;
1886 }
1887 
1888 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1889 {
1890 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1891 
1892 	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1893 	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1894 		hba->vreg_info.vcc->always_on = true;
1895 		/*
1896 		 * VCC will be kept always-on thus we don't
1897 		 * need any delay during regulator operations
1898 		 */
1899 		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1900 			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1901 	}
1902 
1903 	ufs_mtk_vreg_fix_vcc(hba);
1904 	ufs_mtk_vreg_fix_vccqx(hba);
1905 	ufs_mtk_fix_ahit(hba);
1906 	ufs_mtk_fix_clock_scaling(hba);
1907 }
1908 
1909 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1910 				 enum ufs_event_type evt, void *data)
1911 {
1912 	unsigned int val = *(u32 *)data;
1913 	unsigned long reg;
1914 	u8 bit;
1915 
1916 	trace_ufs_mtk_event(evt, val);
1917 
1918 	/* Print details of UIC Errors */
1919 	if (evt <= UFS_EVT_DME_ERR) {
1920 		dev_info(hba->dev,
1921 			 "Host UIC Error Code (%s): %08x\n",
1922 			 ufs_uic_err_str[evt], val);
1923 		reg = val;
1924 	}
1925 
1926 	if (evt == UFS_EVT_PA_ERR) {
1927 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
1928 			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
1929 	}
1930 
1931 	if (evt == UFS_EVT_DL_ERR) {
1932 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
1933 			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
1934 	}
1935 }
1936 
1937 static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
1938 				struct devfreq_dev_profile *profile,
1939 				struct devfreq_simple_ondemand_data *data)
1940 {
1941 	/* Customize min gear in clk scaling */
1942 	hba->clk_scaling.min_gear = UFS_HS_G4;
1943 
1944 	hba->vps->devfreq_profile.polling_ms = 200;
1945 	hba->vps->ondemand_data.upthreshold = 50;
1946 	hba->vps->ondemand_data.downdifferential = 20;
1947 }
1948 
1949 static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
1950 {
1951 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1952 	struct ufs_mtk_clk *mclk = &host->mclk;
1953 	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
1954 	struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki;
1955 	struct regulator *reg;
1956 	int volt, ret = 0;
1957 	bool clk_bind_vcore = false;
1958 	bool clk_fde_scale = false;
1959 
1960 	if (!hba->clk_scaling.is_initialized)
1961 		return;
1962 
1963 	if (!clki || !fde_clki)
1964 		return;
1965 
1966 	reg = host->mclk.reg_vcore;
1967 	volt = host->mclk.vcore_volt;
1968 	if (reg && volt != 0)
1969 		clk_bind_vcore = true;
1970 
1971 	if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki)
1972 		clk_fde_scale = true;
1973 
1974 	ret = clk_prepare_enable(clki->clk);
1975 	if (ret) {
1976 		dev_info(hba->dev,
1977 			 "clk_prepare_enable() fail, ret: %d\n", ret);
1978 		return;
1979 	}
1980 
1981 	if (clk_fde_scale) {
1982 		ret = clk_prepare_enable(fde_clki->clk);
1983 		if (ret) {
1984 			dev_info(hba->dev,
1985 				 "fde clk_prepare_enable() fail, ret: %d\n", ret);
1986 			return;
1987 		}
1988 	}
1989 
1990 	if (scale_up) {
1991 		if (clk_bind_vcore) {
1992 			ret = regulator_set_voltage(reg, volt, INT_MAX);
1993 			if (ret) {
1994 				dev_info(hba->dev,
1995 					"Failed to set vcore to %d\n", volt);
1996 				goto out;
1997 			}
1998 		}
1999 
2000 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
2001 		if (ret) {
2002 			dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
2003 				ret);
2004 		}
2005 
2006 		if (clk_fde_scale) {
2007 			ret = clk_set_parent(fde_clki->clk,
2008 				mclk->ufs_fde_max_clki->clk);
2009 			if (ret) {
2010 				dev_info(hba->dev,
2011 					"Failed to set fde clk mux, ret = %d\n",
2012 					ret);
2013 			}
2014 		}
2015 	} else {
2016 		if (clk_fde_scale) {
2017 			ret = clk_set_parent(fde_clki->clk,
2018 				mclk->ufs_fde_min_clki->clk);
2019 			if (ret) {
2020 				dev_info(hba->dev,
2021 					"Failed to set fde clk mux, ret = %d\n",
2022 					ret);
2023 				goto out;
2024 			}
2025 		}
2026 
2027 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
2028 		if (ret) {
2029 			dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
2030 				ret);
2031 			goto out;
2032 		}
2033 
2034 		if (clk_bind_vcore) {
2035 			ret = regulator_set_voltage(reg, 0, INT_MAX);
2036 			if (ret) {
2037 				dev_info(hba->dev,
2038 					"failed to set vcore to MIN\n");
2039 			}
2040 		}
2041 	}
2042 
2043 out:
2044 	clk_disable_unprepare(clki->clk);
2045 
2046 	if (clk_fde_scale)
2047 		clk_disable_unprepare(fde_clki->clk);
2048 }
2049 
2050 /**
2051  * ufs_mtk_clk_scale - Internal clk scaling operation
2052  *
2053  * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
2054  * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
2055  * Max and min clocks rate of ufs_sel defined in dts should match rate of
2056  * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
2057  * This prevent changing rate of pll clock that is shared between modules.
2058  *
2059  * @hba: per adapter instance
2060  * @scale_up: True for scaling up and false for scaling down
2061  */
2062 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
2063 {
2064 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2065 	struct ufs_mtk_clk *mclk = &host->mclk;
2066 	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
2067 
2068 	if (host->clk_scale_up == scale_up)
2069 		goto out;
2070 
2071 	if (scale_up)
2072 		_ufs_mtk_clk_scale(hba, true);
2073 	else
2074 		_ufs_mtk_clk_scale(hba, false);
2075 
2076 	host->clk_scale_up = scale_up;
2077 
2078 	/* Must always set before clk_set_rate() */
2079 	if (scale_up)
2080 		clki->curr_freq = clki->max_freq;
2081 	else
2082 		clki->curr_freq = clki->min_freq;
2083 out:
2084 	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
2085 }
2086 
2087 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
2088 				    unsigned long target_freq,
2089 				    enum ufs_notify_change_status status)
2090 {
2091 	if (!ufshcd_is_clkscaling_supported(hba))
2092 		return 0;
2093 
2094 	if (status == PRE_CHANGE) {
2095 		/* Switch parent before clk_set_rate() */
2096 		ufs_mtk_clk_scale(hba, scale_up);
2097 	} else {
2098 		/* Request interrupt latency QoS accordingly */
2099 		ufs_mtk_scale_perf(hba, scale_up);
2100 	}
2101 
2102 	return 0;
2103 }
2104 
2105 static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
2106 {
2107 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2108 
2109 	/* MCQ operation not permitted */
2110 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
2111 		return -EPERM;
2112 
2113 	return MAX_SUPP_MAC;
2114 }
2115 
2116 static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
2117 {
2118 	struct ufshcd_mcq_opr_info_t *opr;
2119 	int i;
2120 
2121 	hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
2122 	hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
2123 	hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
2124 	hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;
2125 
2126 	for (i = 0; i < OPR_MAX; i++) {
2127 		opr = &hba->mcq_opr[i];
2128 		opr->stride = REG_UFS_MCQ_STRIDE;
2129 		opr->base = hba->mmio_base + opr->offset;
2130 	}
2131 
2132 	return 0;
2133 }
2134 
2135 static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
2136 {
2137 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2138 
2139 	/* fail mcq initialization if interrupt is not filled properly */
2140 	if (!host->mcq_nr_intr) {
2141 		dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
2142 		return -EINVAL;
2143 	}
2144 
2145 	hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
2146 	return 0;
2147 }
2148 
2149 static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
2150 {
2151 	struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
2152 	struct ufs_hba *hba = mcq_intr_info->hba;
2153 	struct ufs_hw_queue *hwq;
2154 	u32 events;
2155 	int qid = mcq_intr_info->qid;
2156 
2157 	hwq = &hba->uhq[qid];
2158 
2159 	events = ufshcd_mcq_read_cqis(hba, qid);
2160 	if (events)
2161 		ufshcd_mcq_write_cqis(hba, events, qid);
2162 
2163 	if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
2164 		ufshcd_mcq_poll_cqe_lock(hba, hwq);
2165 
2166 	return IRQ_HANDLED;
2167 }
2168 
2169 static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
2170 {
2171 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2172 	u32 irq, i;
2173 	int ret;
2174 
2175 	for (i = 0; i < host->mcq_nr_intr; i++) {
2176 		irq = host->mcq_intr_info[i].irq;
2177 		if (irq == MTK_MCQ_INVALID_IRQ) {
2178 			dev_err(hba->dev, "invalid irq. %d\n", i);
2179 			return -ENOPARAM;
2180 		}
2181 
2182 		host->mcq_intr_info[i].qid = i;
2183 		ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
2184 				       &host->mcq_intr_info[i]);
2185 
2186 		dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");
2187 
2188 		if (ret) {
2189 			dev_err(hba->dev, "Cannot request irq %d\n", ret);
2190 			return ret;
2191 		}
2192 	}
2193 	host->is_mcq_intr_enabled = true;
2194 
2195 	return 0;
2196 }
2197 
2198 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
2199 {
2200 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2201 	int ret = 0;
2202 
2203 	if (!host->mcq_set_intr) {
2204 		/* Disable irq option register */
2205 		ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
2206 
2207 		if (irq) {
2208 			ret = ufs_mtk_config_mcq_irq(hba);
2209 			if (ret)
2210 				return ret;
2211 		}
2212 
2213 		host->mcq_set_intr = true;
2214 	}
2215 
2216 	ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
2217 	ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
2218 
2219 	return 0;
2220 }
2221 
2222 static int ufs_mtk_config_esi(struct ufs_hba *hba)
2223 {
2224 	return ufs_mtk_config_mcq(hba, true);
2225 }
2226 
2227 static void ufs_mtk_config_scsi_dev(struct scsi_device *sdev)
2228 {
2229 	struct ufs_hba *hba = shost_priv(sdev->host);
2230 
2231 	dev_dbg(hba->dev, "lu %llu scsi device configured", sdev->lun);
2232 	if (sdev->lun == 2)
2233 		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, sdev->request_queue);
2234 }
2235 
2236 /*
2237  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
2238  *
2239  * The variant operations configure the necessary controller and PHY
2240  * handshake during initialization.
2241  */
2242 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
2243 	.name                = "mediatek.ufshci",
2244 	.max_num_rtt         = MTK_MAX_NUM_RTT,
2245 	.init                = ufs_mtk_init,
2246 	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
2247 	.setup_clocks        = ufs_mtk_setup_clocks,
2248 	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
2249 	.link_startup_notify = ufs_mtk_link_startup_notify,
2250 	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
2251 	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
2252 	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
2253 	.suspend             = ufs_mtk_suspend,
2254 	.resume              = ufs_mtk_resume,
2255 	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
2256 	.device_reset        = ufs_mtk_device_reset,
2257 	.event_notify        = ufs_mtk_event_notify,
2258 	.config_scaling_param = ufs_mtk_config_scaling_param,
2259 	.clk_scale_notify    = ufs_mtk_clk_scale_notify,
2260 	/* mcq vops */
2261 	.get_hba_mac         = ufs_mtk_get_hba_mac,
2262 	.op_runtime_config   = ufs_mtk_op_runtime_config,
2263 	.mcq_config_resource = ufs_mtk_mcq_config_resource,
2264 	.config_esi          = ufs_mtk_config_esi,
2265 	.config_scsi_dev     = ufs_mtk_config_scsi_dev,
2266 };
2267 
2268 /**
2269  * ufs_mtk_probe - probe routine of the driver
2270  * @pdev: pointer to Platform device handle
2271  *
2272  * Return: zero for success and non-zero for failure.
2273  */
2274 static int ufs_mtk_probe(struct platform_device *pdev)
2275 {
2276 	int err;
2277 	struct device *dev = &pdev->dev, *phy_dev = NULL;
2278 	struct device_node *reset_node, *phy_node = NULL;
2279 	struct platform_device *reset_pdev, *phy_pdev = NULL;
2280 	struct device_link *link;
2281 	struct ufs_hba *hba;
2282 	struct ufs_mtk_host *host;
2283 
2284 	reset_node = of_find_compatible_node(NULL, NULL,
2285 					     "ti,syscon-reset");
2286 	if (!reset_node) {
2287 		dev_notice(dev, "find ti,syscon-reset fail\n");
2288 		goto skip_reset;
2289 	}
2290 	reset_pdev = of_find_device_by_node(reset_node);
2291 	if (!reset_pdev) {
2292 		dev_notice(dev, "find reset_pdev fail\n");
2293 		goto skip_reset;
2294 	}
2295 	link = device_link_add(dev, &reset_pdev->dev,
2296 		DL_FLAG_AUTOPROBE_CONSUMER);
2297 	put_device(&reset_pdev->dev);
2298 	if (!link) {
2299 		dev_notice(dev, "add reset device_link fail\n");
2300 		goto skip_reset;
2301 	}
2302 	/* supplier is not probed */
2303 	if (link->status == DL_STATE_DORMANT) {
2304 		err = -EPROBE_DEFER;
2305 		goto out;
2306 	}
2307 
2308 skip_reset:
2309 	/* find phy node */
2310 	phy_node = of_parse_phandle(dev->of_node, "phys", 0);
2311 
2312 	if (phy_node) {
2313 		phy_pdev = of_find_device_by_node(phy_node);
2314 		if (!phy_pdev)
2315 			goto skip_phy;
2316 		phy_dev = &phy_pdev->dev;
2317 
2318 		pm_runtime_set_active(phy_dev);
2319 		pm_runtime_enable(phy_dev);
2320 		pm_runtime_get_sync(phy_dev);
2321 
2322 		put_device(phy_dev);
2323 		dev_info(dev, "phys node found\n");
2324 	} else {
2325 		dev_notice(dev, "phys node not found\n");
2326 	}
2327 
2328 skip_phy:
2329 	/* perform generic probe */
2330 	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
2331 	if (err) {
2332 		dev_err(dev, "probe failed %d\n", err);
2333 		goto out;
2334 	}
2335 
2336 	hba = platform_get_drvdata(pdev);
2337 	if (!hba)
2338 		goto out;
2339 
2340 	if (phy_node && phy_dev) {
2341 		host = ufshcd_get_variant(hba);
2342 		host->phy_dev = phy_dev;
2343 	}
2344 
2345 	/*
2346 	 * Because the default power setting of VSx (the upper layer of
2347 	 * VCCQ/VCCQ2) is HWLP, we need to prevent VCCQ/VCCQ2 from
2348 	 * entering LPM.
2349 	 */
2350 	ufs_mtk_dev_vreg_set_lpm(hba, false);
2351 
2352 out:
2353 	of_node_put(phy_node);
2354 	of_node_put(reset_node);
2355 	return err;
2356 }
2357 
2358 /**
2359  * ufs_mtk_remove - set driver_data of the device to NULL
2360  * @pdev: pointer to platform device handle
2361  *
2362  * Always return 0
2363  */
2364 static void ufs_mtk_remove(struct platform_device *pdev)
2365 {
2366 	ufshcd_pltfrm_remove(pdev);
2367 }
2368 
2369 #ifdef CONFIG_PM_SLEEP
2370 static int ufs_mtk_system_suspend(struct device *dev)
2371 {
2372 	struct ufs_hba *hba = dev_get_drvdata(dev);
2373 	struct arm_smccc_res res;
2374 	int ret;
2375 
2376 	ret = ufshcd_system_suspend(dev);
2377 	if (ret)
2378 		goto out;
2379 
2380 	if (pm_runtime_suspended(hba->dev))
2381 		goto out;
2382 
2383 	ufs_mtk_dev_vreg_set_lpm(hba, true);
2384 
2385 	if (ufs_mtk_is_rtff_mtcmos(hba))
2386 		ufs_mtk_mtcmos_ctrl(false, res);
2387 
2388 out:
2389 	return ret;
2390 }
2391 
2392 static int ufs_mtk_system_resume(struct device *dev)
2393 {
2394 	int ret = 0;
2395 	struct ufs_hba *hba = dev_get_drvdata(dev);
2396 	struct arm_smccc_res res;
2397 
2398 	if (pm_runtime_suspended(hba->dev))
2399 		goto out;
2400 
2401 	if (ufs_mtk_is_rtff_mtcmos(hba))
2402 		ufs_mtk_mtcmos_ctrl(true, res);
2403 
2404 	ufs_mtk_dev_vreg_set_lpm(hba, false);
2405 
2406 out:
2407 	ret = ufshcd_system_resume(dev);
2408 
2409 	return ret;
2410 }
2411 #endif
2412 
2413 #ifdef CONFIG_PM
2414 static int ufs_mtk_runtime_suspend(struct device *dev)
2415 {
2416 	struct ufs_hba *hba = dev_get_drvdata(dev);
2417 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2418 	struct arm_smccc_res res;
2419 	int ret = 0;
2420 
2421 	ret = ufshcd_runtime_suspend(dev);
2422 	if (ret)
2423 		return ret;
2424 
2425 	ufs_mtk_dev_vreg_set_lpm(hba, true);
2426 
2427 	if (ufs_mtk_is_rtff_mtcmos(hba))
2428 		ufs_mtk_mtcmos_ctrl(false, res);
2429 
2430 	if (host->phy_dev)
2431 		pm_runtime_put_sync(host->phy_dev);
2432 
2433 	return 0;
2434 }
2435 
2436 static int ufs_mtk_runtime_resume(struct device *dev)
2437 {
2438 	struct ufs_hba *hba = dev_get_drvdata(dev);
2439 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2440 	struct arm_smccc_res res;
2441 
2442 	if (ufs_mtk_is_rtff_mtcmos(hba))
2443 		ufs_mtk_mtcmos_ctrl(true, res);
2444 
2445 	if (host->phy_dev)
2446 		pm_runtime_get_sync(host->phy_dev);
2447 
2448 	ufs_mtk_dev_vreg_set_lpm(hba, false);
2449 
2450 	return ufshcd_runtime_resume(dev);
2451 }
2452 #endif
2453 
2454 static const struct dev_pm_ops ufs_mtk_pm_ops = {
2455 	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
2456 				ufs_mtk_system_resume)
2457 	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
2458 			   ufs_mtk_runtime_resume, NULL)
2459 	.prepare	 = ufshcd_suspend_prepare,
2460 	.complete	 = ufshcd_resume_complete,
2461 };
2462 
2463 static struct platform_driver ufs_mtk_pltform = {
2464 	.probe      = ufs_mtk_probe,
2465 	.remove = ufs_mtk_remove,
2466 	.driver = {
2467 		.name   = "ufshcd-mtk",
2468 		.pm     = &ufs_mtk_pm_ops,
2469 		.of_match_table = ufs_mtk_of_match,
2470 	},
2471 };
2472 
2473 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
2474 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
2475 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
2476 MODULE_LICENSE("GPL v2");
2477 
2478 module_platform_driver(ufs_mtk_pltform);
2479