// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>

#include "ufs-mediatek.h"
#include "ufs-mediatek-sip.h"

static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
#undef CREATE_TRACE_POINTS

#define MAX_SUPP_MAC 64
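/*
 * Per the UFSHCI MCQ definition, the queue configuration region starts at
 * QCFGPTR * 0x200 bytes, where QCFGPTR is carried in bits [23:16] of the
 * MCQ capabilities register; the macro below decodes that offset.
 */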
#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)

static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "H9HQ21AFAMZDAR",
	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
	{}
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);

/*
 * Details of UIC Errors
 */
static const char *const ufs_uic_err_str[] = {
	"PHY Adapter Layer",
	"Data Link Layer",
	"Network Link Layer",
	"Transport Link Layer",
	"DME"
};

static const char *const ufs_uic_pa_err_str[] = {
	"PHY error on Lane 0",
	"PHY error on Lane 1",
	"PHY error on Lane 2",
	"PHY error on Lane 3",
	"Generic PHY Adapter Error. This should be the LINERESET indication"
};

static const char *const ufs_uic_dl_err_str[] = {
	"NAC_RECEIVED",
	"TCx_REPLAY_TIMER_EXPIRED",
	"AFCx_REQUEST_TIMER_EXPIRED",
	"FCx_PROTECTION_TIMER_EXPIRED",
	"CRC_ERROR",
	"RX_BUFFER_OVERFLOW",
	"MAX_FRAME_LENGTH_EXCEEDED",
	"WRONG_SEQUENCE_NUMBER",
	"AFC_FRAME_SYNTAX_ERROR",
	"NAC_FRAME_SYNTAX_ERROR",
	"EOF_SYNTAX_ERROR",
	"FRAME_SYNTAX_ERROR",
	"BAD_CTRL_SYMBOL_TYPE",
	"PA_INIT_ERROR",
	"PA_ERROR_IND_RECEIVED",
	"PA_INIT"
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX);
}

static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS);
}

static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM);
}

static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}

static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);
	reset_control_assert(host->mphy_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
	reset_control_deassert(host->mphy_reset);

	/* restore mphy setting after mphy reset */
	if (host->mphy_reset)
		ufs_mtk_mphy_ctrl(UFS_MPHY_RESTORE, res);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
	ufs_mtk_init_reset_control(hba, &host->mphy_reset,
				   "mphy_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}

		/*
		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
		 * to prevent host hang issue
		 */
		ufshcd_writel(hba,
			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
			      REG_UFS_XOUFS_CTRL);
	}

	return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case, return -EPROBE_DEFER.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't been probed yet. err = %d\n",
			 __func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}

static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

	if (on) {
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until the ack bit equals the req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

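	/*
	 * IP versions 0x36 and later (byte [23:16] of REG_UFS_MTK_IP_VER)
	 * provide the banked debug-select registers programmed below.
	 */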
	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}

static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
			    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a short while before the first state check */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

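		/* the low 5 bits of the probe value carry the VS state machine */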
		sm = val & 0x1f;

		/*
		 * If the state machine is in H8 enter or H8 enter confirm,
		 * wait until it returns to the idle state.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
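		/* the link state is reported in bits [31:28] of the probe value */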
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

	if (of_property_read_bool(np, "mediatek,ufs-tx-skew-fix"))
		host->caps |= UFS_MTK_CAP_TX_SKEW_FIX;

	if (of_property_read_bool(np, "mediatek,ufs-disable-mcq"))
		host->caps |= UFS_MTK_CAP_DISABLE_MCQ;

	if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
		host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
	ufs_mtk_boost_crypt(hba, scale_up);
}

static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (on) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
	} else {
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
		phy_power_off(host->mphy);
	}
}

static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 irq, i;

	if (!is_mcq_enabled(hba))
		return;

	if (host->mcq_nr_intr == 0)
		return;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		irq = host->mcq_intr_info[i].irq;
		disable_irq(irq);
	}
	host->is_mcq_intr_enabled = false;
}

static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 irq, i;

	if (!is_mcq_enabled(hba))
		return;

	if (host->mcq_nr_intr == 0)
		return;

	if (host->is_mcq_intr_enabled == true)
		return;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		irq = host->mcq_intr_info[i].irq;
		enable_irq(irq);
	}
	host->is_mcq_intr_enabled = true;
}

/**
 * ufs_mtk_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Return: 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * If ufs_mtk_init() is not yet done, simply ignore.
	 * ufs_mtk_setup_clocks() will be called again from
	 * ufs_mtk_init() once init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate the ref-clk and power off the mphy if the
			 * link is in OFF or Hibern8 state, entered by either
			 * Auto-Hibern8 or ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_pwr_ctrl(hba, false);
		ufs_mtk_mcq_disable_irq(hba);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_pwr_ctrl(hba, true);
		ufs_mtk_mcq_enable_irq(hba);
	}

	return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

/**
 * ufs_mtk_init_clocks - Init mtk driver private clocks
 *
 * @hba: per adapter instance
 */
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki, *clki_tmp;

	/*
	 * Find private clocks and store them in struct ufs_mtk_clk.
	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from the list to avoid
	 * being switched on/off in clock gating.
	 */
	list_for_each_entry_safe(clki, clki_tmp, head, list) {
		if (!strcmp(clki->name, "ufs_sel")) {
			host->mclk.ufs_sel_clki = clki;
		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
			host->mclk.ufs_sel_max_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
			host->mclk.ufs_sel_min_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		}
	}

	if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
	    !mclk->ufs_sel_min_clki) {
		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
		dev_info(hba->dev,
			 "%s: Clk-scaling not ready. Feature disabled.",
			 __func__);
	}
}

#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct device_node *np = hba->dev->of_node;
	struct device *dev = hba->dev;
	char vcc_name[MAX_VCC_NAME];
	struct arm_smccc_res res;
	int err, ver;

	if (hba->vreg_info.vcc)
		return 0;

	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
		ufs_mtk_get_vcc_num(res);
		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
		else
			return -ENODEV;
	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
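		/* major version nibble of wSpecVersion, e.g. 0x0300 -> "vcc-ufs3" */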
		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	} else {
		return 0;
	}

	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
	if (err)
		return err;

	err = ufshcd_get_vreg(dev, info->vcc);
	if (err)
		return err;

	err = regulator_enable(info->vcc->reg);
	if (!err) {
		info->vcc->enabled = true;
		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
	}

	return err;
}

static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct ufs_vreg **vreg_on, **vreg_off;

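	/* UFS 3.x devices are powered from VCCQ, UFS 2.x devices from VCCQ2 */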
	if (hba->dev_info.wspecversion >= 0x0300) {
		vreg_on = &info->vccq;
		vreg_off = &info->vccq2;
	} else {
		vreg_on = &info->vccq2;
		vreg_off = &info->vccq;
	}

	if (*vreg_on)
		(*vreg_on)->always_on = true;

	if (*vreg_off) {
		regulator_disable((*vreg_off)->reg);
		devm_kfree(hba->dev, (*vreg_off)->name);
		devm_kfree(hba->dev, *vreg_off);
		*vreg_off = NULL;
	}
}

static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct platform_device *pdev;
	int i;
	int irq;

	host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
	pdev = container_of(hba->dev, struct platform_device, dev);

	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
		goto failed;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
			goto failed;
		}
		host->mcq_intr_info[i].hba = hba;
		host->mcq_intr_info[i].irq = irq;
		dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
	}

	return;
failed:
	/* invalidate irq info */
	for (i = 0; i < host->mcq_nr_intr; i++)
		host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;

	host->mcq_nr_intr = 0;
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds the PHY to the controller and powers up the PHY, enabling
 * clocks and regulators.
 *
 * Return: -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	struct Scsi_Host *shost = hba->host;
	int err = 0;
	struct arm_smccc_res res;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	ufs_mtk_init_mcq_irq(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* backup mphy setting if mphy can reset */
	if (host->mphy_reset)
		ufs_mtk_mphy_ctrl(UFS_MPHY_BACKUP, res);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;

	/* Enable clk scaling */
	hba->caps |= UFSHCD_CAP_CLK_SCALING;

	/* Set runtime pm delay to replace default */
	shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;

	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	ufs_mtk_init_clocks(hba);

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init(), thus
	 * the phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);

	if (ufs_mtk_is_rtff_mtcmos(hba)) {
		/* Restore first to avoid backing up unexpected values */
		ufs_mtk_mtcmos_ctrl(false, res);

		/* Power on to init */
		ufs_mtk_mtcmos_ctrl(true, res);
	}

	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (!ufs_mtk_is_pmc_via_fastauto(hba))
		return false;

	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
		return false;

	if (dev_req_params->pwr_tx != FAST_MODE &&
	    dev_req_params->gear_tx < UFS_HS_G4)
		return false;

	if (dev_req_params->pwr_rx != FAST_MODE &&
	    dev_req_params->gear_rx < UFS_HS_G4)
		return false;

	return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_host_params host_params;
	int ret;

	ufshcd_init_host_params(&host_params);
	host_params.hs_rx_gear = UFS_HS_G5;
	host_params.hs_tx_gear = UFS_HS_G5;

	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			       dev_req_params->lane_tx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			       dev_req_params->lane_rx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       dev_req_params->hs_rate);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       PA_NO_ADAPT);

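		/* the UIC power mode carries the RX mode in the upper nibble, TX in the lower */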
		ret = ufshcd_uic_change_pwr_mode(hba,
					FASTAUTO_MODE << 4 | FASTAUTO_MODE);

		if (ret) {
			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
				__func__, ret);
		}
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
					   dev_req_params->gear_tx,
					   PA_INITIAL_ADAPT);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * If the UIC command failed, forcibly remain in non-LPM mode
		 * so that the default hba_enable_delay_us value is used when
		 * re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
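		/* delay clock gating until the auto-hibern8 period plus a 5 ms margin elapses */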
		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
	}
}

static void ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be configured during hba probe */
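	/* timer = 10, scale = 3 (1 ms units): a 10 ms auto-hibern8 idle timer */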
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on the safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (err)
		return err;

	/* Check the link state to make sure H8 exit succeeded */
	ufs_mtk_wait_idle_state(hba, 5);
	err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (err) {
		dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
		return err;
	}
	ufshcd_set_link_active(hba);

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	if (is_mcq_enabled(hba)) {
		ufs_mtk_config_mcq(hba, false);
		ufshcd_mcq_make_queues_operational(hba);
		ufshcd_mcq_config_mac(hba, hba->nutrs);
		ufshcd_mcq_enable(hba);
	}

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	/* Disable reset confirm feature by UniPro */
	ufshcd_writel(hba,
		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
		      REG_UFS_XOUFS_CTRL);

	err = ufs_mtk_unipro_set_lpm(hba, true);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_lpm(hba, false);
		return err;
	}

	return 0;
}

static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct ufs_vreg *vccqx = NULL;

	if (hba->vreg_info.vccq)
		vccqx = hba->vreg_info.vccq;
	else
		vccqx = hba->vreg_info.vccq2;

	regulator_set_mode(vccqx->reg,
			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct arm_smccc_res res;

	ufs_mtk_device_pwr_ctrl(!lpm,
				(unsigned long)hba->dev_info.wspecversion,
				res);
}

static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	bool skip_vccqx = false;

	/* Prevent entering LPM when device is still active */
	if (lpm && ufshcd_is_ufs_dev_active(hba))
		return;

	/* Skip vccqx lpm control and control vsx only */
	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
		skip_vccqx = true;

	/* VCC is always-on, control vsx only */
	if (!hba->vreg_info.vcc)
		skip_vccqx = true;

	/* A broken VCC keeps VCC always on; in most cases control vsx only */
	if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) {
		/* Some device vccqx/vsx can enter lpm */
		if (ufs_mtk_is_allow_vccqx_lpm(hba))
			skip_vccqx = false;
		else /* control vsx only */
			skip_vccqx = true;
	}

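	/* enter LPM with vccqx before vsx and leave in the reverse order */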
	if (lpm) {
		if (!skip_vccqx)
			ufs_mtk_vccqx_set_lpm(hba, lpm);
		ufs_mtk_vsx_set_lpm(hba, lpm);
	} else {
		ufs_mtk_vsx_set_lpm(hba, lpm);
		if (!skip_vccqx)
			ufs_mtk_vccqx_set_lpm(hba, lpm);
	}
}

static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	int ret;

	/* disable auto-hibern8 */
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* wait for the host to return to idle state after auto-hibern8 is off */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
	enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (ufshcd_is_auto_hibern8_supported(hba))
			ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned, to prevent
		 * ufshcd_suspend() from re-enabling regulators while vreg
		 * is still in low-power mode.
		 */
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	ufs_mtk_sram_pwr_ctrl(false, res);

	return 0;
fail:
	/*
	 * Forcibly set the link to the off state to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for a complete host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		ufs_mtk_dev_vreg_set_lpm(hba, false);

	ufs_mtk_sram_pwr_ctrl(true, res);

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	/* Dump ufshci register 0x140 ~ 0x14C */
	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
			 "XOUFS Ctrl (0x140): ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* Dump ufshci register 0x2200 ~ 0x22AC */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl (0x2200): ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
	} else if (mid == UFS_VENDOR_MICRON) {
		/* Only for hosts which have the TX skew issue */
		if (ufs_mtk_is_tx_skew_fix(hba) &&
			(STR_PRFX_EQUAL("MT128GBCAV2U31", dev_info->model) ||
			STR_PRFX_EQUAL("MT256GBCAV4U31", dev_info->model) ||
			STR_PRFX_EQUAL("MT512GBCAV8U31", dev_info->model) ||
			STR_PRFX_EQUAL("MT256GBEAX4U40", dev_info->model) ||
			STR_PRFX_EQUAL("MT512GAYAX4U40", dev_info->model) ||
			STR_PRFX_EQUAL("MT001TAYAX8U40", dev_info->model))) {
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8);
		}
	}

	/*
	 * Decide the waiting time before gating and after ungating
	 * the reference clock according to vendor requirements.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
	else
		ufs_mtk_setup_ref_clk_wait_us(hba,
					      REFCLK_DEFAULT_WAIT_US);
	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC will be kept always-on, thus we don't
		 * need any delay during regulator operations.
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}

	ufs_mtk_vreg_fix_vcc(hba);
	ufs_mtk_vreg_fix_vccqx(hba);
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;
	unsigned long reg;
	u8 bit;

	trace_ufs_mtk_event(evt, val);

	/* Print details of UIC Errors */
	if (evt <= UFS_EVT_DME_ERR) {
		dev_info(hba->dev,
			 "Host UIC Error Code (%s): %08x\n",
			 ufs_uic_err_str[evt], val);
		reg = val;
	}

	if (evt == UFS_EVT_PA_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
	}

	if (evt == UFS_EVT_DL_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
	}
}

static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
				struct devfreq_dev_profile *profile,
				struct devfreq_simple_ondemand_data *data)
{
	/* Customize min gear in clk scaling */
	hba->clk_scaling.min_gear = UFS_HS_G4;

	hba->vps->devfreq_profile.polling_ms = 200;
	hba->vps->ondemand_data.upthreshold = 50;
	hba->vps->ondemand_data.downdifferential = 20;
}

/**
 * ufs_mtk_clk_scale - Internal clk scaling operation
 *
 * The MTK platform supports clk scaling by switching the parent of ufs_sel
 * (a mux). ufs_sel feeds ufs_ck, which clocks the UFS hardware directly.
 * The max and min clock rates of ufs_sel defined in the dts should match
 * the rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
 * This prevents changing the rate of a pll clock that is shared between
 * modules.
 *
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 */
static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
	int ret = 0;

	ret = clk_prepare_enable(clki->clk);
	if (ret) {
		dev_info(hba->dev,
			 "clk_prepare_enable() fail, ret: %d\n", ret);
		return;
	}

	if (scale_up) {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
		clki->curr_freq = clki->max_freq;
	} else {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
		clki->curr_freq = clki->min_freq;
	}

	if (ret) {
		dev_info(hba->dev,
			 "Failed to set ufs_sel_clki, ret: %d\n", ret);
	}

	clk_disable_unprepare(clki->clk);

	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}

static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
				    enum ufs_notify_change_status status)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return 0;

	if (status == PRE_CHANGE) {
		/* Switch parent before clk_set_rate() */
		ufs_mtk_clk_scale(hba, scale_up);
	} else {
		/* Request interrupt latency QoS accordingly */
		ufs_mtk_scale_perf(hba, scale_up);
	}

	return 0;
}

static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	/* MCQ operation not permitted */
	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
		return -EPERM;

	return MAX_SUPP_MAC;
}

static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
{
	struct ufshcd_mcq_opr_info_t *opr;
	int i;

	hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
	hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
	hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
	hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;

	for (i = 0; i < OPR_MAX; i++) {
		opr = &hba->mcq_opr[i];
		opr->stride = REG_UFS_MCQ_STRIDE;
		opr->base = hba->mmio_base + opr->offset;
	}

	return 0;
}

static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	/* fail MCQ initialization if the interrupts were not populated properly */
	if (!host->mcq_nr_intr) {
		dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
		return -EINVAL;
	}

	hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
	return 0;
}

static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
{
	struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
	struct ufs_hba *hba = mcq_intr_info->hba;
	struct ufs_hw_queue *hwq;
	u32 events;
	int qid = mcq_intr_info->qid;

	hwq = &hba->uhq[qid];

	events = ufshcd_mcq_read_cqis(hba, qid);
	if (events)
		ufshcd_mcq_write_cqis(hba, events, qid);

	if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
		ufshcd_mcq_poll_cqe_lock(hba, hwq);

	return IRQ_HANDLED;
}

static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 irq, i;
	int ret;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		irq = host->mcq_intr_info[i].irq;
		if (irq == MTK_MCQ_INVALID_IRQ) {
			dev_err(hba->dev, "invalid irq. %d\n", i);
			return -ENOPARAM;
		}

		host->mcq_intr_info[i].qid = i;
		ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
				       &host->mcq_intr_info[i]);

		dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");

		if (ret) {
			dev_err(hba->dev, "Cannot request irq %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret = 0;

	if (!host->mcq_set_intr) {
		/* Disable irq option register */
		ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);

		if (irq) {
			ret = ufs_mtk_config_mcq_irq(hba);
			if (ret)
				return ret;
		}

		host->mcq_set_intr = true;
	}

	ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
	ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);

	return 0;
}

static int ufs_mtk_config_esi(struct ufs_hba *hba)
{
	return ufs_mtk_config_mcq(hba, true);
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
	.config_scaling_param = ufs_mtk_config_scaling_param,
	.clk_scale_notify    = ufs_mtk_clk_scale_notify,
	/* mcq vops */
	.get_hba_mac         = ufs_mtk_get_hba_mac,
	.op_runtime_config   = ufs_mtk_op_runtime_config,
	.mcq_config_resource = ufs_mtk_mcq_config_resource,
	.config_esi          = ufs_mtk_config_esi,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return: zero for success and non-zero for failure.
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
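	/* link to the reset controller so this device probes and suspends after it */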
	link = device_link_add(dev, &reset_pdev->dev,
		DL_FLAG_AUTOPROBE_CONSUMER);
	put_device(&reset_pdev->dev);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_err(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 */
static void ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
}

#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct arm_smccc_res res;
	int ret;

	ret = ufshcd_system_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	if (ufs_mtk_is_rtff_mtcmos(hba))
		ufs_mtk_mtcmos_ctrl(false, res);

	return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct arm_smccc_res res;

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	if (ufs_mtk_is_rtff_mtcmos(hba))
		ufs_mtk_mtcmos_ctrl(true, res);

	return ufshcd_system_resume(dev);
}
#endif

#ifdef CONFIG_PM
static int ufs_mtk_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct arm_smccc_res res;
	int ret = 0;

	ret = ufshcd_runtime_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	if (ufs_mtk_is_rtff_mtcmos(hba))
		ufs_mtk_mtcmos_ctrl(false, res);

	return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct arm_smccc_res res;

	if (ufs_mtk_is_rtff_mtcmos(hba))
		ufs_mtk_mtcmos_ctrl(true, res);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_runtime_resume(dev);
}
#endif

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
				ufs_mtk_system_resume)
	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
			   ufs_mtk_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove_new = ufs_mtk_remove,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);