// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
#undef CREATE_TRACE_POINTS

#define MAX_SUPP_MAC 64
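/*
 * MCQ_QUEUE_OFFSET() derives the MCQ queue region offset from the MCQ
 * capability word: bits [23:16] select the block number, each block
 * spanning 0x200 bytes. Worked example (illustrative value, not taken
 * from real hardware): capabilities = 0x00080000 gives block 0x08,
 * i.e. an offset of 0x08 * 0x200 = 0x1000 from mmio_base.
 */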
#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)

static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "H9HQ21AFAMZDAR",
	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
	{}
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};

/*
 * Details of UIC Errors
 */
static const char *const ufs_uic_err_str[] = {
	"PHY Adapter Layer",
	"Data Link Layer",
	"Network Link Layer",
	"Transport Link Layer",
	"DME"
};

static const char *const ufs_uic_pa_err_str[] = {
	"PHY error on Lane 0",
	"PHY error on Lane 1",
	"PHY error on Lane 2",
	"PHY error on Lane 3",
	"Generic PHY Adapter Error. This should be the LINERESET indication"
};

static const char *const ufs_uic_dl_err_str[] = {
	"NAC_RECEIVED",
	"TCx_REPLAY_TIMER_EXPIRED",
	"AFCx_REQUEST_TIMER_EXPIRED",
	"FCx_PROTECTION_TIMER_EXPIRED",
	"CRC_ERROR",
	"RX_BUFFER_OVERFLOW",
	"MAX_FRAME_LENGTH_EXCEEDED",
	"WRONG_SEQUENCE_NUMBER",
	"AFC_FRAME_SYNTAX_ERROR",
	"NAC_FRAME_SYNTAX_ERROR",
	"EOF_SYNTAX_ERROR",
	"FRAME_SYNTAX_ERROR",
	"BAD_CTRL_SYMBOL_TYPE",
	"PA_INIT_ERROR",
	"PA_ERROR_IND_RECEIVED",
	"PA_INIT"
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

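/*
 * Toggle UniPro clock gating. When enabling, set the RX symbol, system
 * and TX clock gate enables and stop forcing the TX symbol clock
 * request; when disabling, do the reverse so those clocks run freely.
 */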
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}

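/*
 * Full host reset: assert the HCI, crypto and UniPro resets together,
 * hold them for at least 100 us, then release them in reverse order.
 */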
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}

		/*
		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
		 * to prevent host hang issue
		 */
		ufshcd_writel(hba,
			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
			      REG_UFS_XOUFS_CTRL);
	}

	return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver is.
		 * In that case, return -EPROBE_DEFER so probing is retried
		 * once the phy driver is ready.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			 __func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}
	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}

static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

	if (on) {
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

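	/*
	 * Handshake with the controller: software drives the request on
	 * bit 0 of REG_UFS_REFCLK_CTRL and hardware mirrors its
	 * acknowledgement on bit 1, which is why the ack field is shifted
	 * right by one before being compared with the request bit below.
	 */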
	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until the ack bit matches the req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}

static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
			    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a short time before the first sample */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		sm = val & 0x1f;

		/*
		 * If the state machine is anywhere between H8 enter and
		 * H8 exit, keep polling until it returns to the idle state.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

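/*
 * Poll the link state, exposed in the top nibble of REG_UFS_PROBE,
 * until it matches @state or @max_wait_ms elapses.
 */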
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

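/*
 * Boost: raise vcore to the dts-provided floor first, then reparent the
 * crypto clock mux to its performance parent; unboost reverses the
 * order so the engine never runs on the fast clock at low voltage.
 */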
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

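/*
 * Optional host capabilities are advertised through device-tree
 * properties. Illustrative fragment (node name and values are made up;
 * only the property names come from the parsing below):
 *
 *	ufshci@11270000 {
 *		compatible = "mediatek,mt8183-ufshci";
 *		mediatek,ufs-boost-crypt;
 *		boost-crypt-vcore-min = <725000>;
 *		mediatek,ufs-support-va09;
 *		mediatek,ufs-disable-ah8;
 *	};
 */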
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (!host || !host->pm_qos_init)
		return;

	cpu_latency_qos_update_request(&host->pm_qos_req,
				       boost ? 0 : PM_QOS_DEFAULT_VALUE);
}

static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
	ufs_mtk_boost_crypt(hba, scale_up);
	ufs_mtk_boost_pm_qos(hba, scale_up);
}

static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (on) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
	} else {
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
		phy_power_off(host->mphy);
	}
}

/**
 * ufs_mtk_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks, else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Return: 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not done yet, simply ignore.
	 * This ufs_mtk_setup_clocks() will be called again from
	 * ufs_mtk_init() once init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate the ref-clk and power off the mphy if the
			 * link is OFF or in Hibern8, entered either by
			 * Auto-Hibern8 or ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_pwr_ctrl(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_pwr_ctrl(hba, true);
	}

	return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

/**
 * ufs_mtk_init_clocks - Init mtk driver private clocks
 *
 * @hba: per adapter instance
 */
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki, *clki_tmp;

	/*
	 * Find private clocks and store them in struct ufs_mtk_clk.
	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from the list to
	 * avoid them being switched on/off during clock gating.
	 */
	list_for_each_entry_safe(clki, clki_tmp, head, list) {
		if (!strcmp(clki->name, "ufs_sel")) {
			host->mclk.ufs_sel_clki = clki;
		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
			host->mclk.ufs_sel_max_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
			host->mclk.ufs_sel_min_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		}
	}

	if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
	    !mclk->ufs_sel_min_clki) {
		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
		dev_info(hba->dev,
			 "%s: Clk-scaling not ready. Feature disabled.",
			 __func__);
	}
}

#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct device_node *np = hba->dev->of_node;
	struct device *dev = hba->dev;
	char vcc_name[MAX_VCC_NAME];
	struct arm_smccc_res res;
	int err, ver;

	if (hba->vreg_info.vcc)
		return 0;

	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
		ufs_mtk_get_vcc_num(res);
		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
		else
			return -ENODEV;
	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
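		/* e.g. wspecversion 0x0310 (UFS 3.1) resolves to "vcc-ufs3" */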
		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	} else {
		return 0;
	}

	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
	if (err)
		return err;

	err = ufshcd_get_vreg(dev, info->vcc);
	if (err)
		return err;

	err = regulator_enable(info->vcc->reg);
	if (!err) {
		info->vcc->enabled = true;
		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
	}

	return err;
}

static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct ufs_vreg **vreg_on, **vreg_off;

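	/*
	 * Devices reporting wspecversion >= 3.0 are powered from VCCQ,
	 * older ones from VCCQ2: keep the rail in use always-on and
	 * release the unused one.
	 */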
	if (hba->dev_info.wspecversion >= 0x0300) {
		vreg_on = &info->vccq;
		vreg_off = &info->vccq2;
	} else {
		vreg_on = &info->vccq2;
		vreg_off = &info->vccq;
	}

	if (*vreg_on)
		(*vreg_on)->always_on = true;

	if (*vreg_off) {
		regulator_disable((*vreg_off)->reg);
		devm_kfree(hba->dev, (*vreg_off)->name);
		devm_kfree(hba->dev, *vreg_off);
		*vreg_off = NULL;
	}
}

static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct platform_device *pdev;
	int i;
	int irq;

	host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
	pdev = container_of(hba->dev, struct platform_device, dev);

	for (i = 0; i < host->mcq_nr_intr; i++) {
		/* irq index 0 is the legacy irq, sq/cq irqs start from index 1 */
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
			goto failed;
		}
		host->mcq_intr_info[i].hba = hba;
		host->mcq_intr_info[i].irq = irq;
		dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
	}

	return;
failed:
	/* invalidate irq info */
	for (i = 0; i < host->mcq_nr_intr; i++)
		host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;

	host->mcq_nr_intr = 0;
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling
 * clocks and regulators.
 *
 * Return: -EPROBE_DEFER if the PHY is not ready yet, a negative errno
 * on PHY power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	ufs_mtk_init_mcq_irq(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;

	/* Enable clock scaling */
	hba->caps |= UFSHCD_CAP_CLK_SCALING;

	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	ufs_mtk_init_clocks(hba);

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks explicitly here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	/* Initialize pm-qos request */
	cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
	host->pm_qos_init = true;

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (!ufs_mtk_is_pmc_via_fastauto(hba))
		return false;

	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
		return false;

	if (dev_req_params->pwr_tx != FAST_MODE &&
	    dev_req_params->gear_tx < UFS_HS_G4)
		return false;

	if (dev_req_params->pwr_rx != FAST_MODE &&
	    dev_req_params->gear_rx < UFS_HS_G4)
		return false;

	return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_host_params host_params;
	int ret;

	ufshcd_init_host_params(&host_params);
	host_params.hs_rx_gear = UFS_HS_G5;
	host_params.hs_tx_gear = UFS_HS_G5;

	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			       dev_req_params->lane_tx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			       dev_req_params->lane_rx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       dev_req_params->hs_rate);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       PA_NO_ADAPT);

		ret = ufshcd_uic_change_pwr_mode(hba,
					FASTAUTO_MODE << 4 | FASTAUTO_MODE);

		if (ret) {
			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
				__func__, ret);
		}
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
					   dev_req_params->gear_tx,
					   PA_INITIAL_ADAPT);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Record the new state on success; on failure, force
		 * non-LPM mode so that the default hba_enable_delay_us
		 * value is used when re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
	}
}

static void ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/*
	 * Default auto-hibern8 idle timer: value 10 at scale code 3
	 * (1 ms units), i.e. 10 ms. This may be reconfigured later
	 * when the hba is probed.
	 */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices are required to
	 * detect any positive or negative RST_n pulse wider than 1 us.
	 *
	 * To be on the safe side, keep the reset low for at least 10 us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	if (!hba->mcq_enabled) {
		err = ufshcd_make_hba_operational(hba);
	} else {
		ufs_mtk_config_mcq(hba, false);
		ufshcd_mcq_make_queues_operational(hba);
		ufshcd_mcq_config_mac(hba, hba->nutrs);
		/* Enable MCQ mode */
		ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
			      REG_UFS_MEM_CFG);
	}

	if (err)
		return err;

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	/* Disable reset confirm feature by UniPro */
	ufshcd_writel(hba,
		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
		      REG_UFS_XOUFS_CTRL);

	err = ufs_mtk_unipro_set_lpm(hba, true);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_lpm(hba, false);
		return err;
	}

	return 0;
}

static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct ufs_vreg *vccqx = NULL;

	if (hba->vreg_info.vccq)
		vccqx = hba->vreg_info.vccq;
	else
		vccqx = hba->vreg_info.vccq2;

	regulator_set_mode(vccqx->reg,
			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct arm_smccc_res res;

	ufs_mtk_device_pwr_ctrl(!lpm,
				(unsigned long)hba->dev_info.wspecversion,
				res);
}

static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
		return;

	/* Skip if VCC is assumed always-on */
	if (!hba->vreg_info.vcc)
		return;

	/* Bypass LPM when device is still active */
	if (lpm && ufshcd_is_ufs_dev_active(hba))
		return;

	/* Bypass LPM if VCC is enabled */
	if (lpm && hba->vreg_info.vcc->enabled)
		return;

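	/* Enter LPM rail by rail and leave it in the reverse order */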
	if (lpm) {
		ufs_mtk_vccqx_set_lpm(hba, lpm);
		ufs_mtk_vsx_set_lpm(hba, lpm);
	} else {
		ufs_mtk_vsx_set_lpm(hba, lpm);
		ufs_mtk_vccqx_set_lpm(hba, lpm);
	}
}

static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	int ret;

	/* disable auto-hibern8 */
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* wait for the host to return to idle state once auto-hibern8 is off */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
	enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (ufshcd_is_auto_hibern8_supported(hba))
			ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

	return 0;
fail:
	/*
	 * Forcibly set the link to the off state to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for a complete host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		ufs_mtk_dev_vreg_set_lpm(hba, false);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	/* Dump ufshci register 0x140 ~ 0x14C */
	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
			 "XOUFS Ctrl (0x140): ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* Dump ufshci register 0x2200 ~ 0x22AC */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl (0x2200): ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
	}

	/*
	 * Decide how long to wait before gating the reference clock and
	 * after ungating it, according to each vendor's requirements.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
	else
		ufs_mtk_setup_ref_clk_wait_us(hba,
					      REFCLK_DEFAULT_WAIT_US);
	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC will be kept always-on thus we don't
		 * need any delay during regulator operations
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}

	ufs_mtk_vreg_fix_vcc(hba);
	ufs_mtk_vreg_fix_vccqx(hba);
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;
	unsigned long reg;
	u8 bit;

	trace_ufs_mtk_event(evt, val);

	/* Print details of UIC Errors */
	if (evt <= UFS_EVT_DME_ERR) {
		dev_info(hba->dev,
			 "Host UIC Error Code (%s): %08x\n",
			 ufs_uic_err_str[evt], val);
		reg = val;
	}

	if (evt == UFS_EVT_PA_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
	}

	if (evt == UFS_EVT_DL_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
	}
}

static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
				struct devfreq_dev_profile *profile,
				struct devfreq_simple_ondemand_data *data)
{
	/* Customize min gear in clk scaling */
	hba->clk_scaling.min_gear = UFS_HS_G4;

	hba->vps->devfreq_profile.polling_ms = 200;
	hba->vps->ondemand_data.upthreshold = 50;
	hba->vps->ondemand_data.downdifferential = 20;
}

/**
 * ufs_mtk_clk_scale - Internal clk scaling operation
 *
 * MTK platform supports clk scaling by switching the parent of ufs_sel
 * (a mux). ufs_sel feeds ufs_ck, which clocks the UFS hardware
 * directly. The max and min clock rates of ufs_sel defined in the dts
 * should match the rates of "ufs_sel_max_src" and "ufs_sel_min_src"
 * respectively. This prevents changing the rate of a PLL clock that is
 * shared between modules.
 *
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 */
static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
	int ret = 0;

	ret = clk_prepare_enable(clki->clk);
	if (ret) {
		dev_info(hba->dev,
			 "clk_prepare_enable() fail, ret: %d\n", ret);
		return;
	}

	if (scale_up) {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
		clki->curr_freq = clki->max_freq;
	} else {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
		clki->curr_freq = clki->min_freq;
	}

	if (ret) {
		dev_info(hba->dev,
			 "Failed to set ufs_sel_clki, ret: %d\n", ret);
	}

	clk_disable_unprepare(clki->clk);

	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}

static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
				    enum ufs_notify_change_status status)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return 0;

	if (status == PRE_CHANGE) {
		/* Switch parent before clk_set_rate() */
		ufs_mtk_clk_scale(hba, scale_up);
	} else {
		/* Request interrupt latency QoS accordingly */
		ufs_mtk_scale_perf(hba, scale_up);
	}

	return 0;
}

static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
{
	return MAX_SUPP_MAC;
}

static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
{
	struct ufshcd_mcq_opr_info_t *opr;
	int i;

	hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
	hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
	hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
	hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;

	for (i = 0; i < OPR_MAX; i++) {
		opr = &hba->mcq_opr[i];
		opr->stride = REG_UFS_MCQ_STRIDE;
		opr->base = hba->mmio_base + opr->offset;
	}

	return 0;
}

static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	/* Fail MCQ initialization if the interrupts were not set up properly */
	if (!host->mcq_nr_intr) {
		dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
		return -EINVAL;
	}

	hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
	return 0;
}

static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
{
	struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
	struct ufs_hba *hba = mcq_intr_info->hba;
	struct ufs_hw_queue *hwq;
	u32 events;
	int qid = mcq_intr_info->qid;

	hwq = &hba->uhq[qid];

	events = ufshcd_mcq_read_cqis(hba, qid);
	if (events)
		ufshcd_mcq_write_cqis(hba, events, qid);

	if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
		ufshcd_mcq_poll_cqe_lock(hba, hwq);

	return IRQ_HANDLED;
}

static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 irq, i;
	int ret;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		irq = host->mcq_intr_info[i].irq;
		if (irq == MTK_MCQ_INVALID_IRQ) {
			dev_err(hba->dev, "invalid irq. %d\n", i);
			return -ENOPARAM;
		}

		host->mcq_intr_info[i].qid = i;
		ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
				       &host->mcq_intr_info[i]);

		dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");

		if (ret) {
			dev_err(hba->dev, "Cannot request irq %d\n", ret);
			return ret;
		}
	}

	return 0;
}

1633 
1634 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
1635 {
1636 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1637 	int ret = 0;
1638 
1639 	if (!host->mcq_set_intr) {
1640 		/* Disable irq option register */
1641 		ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
1642 
1643 		if (irq) {
1644 			ret = ufs_mtk_config_mcq_irq(hba);
1645 			if (ret)
1646 				return ret;
1647 		}
1648 
1649 		host->mcq_set_intr = true;
1650 	}
1651 
1652 	ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
1653 	ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
1654 
1655 	return 0;
1656 }
1657 
1658 static int ufs_mtk_config_esi(struct ufs_hba *hba)
1659 {
1660 	return ufs_mtk_config_mcq(hba, true);
1661 }
1662 
/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
	.config_scaling_param = ufs_mtk_config_scaling_param,
	.clk_scale_notify    = ufs_mtk_clk_scale_notify,
	/* mcq vops */
	.get_hba_mac         = ufs_mtk_get_hba_mac,
	.op_runtime_config   = ufs_mtk_op_runtime_config,
	.mcq_config_resource = ufs_mtk_mcq_config_resource,
	.config_esi          = ufs_mtk_config_esi,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return: zero for success and non-zero for failure.
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
		DL_FLAG_AUTOPROBE_CONSUMER);
	put_device(&reset_pdev->dev);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_err(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 */
static void ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);
}

#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_system_resume(dev);
}
#endif

#ifdef CONFIG_PM
static int ufs_mtk_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;

	ret = ufshcd_runtime_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_runtime_resume(dev);
}
#endif

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
				ufs_mtk_system_resume)
	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
			   ufs_mtk_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove_new = ufs_mtk_remove,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);