xref: /linux/drivers/ufs/host/ufs-mediatek.c (revision 7eb7f5723df50a7d5564aa609e4c147f669a5cb4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *	Stanley Chu <stanley.chu@mediatek.com>
6  *	Peter Wang <peter.wang@mediatek.com>
7  */
8 
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_device.h>
17 #include <linux/of_platform.h>
18 #include <linux/phy/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/regulator/consumer.h>
21 #include <linux/reset.h>
22 
23 #include <ufs/ufshcd.h>
24 #include "ufshcd-pltfrm.h"
25 #include <ufs/ufs_quirks.h>
26 #include <ufs/unipro.h>
27 
28 #include "ufs-mediatek.h"
29 #include "ufs-mediatek-sip.h"
30 
31 static int  ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
32 static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
33 
34 #define CREATE_TRACE_POINTS
35 #include "ufs-mediatek-trace.h"
36 #undef CREATE_TRACE_POINTS
37 
38 #define MAX_SUPP_MAC 64
39 #define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
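/*
 * Example: an argument of 0x00030000 yields ((c >> 16) & 0xFF) = 3, so the
 * resulting per-queue register offset is 3 * 0x200 = 0x600.
 */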
40 
41 static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
42 	{ .wmanufacturerid = UFS_ANY_VENDOR,
43 	  .model = UFS_ANY_MODEL,
44 	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
45 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
46 	  .model = "H9HQ21AFAMZDAR",
47 	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
48 	{}
49 };
50 
51 static const struct of_device_id ufs_mtk_of_match[] = {
52 	{ .compatible = "mediatek,mt8183-ufshci" },
53 	{ .compatible = "mediatek,mt8195-ufshci" },
54 	{},
55 };
56 MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
57 
58 /*
59  * Details of UIC Errors
60  */
61 static const char *const ufs_uic_err_str[] = {
62 	"PHY Adapter Layer",
63 	"Data Link Layer",
64 	"Network Link Layer",
65 	"Transport Link Layer",
66 	"DME"
67 };
68 
69 static const char *const ufs_uic_pa_err_str[] = {
70 	"PHY error on Lane 0",
71 	"PHY error on Lane 1",
72 	"PHY error on Lane 2",
73 	"PHY error on Lane 3",
74 	"Generic PHY Adapter Error. This should be the LINERESET indication"
75 };
76 
77 static const char *const ufs_uic_dl_err_str[] = {
78 	"NAC_RECEIVED",
79 	"TCx_REPLAY_TIMER_EXPIRED",
80 	"AFCx_REQUEST_TIMER_EXPIRED",
81 	"FCx_PROTECTION_TIMER_EXPIRED",
82 	"CRC_ERROR",
83 	"RX_BUFFER_OVERFLOW",
84 	"MAX_FRAME_LENGTH_EXCEEDED",
85 	"WRONG_SEQUENCE_NUMBER",
86 	"AFC_FRAME_SYNTAX_ERROR",
87 	"NAC_FRAME_SYNTAX_ERROR",
88 	"EOF_SYNTAX_ERROR",
89 	"FRAME_SYNTAX_ERROR",
90 	"BAD_CTRL_SYMBOL_TYPE",
91 	"PA_INIT_ERROR",
92 	"PA_ERROR_IND_RECEIVED",
93 	"PA_INIT"
94 };
95 
96 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
97 {
98 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
99 
100 	return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
101 }
102 
103 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
104 {
105 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
106 
107 	return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL;
108 }
109 
110 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
111 {
112 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
113 
114 	return host->caps & UFS_MTK_CAP_BROKEN_VCC;
115 }
116 
117 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
118 {
119 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
120 
121 	return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO;
122 }
123 
124 static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
125 {
126 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
127 
128 	return host->caps & UFS_MTK_CAP_TX_SKEW_FIX;
129 }
130 
131 static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
132 {
133 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
134 
135 	return host->caps & UFS_MTK_CAP_RTFF_MTCMOS;
136 }
137 
138 static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
139 {
140 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
141 
142 	return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM;
143 }
144 
145 static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba)
146 {
147 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
148 	struct ufs_mtk_clk *mclk = &host->mclk;
149 
150 	return mclk->ufs_sel_clki &&
151 		mclk->ufs_sel_max_clki &&
152 		mclk->ufs_sel_min_clki;
153 }
154 
155 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
156 {
157 	u32 tmp;
158 
159 	if (enable) {
160 		ufshcd_dme_get(hba,
161 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
162 		tmp = tmp |
163 		      (1 << RX_SYMBOL_CLK_GATE_EN) |
164 		      (1 << SYS_CLK_GATE_EN) |
165 		      (1 << TX_CLK_GATE_EN);
166 		ufshcd_dme_set(hba,
167 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
168 
169 		ufshcd_dme_get(hba,
170 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
171 		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
172 		ufshcd_dme_set(hba,
173 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
174 	} else {
175 		ufshcd_dme_get(hba,
176 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
177 		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
178 			      (1 << SYS_CLK_GATE_EN) |
179 			      (1 << TX_CLK_GATE_EN));
180 		ufshcd_dme_set(hba,
181 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
182 
183 		ufshcd_dme_get(hba,
184 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
185 		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
186 		ufshcd_dme_set(hba,
187 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
188 	}
189 }
190 
191 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
192 {
193 	struct arm_smccc_res res;
194 
195 	ufs_mtk_crypto_ctrl(res, 1);
196 	if (res.a0) {
197 		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
198 			 __func__, res.a0);
199 		hba->caps &= ~UFSHCD_CAP_CRYPTO;
200 	}
201 }
202 
203 static void ufs_mtk_host_reset(struct ufs_hba *hba)
204 {
205 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
206 	struct arm_smccc_res res;
207 
208 	reset_control_assert(host->hci_reset);
209 	reset_control_assert(host->crypto_reset);
210 	reset_control_assert(host->unipro_reset);
211 	reset_control_assert(host->mphy_reset);
212 
213 	usleep_range(100, 110);
214 
215 	reset_control_deassert(host->unipro_reset);
216 	reset_control_deassert(host->crypto_reset);
217 	reset_control_deassert(host->hci_reset);
218 	reset_control_deassert(host->mphy_reset);
219 
220 	/* restore mphy setting after mphy reset */
221 	if (host->mphy_reset)
222 		ufs_mtk_mphy_ctrl(UFS_MPHY_RESTORE, res);
223 }
224 
225 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
226 				       struct reset_control **rc,
227 				       char *str)
228 {
229 	*rc = devm_reset_control_get(hba->dev, str);
230 	if (IS_ERR(*rc)) {
231 		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
232 			 str, PTR_ERR(*rc));
233 		*rc = NULL;
234 	}
235 }
236 
237 static void ufs_mtk_init_reset(struct ufs_hba *hba)
238 {
239 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
240 
241 	ufs_mtk_init_reset_control(hba, &host->hci_reset,
242 				   "hci_rst");
243 	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
244 				   "unipro_rst");
245 	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
246 				   "crypto_rst");
247 	ufs_mtk_init_reset_control(hba, &host->mphy_reset,
248 				   "mphy_rst");
249 }
250 
251 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
252 				     enum ufs_notify_change_status status)
253 {
254 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
255 
256 	if (status == PRE_CHANGE) {
257 		if (host->unipro_lpm) {
258 			hba->vps->hba_enable_delay_us = 0;
259 		} else {
260 			hba->vps->hba_enable_delay_us = 600;
261 			ufs_mtk_host_reset(hba);
262 		}
263 
264 		if (hba->caps & UFSHCD_CAP_CRYPTO)
265 			ufs_mtk_crypto_enable(hba);
266 
267 		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
268 			ufshcd_writel(hba, 0,
269 				      REG_AUTO_HIBERNATE_IDLE_TIMER);
270 			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
271 			hba->ahit = 0;
272 		}
273 
274 		/*
275 		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
276 		 * to prevent host hang issue
277 		 */
278 		ufshcd_writel(hba,
279 			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
280 			      REG_UFS_XOUFS_CTRL);
281 
282 		if (host->legacy_ip_ver)
283 			return 0;
284 
285 		/* DDR_EN setting */
286 		if (host->ip_ver >= IP_VER_MT6989) {
287 			ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
288 				0x453000, REG_UFS_MMIO_OPT_CTRL_0);
289 		}
290 
291 		if (host->ip_ver >= IP_VER_MT6991_A0) {
292 			/* Enable multi-rtt */
293 			ufshcd_rmwl(hba, MRTT_EN, MRTT_EN, REG_UFS_MMIO_OPT_CTRL_0);
294 			/* Enable random performance improvement */
295 			ufshcd_rmwl(hba, RDN_PFM_IMPV_DIS, 0, REG_UFS_MMIO_OPT_CTRL_0);
296 		}
297 	}
298 
299 	return 0;
300 }
301 
302 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
303 {
304 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
305 	struct device *dev = hba->dev;
306 	struct device_node *np = dev->of_node;
307 	int err = 0;
308 
309 	host->mphy = devm_of_phy_get_by_index(dev, np, 0);
310 
311 	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
312 		/*
313 		 * The UFS driver might be probed before the phy driver is.
314 		 * In that case, return -EPROBE_DEFER so that probing is retried.
315 		 */
316 		err = -EPROBE_DEFER;
317 		dev_info(dev,
318 			 "%s: required phy hasn't probed yet. err = %d\n",
319 			__func__, err);
320 	} else if (IS_ERR(host->mphy)) {
321 		err = PTR_ERR(host->mphy);
322 		if (err != -ENODEV) {
323 			dev_info(dev, "%s: PHY get failed %d\n", __func__,
324 				 err);
325 		}
326 	}
327 
328 	if (err)
329 		host->mphy = NULL;
330 	/*
331 	 * Allow unbound mphy because not every platform needs specific
332 	 * mphy control.
333 	 */
334 	if (err == -ENODEV)
335 		err = 0;
336 
337 	return err;
338 }
339 
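/*
 * Request or release the reference clock and poll REG_UFS_REFCLK_CTRL until
 * the controller mirrors the request bit into the ack bit, i.e. until the new
 * state takes effect, giving up after REFCLK_REQ_TIMEOUT_US.
 */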
340 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
341 {
342 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
343 	struct arm_smccc_res res;
344 	ktime_t timeout, time_checked;
345 	u32 value;
346 
347 	if (host->ref_clk_enabled == on)
348 		return 0;
349 
350 	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
351 
352 	if (on) {
353 		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
354 	} else {
355 		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
356 		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
357 	}
358 
359 	/* Wait for ack */
360 	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
361 	do {
362 		time_checked = ktime_get();
363 		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
364 
365 		/* Wait until the ack bit equals the req bit */
366 		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
367 			goto out;
368 
369 		usleep_range(100, 200);
370 	} while (ktime_before(time_checked, timeout));
371 
372 	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
373 
374 	/*
375 	 * If the clock-on request times out, assume the clock is still off and
376 	 * notify the secure firmware to apply the clock-off setting (keep DIFN disabled, release the resource).
377 	 * If the clock-off request times out, assume the clock will turn off
378 	 * eventually and just mark ref_clk_enabled as false (keep DIFN disabled, keep the resource).
379 	 */
380 	if (on)
381 		ufs_mtk_ref_clk_notify(false, POST_CHANGE, res);
382 	else
383 		host->ref_clk_enabled = false;
384 
385 	return -ETIMEDOUT;
386 
387 out:
388 	host->ref_clk_enabled = on;
389 	if (on)
390 		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
391 
392 	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
393 
394 	return 0;
395 }
396 
397 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
398 					  u16 gating_us)
399 {
400 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
401 
402 	if (hba->dev_info.clk_gating_wait_us) {
403 		host->ref_clk_gating_wait_us =
404 			hba->dev_info.clk_gating_wait_us;
405 	} else {
406 		host->ref_clk_gating_wait_us = gating_us;
407 	}
408 
409 	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
410 }
411 
412 static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
413 {
414 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
415 
416 	if (!host->legacy_ip_ver && host->ip_ver >= IP_VER_MT6983) {
417 		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
418 		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
419 		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
420 		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
421 		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
422 	} else {
423 		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
424 	}
425 }
426 
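/*
 * Poll the host state machine; if it sits anywhere between Hibern8 enter and
 * Hibern8 exit, keep waiting (up to retry_ms) until it returns to the idle
 * (VS_HCE_BASE) state. Uses ktime_get_mono_fast_ns() so it is safe to call
 * from the suspend path.
 */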
427 static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
428 			    unsigned long retry_ms)
429 {
430 	u64 timeout, time_checked;
431 	u32 val, sm;
432 	bool wait_idle;
433 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
434 
435 	/* cannot use plain ktime_get() in suspend */
436 	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
437 
438 	/* wait a short time before the first state check */
439 	udelay(10);
440 	wait_idle = false;
441 
442 	do {
443 		time_checked = ktime_get_mono_fast_ns();
444 		if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
445 			ufs_mtk_dbg_sel(hba);
446 			val = ufshcd_readl(hba, REG_UFS_PROBE);
447 		} else {
448 			val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
449 			val = val >> 16;
450 		}
451 
452 		sm = val & 0x1f;
453 
454 		/*
455 		 * If the state machine is anywhere between Hibern8 enter and
456 		 * Hibern8 exit, wait until it returns to the idle state.
457 		 */
458 		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
459 			wait_idle = true;
460 			udelay(50);
461 			continue;
462 		} else if (!wait_idle)
463 			break;
464 
465 		if (wait_idle && (sm == VS_HCE_BASE))
466 			break;
467 	} while (time_checked < timeout);
468 
469 	if (wait_idle && sm != VS_HCE_BASE) {
470 		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
471 		return -ETIMEDOUT;
472 	}
473 
474 	return 0;
475 }
476 
477 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
478 				   unsigned long max_wait_ms)
479 {
480 	ktime_t timeout, time_checked;
481 	u32 val;
482 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
483 
484 	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
485 	do {
486 		time_checked = ktime_get();
487 
488 		if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
489 			ufs_mtk_dbg_sel(hba);
490 			val = ufshcd_readl(hba, REG_UFS_PROBE);
491 			val = val >> 28;
492 		} else {
493 			val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
494 			val = val >> 24;
495 		}
496 
497 		if (val == state)
498 			return 0;
499 
500 		/* Sleep for max. 200us */
501 		usleep_range(100, 200);
502 	} while (ktime_before(time_checked, timeout));
503 
504 	return -ETIMEDOUT;
505 }
506 
507 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
508 {
509 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
510 	struct phy *mphy = host->mphy;
511 	struct arm_smccc_res res;
512 	int ret = 0;
513 
514 	if (!mphy || !(on ^ host->mphy_powered_on))
515 		return 0;
516 
517 	if (on) {
518 		if (ufs_mtk_is_va09_supported(hba)) {
519 			ret = regulator_enable(host->reg_va09);
520 			if (ret < 0)
521 				goto out;
522 			/* wait 200 us for VA09 to stabilize */
523 			usleep_range(200, 210);
524 			ufs_mtk_va09_pwr_ctrl(res, 1);
525 		}
526 		phy_power_on(mphy);
527 	} else {
528 		phy_power_off(mphy);
529 		if (ufs_mtk_is_va09_supported(hba)) {
530 			ufs_mtk_va09_pwr_ctrl(res, 0);
531 			ret = regulator_disable(host->reg_va09);
532 		}
533 	}
534 out:
535 	if (ret) {
536 		dev_info(hba->dev,
537 			 "failed to %s va09: %d\n",
538 			 on ? "enable" : "disable",
539 			 ret);
540 	} else {
541 		host->mphy_powered_on = on;
542 	}
543 
544 	return ret;
545 }
546 
547 static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
548 				struct clk **clk_out)
549 {
550 	struct clk *clk;
551 	int err = 0;
552 
553 	clk = devm_clk_get(dev, name);
554 	if (IS_ERR(clk))
555 		err = PTR_ERR(clk);
556 	else
557 		*clk_out = clk;
558 
559 	return err;
560 }
561 
562 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
563 {
564 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
565 	struct ufs_mtk_crypt_cfg *cfg;
566 	struct regulator *reg;
567 	int volt, ret;
568 
569 	if (!ufs_mtk_is_boost_crypt_enabled(hba))
570 		return;
571 
572 	cfg = host->crypt;
573 	volt = cfg->vcore_volt;
574 	reg = cfg->reg_vcore;
575 
576 	ret = clk_prepare_enable(cfg->clk_crypt_mux);
577 	if (ret) {
578 		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
579 			 ret);
580 		return;
581 	}
582 
583 	if (boost) {
584 		ret = regulator_set_voltage(reg, volt, INT_MAX);
585 		if (ret) {
586 			dev_info(hba->dev,
587 				 "failed to set vcore to %d\n", volt);
588 			goto out;
589 		}
590 
591 		ret = clk_set_parent(cfg->clk_crypt_mux,
592 				     cfg->clk_crypt_perf);
593 		if (ret) {
594 			dev_info(hba->dev,
595 				 "failed to set clk_crypt_perf\n");
596 			regulator_set_voltage(reg, 0, INT_MAX);
597 			goto out;
598 		}
599 	} else {
600 		ret = clk_set_parent(cfg->clk_crypt_mux,
601 				     cfg->clk_crypt_lp);
602 		if (ret) {
603 			dev_info(hba->dev,
604 				 "failed to set clk_crypt_lp\n");
605 			goto out;
606 		}
607 
608 		ret = regulator_set_voltage(reg, 0, INT_MAX);
609 		if (ret) {
610 			dev_info(hba->dev,
611 				 "failed to set vcore to MIN\n");
612 		}
613 	}
614 out:
615 	clk_disable_unprepare(cfg->clk_crypt_mux);
616 }
617 
618 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
619 				 struct clk **clk)
620 {
621 	int ret;
622 
623 	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
624 	if (ret) {
625 		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
626 			 name, ret);
627 	}
628 
629 	return ret;
630 }
631 
632 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
633 {
634 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
635 	struct ufs_mtk_crypt_cfg *cfg;
636 	struct device *dev = hba->dev;
637 	struct regulator *reg;
638 	u32 volt;
639 
640 	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
641 				   GFP_KERNEL);
642 	if (!host->crypt)
643 		goto disable_caps;
644 
645 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
646 	if (IS_ERR(reg)) {
647 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
648 			 PTR_ERR(reg));
649 		goto disable_caps;
650 	}
651 
652 	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
653 				 &volt)) {
654 		dev_info(dev, "failed to get boost-crypt-vcore-min");
655 		goto disable_caps;
656 	}
657 
658 	cfg = host->crypt;
659 	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
660 				  &cfg->clk_crypt_mux))
661 		goto disable_caps;
662 
663 	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
664 				  &cfg->clk_crypt_lp))
665 		goto disable_caps;
666 
667 	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
668 				  &cfg->clk_crypt_perf))
669 		goto disable_caps;
670 
671 	cfg->reg_vcore = reg;
672 	cfg->vcore_volt = volt;
673 	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
674 
675 disable_caps:
676 	return;
677 }
678 
679 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
680 {
681 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
682 
683 	host->reg_va09 = regulator_get(hba->dev, "va09");
684 	if (IS_ERR(host->reg_va09))
685 		dev_info(hba->dev, "failed to get va09");
686 	else
687 		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
688 }
689 
690 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
691 {
692 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
693 	struct device_node *np = hba->dev->of_node;
694 
695 	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
696 		ufs_mtk_init_boost_crypt(hba);
697 
698 	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
699 		ufs_mtk_init_va09_pwr_ctrl(hba);
700 
701 	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
702 		host->caps |= UFS_MTK_CAP_DISABLE_AH8;
703 
704 	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
705 		host->caps |= UFS_MTK_CAP_BROKEN_VCC;
706 
707 	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
708 		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
709 
710 	if (of_property_read_bool(np, "mediatek,ufs-tx-skew-fix"))
711 		host->caps |= UFS_MTK_CAP_TX_SKEW_FIX;
712 
713 	if (of_property_read_bool(np, "mediatek,ufs-disable-mcq"))
714 		host->caps |= UFS_MTK_CAP_DISABLE_MCQ;
715 
716 	if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
717 		host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
718 
719 	if (of_property_read_bool(np, "mediatek,ufs-broken-rtc"))
720 		host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC;
721 
722 	dev_info(hba->dev, "caps: 0x%x", host->caps);
723 }
724 
725 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
726 {
727 	ufs_mtk_boost_crypt(hba, scale_up);
728 }
729 
730 static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
731 {
732 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
733 
734 	if (on) {
735 		phy_power_on(host->mphy);
736 		ufs_mtk_setup_ref_clk(hba, on);
737 		if (!ufshcd_is_clkscaling_supported(hba))
738 			ufs_mtk_scale_perf(hba, on);
739 	} else {
740 		if (!ufshcd_is_clkscaling_supported(hba))
741 			ufs_mtk_scale_perf(hba, on);
742 		ufs_mtk_setup_ref_clk(hba, on);
743 		phy_power_off(host->mphy);
744 	}
745 }
746 
747 static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
748 {
749 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
750 	u32 irq, i;
751 
752 	if (!hba->mcq_enabled)
753 		return;
754 
755 	if (host->mcq_nr_intr == 0)
756 		return;
757 
758 	for (i = 0; i < host->mcq_nr_intr; i++) {
759 		irq = host->mcq_intr_info[i].irq;
760 		disable_irq(irq);
761 	}
762 	host->is_mcq_intr_enabled = false;
763 }
764 
765 static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
766 {
767 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
768 	u32 irq, i;
769 
770 	if (!hba->mcq_enabled)
771 		return;
772 
773 	if (host->mcq_nr_intr == 0)
774 		return;
775 
776 	if (host->is_mcq_intr_enabled == true)
777 		return;
778 
779 	for (i = 0; i < host->mcq_nr_intr; i++) {
780 		irq = host->mcq_intr_info[i].irq;
781 		enable_irq(irq);
782 	}
783 	host->is_mcq_intr_enabled = true;
784 }
785 
786 /**
787  * ufs_mtk_setup_clocks - enable or disable clocks
788  * @hba: host controller instance
789  * @on: If true, enable clocks else disable them.
790  * @status: PRE_CHANGE or POST_CHANGE notify
791  *
792  * Return: 0 on success, non-zero on failure.
793  */
794 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
795 				enum ufs_notify_change_status status)
796 {
797 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
798 	bool clk_pwr_off = false;
799 	int ret = 0;
800 
801 	/*
802 	 * In case ufs_mtk_init() is not yet done, simply ignore.
803 	 * This ufs_mtk_setup_clocks() shall be called from
804 	 * ufs_mtk_init() after init is done.
805 	 */
806 	if (!host)
807 		return 0;
808 
809 	if (!on && status == PRE_CHANGE) {
810 		if (ufshcd_is_link_off(hba)) {
811 			clk_pwr_off = true;
812 		} else if (ufshcd_is_link_hibern8(hba) ||
813 			 (!ufshcd_can_hibern8_during_gating(hba) &&
814 			 ufshcd_is_auto_hibern8_enabled(hba))) {
815 			/*
816 			 * Gate ref-clk and poweroff mphy if link state is in
817 			 * OFF or Hibern8 by either Auto-Hibern8 or
818 			 * ufshcd_link_state_transition().
819 			 */
820 			ret = ufs_mtk_wait_link_state(hba,
821 						      VS_LINK_HIBERN8,
822 						      15);
823 			if (!ret)
824 				clk_pwr_off = true;
825 		}
826 
827 		if (clk_pwr_off) {
828 			ufs_mtk_pwr_ctrl(hba, false);
829 		} else {
830 			dev_warn(hba->dev, "Clock is not turned off, hba->ahit = 0x%x, AHIT = 0x%x\n",
831 				hba->ahit,
832 				ufshcd_readl(hba,
833 					REG_AUTO_HIBERNATE_IDLE_TIMER));
834 		}
835 		ufs_mtk_mcq_disable_irq(hba);
836 	} else if (on && status == POST_CHANGE) {
837 		ufs_mtk_pwr_ctrl(hba, true);
838 		ufs_mtk_mcq_enable_irq(hba);
839 	}
840 
841 	return ret;
842 }
843 
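/*
 * Return the MCQ interrupt serving @cpu by translating the CPU to its blk-mq
 * hardware queue index, or MTK_MCQ_INVALID_IRQ if the index is out of range.
 */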
844 static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
845 {
846 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
847 	struct blk_mq_tag_set *tag_set = &hba->host->tag_set;
848 	struct blk_mq_queue_map	*map = &tag_set->map[HCTX_TYPE_DEFAULT];
849 	unsigned int nr = map->nr_queues;
850 	unsigned int q_index;
851 
852 	q_index = map->mq_map[cpu];
853 	if (q_index >= nr) {
854 		dev_err(hba->dev, "hwq index %d exceed %d\n",
855 			q_index, nr);
856 		return MTK_MCQ_INVALID_IRQ;
857 	}
858 
859 	return host->mcq_intr_info[q_index].irq;
860 }
861 
862 static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu)
863 {
864 	unsigned int irq, _cpu;
865 	int ret;
866 
867 	irq = ufs_mtk_mcq_get_irq(hba, cpu);
868 	if (irq == MTK_MCQ_INVALID_IRQ) {
869 		dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu);
870 		return;
871 	}
872 
873 	/* force migrate irq of cpu0 to cpu3 */
874 	_cpu = (cpu == 0) ? 3 : cpu;
875 	ret = irq_set_affinity(irq, cpumask_of(_cpu));
876 	if (ret) {
877 		dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n",
878 			irq, _cpu);
879 		return;
880 	}
881 	dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu);
882 }
883 
884 static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver)
885 {
886 	bool is_legacy = false;
887 
888 	switch (hw_ip_ver) {
889 	case IP_LEGACY_VER_MT6893:
890 	case IP_LEGACY_VER_MT6781:
891 		/* can add other legacy chipset ID here accordingly */
892 		is_legacy = true;
893 		break;
894 	default:
895 		break;
896 	}
897 	dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy);
898 
899 	return is_legacy;
900 }
901 
902 /*
903  * HW version format has been changed from 01MMmmmm to 1MMMmmmm, since
904  * project MT6878. In order to perform correct version comparison,
905  * version number is changed by SW for the following projects.
906  * IP_VER_MT6983	0x00360000 to 0x10360000
907  * IP_VER_MT6897	0x01440000 to 0x10440000
908  * IP_VER_MT6989	0x01450000 to 0x10450000
909  * IP_VER_MT6991	0x01460000 to 0x10460000
910  */
911 static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba)
912 {
913 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
914 	u32 hw_ip_ver;
915 
916 	hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
917 
918 	if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) ||
919 	    ((hw_ip_ver & (0xFF << 24)) == 0)) {
920 		hw_ip_ver &= ~(0xFF << 24);
921 		hw_ip_ver |= (0x1 << 28);
922 	}
923 
924 	host->ip_ver = hw_ip_ver;
925 
926 	host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver);
927 }
928 
929 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
930 {
931 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
932 	int ret, ver = 0;
933 
934 	if (host->hw_ver.major)
935 		return;
936 
937 	/* Set default (minimum) version anyway */
938 	host->hw_ver.major = 2;
939 
940 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
941 	if (!ret) {
942 		if (ver >= UFS_UNIPRO_VER_1_8) {
943 			host->hw_ver.major = 3;
944 			/*
945 			 * Fix HCI version for some platforms with
946 			 * incorrect version
947 			 */
948 			if (hba->ufs_version < ufshci_version(3, 0))
949 				hba->ufs_version = ufshci_version(3, 0);
950 		}
951 	}
952 }
953 
954 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
955 {
956 	return hba->ufs_version;
957 }
958 
959 /**
960  * ufs_mtk_init_clocks - Init mtk driver private clocks
961  *
962  * @hba: per adapter instance
963  */
964 static void ufs_mtk_init_clocks(struct ufs_hba *hba)
965 {
966 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
967 	struct list_head *head = &hba->clk_list_head;
968 	struct ufs_clk_info *clki, *clki_tmp;
969 	struct device *dev = hba->dev;
970 	struct regulator *reg;
971 	u32 volt;
972 
973 	/*
974 	 * Find private clocks and store them in struct ufs_mtk_clk.
975 	 * Remove the "*_max_src" and "*_min_src" clocks from the list so they
976 	 * are not switched on/off by clock gating.
977 	 */
978 	list_for_each_entry_safe(clki, clki_tmp, head, list) {
979 		if (!strcmp(clki->name, "ufs_sel")) {
980 			host->mclk.ufs_sel_clki = clki;
981 		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
982 			host->mclk.ufs_sel_max_clki = clki;
983 			clk_disable_unprepare(clki->clk);
984 			list_del(&clki->list);
985 		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
986 			host->mclk.ufs_sel_min_clki = clki;
987 			clk_disable_unprepare(clki->clk);
988 			list_del(&clki->list);
989 		} else if (!strcmp(clki->name, "ufs_fde")) {
990 			host->mclk.ufs_fde_clki = clki;
991 		} else if (!strcmp(clki->name, "ufs_fde_max_src")) {
992 			host->mclk.ufs_fde_max_clki = clki;
993 			clk_disable_unprepare(clki->clk);
994 			list_del(&clki->list);
995 		} else if (!strcmp(clki->name, "ufs_fde_min_src")) {
996 			host->mclk.ufs_fde_min_clki = clki;
997 			clk_disable_unprepare(clki->clk);
998 			list_del(&clki->list);
999 		}
1000 	}
1001 
1002 	list_for_each_entry(clki, head, list) {
1003 		dev_info(hba->dev, "clk \"%s\" present", clki->name);
1004 	}
1005 
1006 	if (!ufs_mtk_is_clk_scale_ready(hba)) {
1007 		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
1008 		dev_info(hba->dev,
1009 			 "%s: Clk-scaling not ready. Feature disabled.",
1010 			 __func__);
1011 		return;
1012 	}
1013 
1014 	/*
1015 	 * Get the vcore regulator by default if the DT provides these settings,
1016 	 * regardless of whether clock scaling is supported (it may be disabled).
1017 	 */
1018 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
1019 	if (IS_ERR(reg)) {
1020 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
1021 			 PTR_ERR(reg));
1022 		return;
1023 	}
1024 
1025 	if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min",
1026 				 &volt)) {
1027 		dev_info(dev, "failed to get clk-scale-up-vcore-min");
1028 		return;
1029 	}
1030 
1031 	host->mclk.reg_vcore = reg;
1032 	host->mclk.vcore_volt = volt;
1033 
1034 	/* If default boot is max gear, request vcore */
1035 	if (reg && volt && host->clk_scale_up) {
1036 		if (regulator_set_voltage(reg, volt, INT_MAX)) {
1037 			dev_info(hba->dev,
1038 				"Failed to set vcore to %d\n", volt);
1039 		}
1040 	}
1041 }
1042 
1043 #define MAX_VCC_NAME 30
1044 static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
1045 {
1046 	struct ufs_vreg_info *info = &hba->vreg_info;
1047 	struct device_node *np = hba->dev->of_node;
1048 	struct device *dev = hba->dev;
1049 	char vcc_name[MAX_VCC_NAME];
1050 	struct arm_smccc_res res;
1051 	int err, ver;
1052 
1053 	if (info->vcc)
1054 		return 0;
1055 
1056 	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
1057 		ufs_mtk_get_vcc_num(res);
1058 		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
1059 			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
1060 		else
1061 			return -ENODEV;
1062 	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
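		/*
		 * Example: a device reporting wspecversion 0x0310 (UFS 3.1)
		 * gives ver = 3, so the "vcc-ufs3" supply is looked up below.
		 */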
1063 		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
1064 		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
1065 	} else {
1066 		return 0;
1067 	}
1068 
1069 	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
1070 	if (err)
1071 		return err;
1072 
1073 	err = ufshcd_get_vreg(dev, info->vcc);
1074 	if (err)
1075 		return err;
1076 
1077 	err = regulator_enable(info->vcc->reg);
1078 	if (!err) {
1079 		info->vcc->enabled = true;
1080 		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
1081 	}
1082 
1083 	return err;
1084 }
1085 
1086 static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
1087 {
1088 	struct ufs_vreg_info *info = &hba->vreg_info;
1089 	struct ufs_vreg **vreg_on, **vreg_off;
1090 
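	/*
	 * For UFS 3.0 and later devices, keep VCCQ always on and release
	 * VCCQ2; for earlier devices, keep VCCQ2 always on and release VCCQ.
	 */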
1091 	if (hba->dev_info.wspecversion >= 0x0300) {
1092 		vreg_on = &info->vccq;
1093 		vreg_off = &info->vccq2;
1094 	} else {
1095 		vreg_on = &info->vccq2;
1096 		vreg_off = &info->vccq;
1097 	}
1098 
1099 	if (*vreg_on)
1100 		(*vreg_on)->always_on = true;
1101 
1102 	if (*vreg_off) {
1103 		regulator_disable((*vreg_off)->reg);
1104 		devm_kfree(hba->dev, (*vreg_off)->name);
1105 		devm_kfree(hba->dev, *vreg_off);
1106 		*vreg_off = NULL;
1107 	}
1108 }
1109 
1110 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
1111 {
1112 	unsigned long flags;
1113 	u32 ah_ms = 10;
1114 	u32 ah_scale, ah_timer;
1115 	u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000};
1116 
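	/*
	 * Worked example of the conversion below: with ah_scale = 3 (1000 us
	 * granularity) and ah_timer = 5, ah_ms = 5 * 1000 / 1000 = 5 ms, so
	 * clk_gating.delay_ms becomes max(5, 10) = 10 ms.
	 */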
1117 	if (ufshcd_is_clkgating_allowed(hba)) {
1118 		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) {
1119 			ah_scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK,
1120 					  hba->ahit);
1121 			ah_timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
1122 					  hba->ahit);
1123 			if (ah_scale <= 5)
1124 				ah_ms = ah_timer * scale_us[ah_scale] / 1000;
1125 		}
1126 
1127 		spin_lock_irqsave(hba->host->host_lock, flags);
1128 		hba->clk_gating.delay_ms = max(ah_ms, 10U);
1129 		spin_unlock_irqrestore(hba->host->host_lock, flags);
1130 	}
1131 }
1132 
1133 static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
1134 {
1135 	unsigned int us;
1136 
1137 	if (ufshcd_is_auto_hibern8_supported(hba)) {
1138 		switch (hba->dev_info.wmanufacturerid) {
1139 		case UFS_VENDOR_SAMSUNG:
1140 			/* configure auto-hibern8 timer to 3.5 ms */
1141 			us = 3500;
1142 			break;
1143 
1144 		case UFS_VENDOR_MICRON:
1145 			/* configure auto-hibern8 timer to 2 ms */
1146 			us = 2000;
1147 			break;
1148 
1149 		default:
1150 			/* configure auto-hibern8 timer to 1 ms */
1151 			us = 1000;
1152 			break;
1153 		}
1154 
1155 		hba->ahit = ufshcd_us_to_ahit(us);
1156 	}
1157 
1158 	ufs_mtk_setup_clk_gating(hba);
1159 }
1160 
1161 static void ufs_mtk_fix_clock_scaling(struct ufs_hba *hba)
1162 {
1163 	/* For UFS devices below version 4.0, clock scaling is not necessary */
1164 	if ((hba->dev_info.wspecversion < 0x0400)  &&
1165 		ufs_mtk_is_clk_scale_ready(hba)) {
1166 		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
1167 
1168 		_ufs_mtk_clk_scale(hba, false);
1169 	}
1170 }
1171 
1172 static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
1173 {
1174 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1175 	struct platform_device *pdev;
1176 	int i;
1177 	int irq;
1178 
1179 	host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
1180 	pdev = container_of(hba->dev, struct platform_device, dev);
1181 
1182 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
1183 		goto failed;
1184 
1185 	for (i = 0; i < host->mcq_nr_intr; i++) {
1186 		/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
1187 		irq = platform_get_irq(pdev, i + 1);
1188 		if (irq < 0) {
1189 			host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
1190 			goto failed;
1191 		}
1192 		host->mcq_intr_info[i].hba = hba;
1193 		host->mcq_intr_info[i].irq = irq;
1194 		dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
1195 	}
1196 
1197 	return;
1198 failed:
1199 	/* invalidate irq info */
1200 	for (i = 0; i < host->mcq_nr_intr; i++)
1201 		host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
1202 
1203 	host->mcq_nr_intr = 0;
1204 }
1205 
1206 /**
1207  * ufs_mtk_init - find other essential mmio bases
1208  * @hba: host controller instance
1209  *
1210  * Binds the PHY with the controller and powers up the PHY, enabling
1211  * clocks and regulators.
1212  *
1213  * Return: -EPROBE_DEFER if binding fails, a negative error code on PHY
1214  * power-up failure, and zero on success.
1215  */
1216 static int ufs_mtk_init(struct ufs_hba *hba)
1217 {
1218 	const struct of_device_id *id;
1219 	struct device *dev = hba->dev;
1220 	struct ufs_mtk_host *host;
1221 	struct Scsi_Host *shost = hba->host;
1222 	int err = 0;
1223 	struct arm_smccc_res res;
1224 
1225 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
1226 	if (!host) {
1227 		err = -ENOMEM;
1228 		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
1229 		goto out;
1230 	}
1231 
1232 	host->hba = hba;
1233 	ufshcd_set_variant(hba, host);
1234 
1235 	id = of_match_device(ufs_mtk_of_match, dev);
1236 	if (!id) {
1237 		err = -EINVAL;
1238 		goto out;
1239 	}
1240 
1241 	/* Initialize host capability */
1242 	ufs_mtk_init_host_caps(hba);
1243 
1244 	ufs_mtk_init_mcq_irq(hba);
1245 
1246 	err = ufs_mtk_bind_mphy(hba);
1247 	if (err)
1248 		goto out_variant_clear;
1249 
1250 	ufs_mtk_init_reset(hba);
1251 
1252 	/* backup mphy setting if mphy can reset */
1253 	if (host->mphy_reset)
1254 		ufs_mtk_mphy_ctrl(UFS_MPHY_BACKUP, res);
1255 
1256 	/* Enable runtime autosuspend */
1257 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
1258 
1259 	/* Enable clock-gating */
1260 	hba->caps |= UFSHCD_CAP_CLK_GATING;
1261 
1262 	/* Enable inline encryption */
1263 	hba->caps |= UFSHCD_CAP_CRYPTO;
1264 
1265 	/* Enable WriteBooster */
1266 	hba->caps |= UFSHCD_CAP_WB_EN;
1267 
1268 	/* Enable clk scaling */
1269 	hba->caps |= UFSHCD_CAP_CLK_SCALING;
1270 	host->clk_scale_up = true; /* default is max freq */
1271 
1272 	/* Set runtime pm delay to replace default */
1273 	shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
1274 
1275 	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
1276 
1277 	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
1278 	if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC)
1279 		hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
1280 
1281 	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
1282 
1283 	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
1284 		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1285 
1286 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
1287 		hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP;
1288 
1289 	ufs_mtk_init_clocks(hba);
1290 
1291 	/*
1292 	 * ufshcd_vops_init() is invoked after
1293 	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
1294 	 * phy clock setup is skipped.
1295 	 *
1296 	 * Enable phy clocks specifically here.
1297 	 */
1298 	ufs_mtk_mphy_power_on(hba, true);
1299 
1300 	if (ufs_mtk_is_rtff_mtcmos(hba)) {
1301 		/* Restore first to avoid backing up an unexpected value */
1302 		ufs_mtk_mtcmos_ctrl(false, res);
1303 
1304 		/* Power on to init */
1305 		ufs_mtk_mtcmos_ctrl(true, res);
1306 	}
1307 
1308 	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
1309 
1310 	ufs_mtk_get_hw_ip_version(hba);
1311 
1312 	goto out;
1313 
1314 out_variant_clear:
1315 	ufshcd_set_variant(hba, NULL);
1316 out:
1317 	return err;
1318 }
1319 
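/*
 * Decide whether the power-mode change should first go through FASTAUTO:
 * only when the HS rate is changing, neither direction is SLOW mode, and
 * each direction is either in FAST mode or targets HS-G4 or above.
 */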
1320 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
1321 				     struct ufs_pa_layer_attr *dev_req_params)
1322 {
1323 	if (!ufs_mtk_is_pmc_via_fastauto(hba))
1324 		return false;
1325 
1326 	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
1327 		return false;
1328 
1329 	if (dev_req_params->pwr_tx != FAST_MODE &&
1330 	    dev_req_params->gear_tx < UFS_HS_G4)
1331 		return false;
1332 
1333 	if (dev_req_params->pwr_rx != FAST_MODE &&
1334 	    dev_req_params->gear_rx < UFS_HS_G4)
1335 		return false;
1336 
1337 	if (dev_req_params->pwr_tx == SLOW_MODE ||
1338 	    dev_req_params->pwr_rx == SLOW_MODE)
1339 		return false;
1340 
1341 	return true;
1342 }
1343 
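/*
 * Enforce a minimum TX HS sync length of 0x48 for gears G1-G5 on both the
 * local and the peer side before the power-mode change.
 */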
1344 static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba)
1345 {
1346 	int i;
1347 	u32 value;
1348 	u32 cnt, att, min;
1349 	struct attr_min {
1350 		u32 attr;
1351 		u32 min_value;
1352 	} pa_min_sync_length[] = {
1353 		{PA_TXHSG1SYNCLENGTH, 0x48},
1354 		{PA_TXHSG2SYNCLENGTH, 0x48},
1355 		{PA_TXHSG3SYNCLENGTH, 0x48},
1356 		{PA_TXHSG4SYNCLENGTH, 0x48},
1357 		{PA_TXHSG5SYNCLENGTH, 0x48}
1358 	};
1359 
1360 	cnt = ARRAY_SIZE(pa_min_sync_length);
1361 	for (i = 0; i < cnt; i++) {
1362 		att = pa_min_sync_length[i].attr;
1363 		min = pa_min_sync_length[i].min_value;
1364 		ufshcd_dme_get(hba, UIC_ARG_MIB(att), &value);
1365 		if (value < min)
1366 			ufshcd_dme_set(hba, UIC_ARG_MIB(att), min);
1367 
1368 		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(att), &value);
1369 		if (value < min)
1370 			ufshcd_dme_peer_set(hba, UIC_ARG_MIB(att), min);
1371 	}
1372 }
1373 
1374 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
1375 				const struct ufs_pa_layer_attr *dev_max_params,
1376 				struct ufs_pa_layer_attr *dev_req_params)
1377 {
1378 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1379 	struct ufs_host_params host_params;
1380 	int ret;
1381 
1382 	ufshcd_init_host_params(&host_params);
1383 	host_params.hs_rx_gear = UFS_HS_G5;
1384 	host_params.hs_tx_gear = UFS_HS_G5;
1385 
1386 	if (dev_max_params->pwr_rx == SLOW_MODE ||
1387 	    dev_max_params->pwr_tx == SLOW_MODE)
1388 		host_params.desired_working_mode = UFS_PWM_MODE;
1389 
1390 	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
1391 	if (ret) {
1392 		pr_info("%s: failed to determine capabilities\n",
1393 			__func__);
1394 	}
1395 
1396 	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
1397 		ufs_mtk_adjust_sync_length(hba);
1398 
1399 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
1400 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
1401 
1402 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
1403 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
1404 
1405 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
1406 			       dev_req_params->lane_tx);
1407 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
1408 			       dev_req_params->lane_rx);
1409 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
1410 			       dev_req_params->hs_rate);
1411 
1412 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
1413 			       PA_NO_ADAPT);
1414 
1415 		if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
1416 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
1417 					DL_FC0ProtectionTimeOutVal_Default);
1418 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
1419 					DL_TC0ReplayTimeOutVal_Default);
1420 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
1421 					DL_AFC0ReqTimeOutVal_Default);
1422 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
1423 					DL_FC1ProtectionTimeOutVal_Default);
1424 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
1425 					DL_TC1ReplayTimeOutVal_Default);
1426 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
1427 					DL_AFC1ReqTimeOutVal_Default);
1428 
1429 			ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
1430 					DL_FC0ProtectionTimeOutVal_Default);
1431 			ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
1432 					DL_TC0ReplayTimeOutVal_Default);
1433 			ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
1434 					DL_AFC0ReqTimeOutVal_Default);
1435 		}
1436 
1437 		ret = ufshcd_uic_change_pwr_mode(hba,
1438 					FASTAUTO_MODE << 4 | FASTAUTO_MODE);
1439 
1440 		if (ret) {
1441 			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
1442 				__func__, ret);
1443 		}
1444 	}
1445 
1446 	/* if already configured to the requested pwr_mode, skip adapt */
1447 	if (dev_req_params->gear_rx == hba->pwr_info.gear_rx &&
1448 	    dev_req_params->gear_tx == hba->pwr_info.gear_tx &&
1449 	    dev_req_params->lane_rx == hba->pwr_info.lane_rx &&
1450 	    dev_req_params->lane_tx == hba->pwr_info.lane_tx &&
1451 	    dev_req_params->pwr_rx == hba->pwr_info.pwr_rx &&
1452 	    dev_req_params->pwr_tx == hba->pwr_info.pwr_tx &&
1453 	    dev_req_params->hs_rate == hba->pwr_info.hs_rate) {
1454 		return ret;
1455 	}
1456 
1457 	if (dev_req_params->pwr_rx == FAST_MODE ||
1458 	    dev_req_params->pwr_rx == FASTAUTO_MODE) {
1459 		if (host->hw_ver.major >= 3) {
1460 			ret = ufshcd_dme_configure_adapt(hba,
1461 						   dev_req_params->gear_tx,
1462 						   PA_INITIAL_ADAPT);
1463 		} else {
1464 			ret = ufshcd_dme_configure_adapt(hba,
1465 				   dev_req_params->gear_tx,
1466 				   PA_NO_ADAPT);
1467 		}
1468 	} else {
1469 		ret = ufshcd_dme_configure_adapt(hba,
1470 			   dev_req_params->gear_tx,
1471 			   PA_NO_ADAPT);
1472 	}
1473 
1474 	return ret;
1475 }
1476 
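/*
 * Turn off auto-hibern8 and wait for the host to leave Hibern8 and the link
 * to come back up; on timeout, force error recovery and return -EBUSY so the
 * caller breaks the suspend.
 */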
1477 static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
1478 {
1479 	int ret;
1480 
1481 	/* disable auto-hibern8 */
1482 	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
1483 
1484 	/* wait host return to idle state when auto-hibern8 off */
1485 	ret = ufs_mtk_wait_idle_state(hba, 5);
1486 	if (ret)
1487 		goto out;
1488 
1489 	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1490 
1491 out:
1492 	if (ret) {
1493 		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
1494 
1495 		ufshcd_force_error_recovery(hba);
1496 
1497 		/* trigger error handler and break suspend */
1498 		ret = -EBUSY;
1499 	}
1500 
1501 	return ret;
1502 }
1503 
1504 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
1505 				enum ufs_notify_change_status stage,
1506 				const struct ufs_pa_layer_attr *dev_max_params,
1507 				struct ufs_pa_layer_attr *dev_req_params)
1508 {
1509 	int ret = 0;
1510 	static u32 reg;
1511 
1512 	switch (stage) {
1513 	case PRE_CHANGE:
1514 		if (ufshcd_is_auto_hibern8_supported(hba)) {
1515 			reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
1516 			ufs_mtk_auto_hibern8_disable(hba);
1517 		}
1518 		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
1519 					     dev_req_params);
1520 		break;
1521 	case POST_CHANGE:
1522 		if (ufshcd_is_auto_hibern8_supported(hba))
1523 			ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
1524 		break;
1525 	default:
1526 		ret = -EINVAL;
1527 		break;
1528 	}
1529 
1530 	return ret;
1531 }
1532 
1533 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
1534 {
1535 	int ret;
1536 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1537 
1538 	ret = ufshcd_dme_set(hba,
1539 			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
1540 			     lpm ? 1 : 0);
1541 	if (!ret || !lpm) {
1542 		/*
1543 		 * Forcibly mark the link as non-LPM if the UIC command failed,
1544 		 * so that the default hba_enable_delay_us value is used when
1545 		 * re-enabling the host.
1546 		 */
1547 		host->unipro_lpm = lpm;
1548 	}
1549 
1550 	return ret;
1551 }
1552 
1553 static int ufs_mtk_pre_link(struct ufs_hba *hba)
1554 {
1555 	int ret;
1556 	u32 tmp;
1557 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1558 
1559 	ufs_mtk_get_controller_version(hba);
1560 
1561 	ret = ufs_mtk_unipro_set_lpm(hba, false);
1562 	if (ret)
1563 		return ret;
1564 
1565 	/*
1566 	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
1567 	 * to make sure that both host and device TX LCC are disabled
1568 	 * once link startup is completed.
1569 	 */
1570 	ret = ufshcd_disable_host_tx_lcc(hba);
1571 	if (ret)
1572 		return ret;
1573 
1574 	/* disable deep stall */
1575 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
1576 	if (ret)
1577 		return ret;
1578 
1579 	tmp &= ~(1 << 6);
1580 
1581 	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
1582 
1583 	/* Enable the 1144 functions setting */
1584 	if (host->ip_ver == IP_VER_MT6989) {
1585 		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
1586 		if (ret)
1587 			return ret;
1588 
1589 		tmp |= 0x10;
1590 		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
1591 	}
1592 
1593 	return ret;
1594 }
1595 
1596 static void ufs_mtk_post_link(struct ufs_hba *hba)
1597 {
1598 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1599 	u32 tmp;
1600 
1601 	/* fix device PA_INIT no adapt */
1602 	if (host->ip_ver >= IP_VER_MT6899) {
1603 		ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
1604 		tmp |= 0x100;
1605 		ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
1606 	}
1607 
1608 	/* enable unipro clock gating feature */
1609 	ufs_mtk_cfg_unipro_cg(hba, true);
1610 }
1611 
1612 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
1613 				       enum ufs_notify_change_status stage)
1614 {
1615 	int ret = 0;
1616 
1617 	switch (stage) {
1618 	case PRE_CHANGE:
1619 		ret = ufs_mtk_pre_link(hba);
1620 		break;
1621 	case POST_CHANGE:
1622 		ufs_mtk_post_link(hba);
1623 		break;
1624 	default:
1625 		ret = -EINVAL;
1626 		break;
1627 	}
1628 
1629 	return ret;
1630 }
1631 
1632 static int ufs_mtk_device_reset(struct ufs_hba *hba)
1633 {
1634 	struct arm_smccc_res res;
1635 
1636 	ufs_mtk_device_reset_ctrl(0, res);
1637 
1638 	/* disable hba in middle of device reset */
1639 	ufshcd_hba_stop(hba);
1640 
1641 	/*
1642 	 * The reset signal is active low. UFS devices shall detect
1643 	 * more than or equal to 1us of positive or negative RST_n
1644 	 * pulse width.
1645 	 *
1646 	 * To be on safe side, keep the reset low for at least 10us.
1647 	 */
1648 	usleep_range(10, 15);
1649 
1650 	ufs_mtk_device_reset_ctrl(1, res);
1651 
1652 	/* Some devices may need time to respond to rst_n */
1653 	usleep_range(10000, 15000);
1654 
1655 	dev_info(hba->dev, "device reset done\n");
1656 
1657 	return 0;
1658 }
1659 
1660 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
1661 {
1662 	int err;
1663 	u32 val;
1664 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1665 
1666 	err = ufshcd_hba_enable(hba);
1667 	if (err)
1668 		return err;
1669 
1670 	err = ufs_mtk_unipro_set_lpm(hba, false);
1671 	if (err) {
1672 		if (host->ip_ver < IP_VER_MT6899) {
1673 			ufs_mtk_dbg_sel(hba);
1674 			val = ufshcd_readl(hba, REG_UFS_PROBE);
1675 		} else {
1676 			val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
1677 		}
1678 		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val);
1679 		val = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
1680 		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val);
1681 		return err;
1682 	}
1683 
1684 	err = ufshcd_uic_hibern8_exit(hba);
1685 	if (err)
1686 		return err;
1687 
1688 	/* Check the link state to make sure H8 exit succeeded */
1689 	err = ufs_mtk_wait_idle_state(hba, 5);
1690 	if (err) {
1691 		dev_warn(hba->dev, "wait idle fail, err=%d\n", err);
1692 		return err;
1693 	}
1694 	err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1695 	if (err) {
1696 		dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
1697 		return err;
1698 	}
1699 	ufshcd_set_link_active(hba);
1700 
1701 	err = ufshcd_make_hba_operational(hba);
1702 	if (err)
1703 		return err;
1704 
1705 	if (hba->mcq_enabled) {
1706 		ufs_mtk_config_mcq(hba, false);
1707 		ufshcd_mcq_make_queues_operational(hba);
1708 		ufshcd_mcq_config_mac(hba, hba->nutrs);
1709 		ufshcd_mcq_enable(hba);
1710 	}
1711 
1712 	return 0;
1713 }
1714 
1715 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
1716 {
1717 	int err;
1718 
1719 	/* Disable reset confirm feature by UniPro */
1720 	ufshcd_writel(hba,
1721 		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
1722 		      REG_UFS_XOUFS_CTRL);
1723 
1724 	err = ufs_mtk_unipro_set_lpm(hba, true);
1725 	if (err) {
1726 		/* Resume UniPro state for following error recovery */
1727 		ufs_mtk_unipro_set_lpm(hba, false);
1728 		return err;
1729 	}
1730 
1731 	return 0;
1732 }
1733 
1734 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
1735 {
1736 	struct ufs_vreg *vccqx = NULL;
1737 
1738 	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
1739 		return;
1740 
1741 	if (hba->vreg_info.vccq)
1742 		vccqx = hba->vreg_info.vccq;
1743 	else
1744 		vccqx = hba->vreg_info.vccq2;
1745 
1746 	regulator_set_mode(vccqx->reg,
1747 			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
1748 }
1749 
1750 static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
1751 {
1752 	struct arm_smccc_res res;
1753 
1754 	ufs_mtk_device_pwr_ctrl(!lpm,
1755 				(unsigned long)hba->dev_info.wspecversion,
1756 				res);
1757 }
1758 
1759 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
1760 {
1761 	bool skip_vccqx = false;
1762 
1763 	/* Prevent entering LPM when device is still active */
1764 	if (lpm && ufshcd_is_ufs_dev_active(hba))
1765 		return;
1766 
1767 	/* Skip vccqx lpm control and control vsx only */
1768 	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
1769 		skip_vccqx = true;
1770 
1771 	/* VCC is always-on, control vsx only */
1772 	if (!hba->vreg_info.vcc)
1773 		skip_vccqx = true;
1774 
1775 	/* Broken VCC keeps VCC always on; in most cases control vsx only */
1776 	if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) {
1777 		/* Some devices allow vccqx/vsx to enter LPM */
1778 		if (ufs_mtk_is_allow_vccqx_lpm(hba))
1779 			skip_vccqx = false;
1780 		else /* control vsx only */
1781 			skip_vccqx = true;
1782 	}
1783 
1784 	if (lpm) {
1785 		if (!skip_vccqx)
1786 			ufs_mtk_vccqx_set_lpm(hba, lpm);
1787 		ufs_mtk_vsx_set_lpm(hba, lpm);
1788 	} else {
1789 		ufs_mtk_vsx_set_lpm(hba, lpm);
1790 		if (!skip_vccqx)
1791 			ufs_mtk_vccqx_set_lpm(hba, lpm);
1792 	}
1793 }
1794 
1795 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
1796 	enum ufs_notify_change_status status)
1797 {
1798 	int err;
1799 	struct arm_smccc_res res;
1800 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1801 
1802 	if (status == PRE_CHANGE) {
1803 		if (ufshcd_is_auto_hibern8_supported(hba))
1804 			return ufs_mtk_auto_hibern8_disable(hba);
1805 		return 0;
1806 	}
1807 
1808 	if (ufshcd_is_link_hibern8(hba)) {
1809 		err = ufs_mtk_link_set_lpm(hba);
1810 		if (err)
1811 			goto fail;
1812 	}
1813 
1814 	if (!ufshcd_is_link_active(hba)) {
1815 		/*
1816 		 * Make sure no error will be returned to prevent
1817 		 * ufshcd_suspend() re-enabling regulators while vreg is still
1818 		 * in low-power mode.
1819 		 */
1820 		err = ufs_mtk_mphy_power_on(hba, false);
1821 		if (err)
1822 			goto fail;
1823 	}
1824 
1825 	if (ufshcd_is_link_off(hba))
1826 		ufs_mtk_device_reset_ctrl(0, res);
1827 
1828 	ufs_mtk_sram_pwr_ctrl(false, res);
1829 
1830 	/* Release pm_qos/clk if in scale-up mode during suspend */
1831 	if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) {
1832 		ufshcd_pm_qos_update(hba, false);
1833 		_ufs_mtk_clk_scale(hba, false);
1834 	} else if ((!ufshcd_is_clkscaling_supported(hba) &&
1835 		    hba->pwr_info.gear_rx >= UFS_HS_G5)) {
1836 		_ufs_mtk_clk_scale(hba, false);
1837 	}
1838 
1839 	return 0;
1840 fail:
1841 	/*
1842 	 * Force the link into the off state to trigger
1843 	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
1844 	 * for a complete host reset.
1845 	 */
1846 	ufshcd_set_link_off(hba);
1847 	return -EAGAIN;
1848 }
1849 
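/*
 * Resume handler: restore SRAM and M-PHY power, re-request QoS/clock if
 * needed and bring the link back from hibern8. On failure, fall back to
 * ufshcd_link_recovery() instead of returning an error.
 */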
1850 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1851 {
1852 	int err;
1853 	struct arm_smccc_res res;
1854 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1855 
1856 	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1857 		ufs_mtk_dev_vreg_set_lpm(hba, false);
1858 
1859 	ufs_mtk_sram_pwr_ctrl(true, res);
1860 
1861 	err = ufs_mtk_mphy_power_on(hba, true);
1862 	if (err)
1863 		goto fail;
1864 
1865 	/* Request pm_qos/clk if in scale-up mode after resume */
1866 	if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) {
1867 		ufshcd_pm_qos_update(hba, true);
1868 		_ufs_mtk_clk_scale(hba, true);
1869 	} else if ((!ufshcd_is_clkscaling_supported(hba) &&
1870 		    hba->pwr_info.gear_rx >= UFS_HS_G5)) {
1871 		_ufs_mtk_clk_scale(hba, true);
1872 	}
1873 
1874 	if (ufshcd_is_link_hibern8(hba)) {
1875 		err = ufs_mtk_link_set_hpm(hba);
1876 		if (err)
1877 			goto fail;
1878 	}
1879 
1880 	return 0;
1881 
1882 fail:
1883 	/*
1884 	 * Check if the platform (parent) device has resumed, and ensure that
1885 	 * power, clock, and MTCMOS are all turned on.
1886 	 */
1887 	err = ufshcd_link_recovery(hba);
1888 	if (err) {
1889 		dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n",
1890 			hba->dev->power.request,
1891 			hba->dev->power.runtime_status,
1892 			hba->dev->power.runtime_error);
1893 	}
1894 
1895 	return 0; /* Cannot return a failure; otherwise the I/O will hang. */
1896 }
1897 
1898 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
1899 {
1900 	/* Dump ufshci register 0x140 ~ 0x14C */
1901 	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
1902 			 "XOUFS Ctrl (0x140): ");
1903 
1904 	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
1905 
1906 	/* Dump ufshci register 0x2200 ~ 0x22AC */
1907 	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
1908 			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
1909 			 "MPHY Ctrl (0x2200): ");
1910 
1911 	/* Direct debugging information to REG_MTK_PROBE */
1912 	ufs_mtk_dbg_sel(hba);
1913 	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
1914 }
1915 
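/*
 * Apply device-dependent tweaks: MCQ IRQ affinity, vendor-specific
 * UniPro attributes (PA_TACTIVATE/PA_HIBERN8TIME) and the reference
 * clock gating/ungating wait time.
 */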
1916 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1917 {
1918 	struct ufs_dev_info *dev_info = &hba->dev_info;
1919 	u16 mid = dev_info->wmanufacturerid;
1920 	unsigned int cpu;
1921 
1922 	if (hba->mcq_enabled) {
1923 		/* Iterate over all CPUs to set affinity for the MCQ IRQs */
1924 		for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1925 			ufs_mtk_mcq_set_irq_affinity(hba, cpu);
1926 	}
1927 
1928 	if (mid == UFS_VENDOR_SAMSUNG) {
1929 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1930 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
1931 	} else if (mid == UFS_VENDOR_MICRON) {
1932 		/* Only for hosts which have the TX skew issue */
1933 		if (ufs_mtk_is_tx_skew_fix(hba) &&
1934 			(STR_PRFX_EQUAL("MT128GBCAV2U31", dev_info->model) ||
1935 			STR_PRFX_EQUAL("MT256GBCAV4U31", dev_info->model) ||
1936 			STR_PRFX_EQUAL("MT512GBCAV8U31", dev_info->model) ||
1937 			STR_PRFX_EQUAL("MT256GBEAX4U40", dev_info->model) ||
1938 			STR_PRFX_EQUAL("MT512GAYAX4U40", dev_info->model) ||
1939 			STR_PRFX_EQUAL("MT001TAYAX8U40", dev_info->model))) {
1940 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8);
1941 		}
1942 	}
1943 
1944 	/*
1945 	 * Decide the waiting time before gating the reference clock and
1946 	 * after ungating the reference clock, according to vendor
1947 	 * requirements.
1948 	 */
1949 	if (mid == UFS_VENDOR_SAMSUNG)
1950 		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
1951 	else if (mid == UFS_VENDOR_SKHYNIX)
1952 		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
1953 	else if (mid == UFS_VENDOR_TOSHIBA)
1954 		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
1955 	else
1956 		ufs_mtk_setup_ref_clk_wait_us(hba,
1957 					      REFCLK_DEFAULT_WAIT_US);
1958 	return 0;
1959 }
1960 
1961 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1962 {
1963 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1964 
1965 	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc) {
1966 		hba->vreg_info.vcc->always_on = true;
1967 		/*
1968 		 * VCC will be kept always-on, thus we don't need any delay
1969 		 * before putting the device's VCC into LPM mode.
1970 		 */
1971 		hba->dev_quirks &= ~UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM;
1972 	}
1973 
1974 	ufs_mtk_vreg_fix_vcc(hba);
1975 	ufs_mtk_vreg_fix_vccqx(hba);
1976 	ufs_mtk_fix_ahit(hba);
1977 	ufs_mtk_fix_clock_scaling(hba);
1978 }
1979 
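/*
 * Trace each UFS event and, for UIC errors, decode the error register
 * bits into the human-readable strings defined above.
 */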
1980 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1981 				 enum ufs_event_type evt, void *data)
1982 {
1983 	unsigned int val = *(u32 *)data;
1984 	unsigned long reg;
1985 	u8 bit;
1986 
1987 	trace_ufs_mtk_event(evt, val);
1988 
1989 	/* Print details of UIC Errors */
1990 	if (evt <= UFS_EVT_DME_ERR) {
1991 		dev_info(hba->dev,
1992 			 "Host UIC Error Code (%s): %08x\n",
1993 			 ufs_uic_err_str[evt], val);
1994 		reg = val;
1995 	}
1996 
1997 	if (evt == UFS_EVT_PA_ERR) {
1998 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
1999 			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
2000 	}
2001 
2002 	if (evt == UFS_EVT_DL_ERR) {
2003 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
2004 			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
2005 	}
2006 }
2007 
2008 static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
2009 				struct devfreq_dev_profile *profile,
2010 				struct devfreq_simple_ondemand_data *data)
2011 {
2012 	/* Customize min gear in clk scaling */
2013 	hba->clk_scaling.min_gear = UFS_HS_G4;
2014 
2015 	hba->vps->devfreq_profile.polling_ms = 200;
2016 	hba->vps->ondemand_data.upthreshold = 50;
2017 	hba->vps->ondemand_data.downdifferential = 20;
2018 }
2019 
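/*
 * Reparent ufs_sel (and, when present, the FDE clock) to the max or min
 * source clock, optionally raising or releasing the bound vcore voltage.
 * The mux clocks are kept prepared/enabled only for the duration of the
 * switch.
 */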
2020 static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
2021 {
2022 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2023 	struct ufs_mtk_clk *mclk = &host->mclk;
2024 	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
2025 	struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki;
2026 	struct regulator *reg;
2027 	int volt, ret = 0;
2028 	bool clk_bind_vcore = false;
2029 	bool clk_fde_scale = false;
2030 
2031 	if (!hba->clk_scaling.is_initialized)
2032 		return;
2033 
2034 	if (!clki || !fde_clki)
2035 		return;
2036 
2037 	reg = host->mclk.reg_vcore;
2038 	volt = host->mclk.vcore_volt;
2039 	if (reg && volt != 0)
2040 		clk_bind_vcore = true;
2041 
2042 	if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki)
2043 		clk_fde_scale = true;
2044 
2045 	ret = clk_prepare_enable(clki->clk);
2046 	if (ret) {
2047 		dev_info(hba->dev,
2048 			 "clk_prepare_enable() fail, ret: %d\n", ret);
2049 		return;
2050 	}
2051 
2052 	if (clk_fde_scale) {
2053 		ret = clk_prepare_enable(fde_clki->clk);
2054 		if (ret) {
2055 			dev_info(hba->dev,
2056 				 "fde clk_prepare_enable() fail, ret: %d\n", ret);
2057 			return;
2058 		}
2059 	}
2060 
2061 	if (scale_up) {
2062 		if (clk_bind_vcore) {
2063 			ret = regulator_set_voltage(reg, volt, INT_MAX);
2064 			if (ret) {
2065 				dev_info(hba->dev,
2066 					"Failed to set vcore to %d\n", volt);
2067 				goto out;
2068 			}
2069 		}
2070 
2071 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
2072 		if (ret) {
2073 			dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
2074 				ret);
2075 		}
2076 
2077 		if (clk_fde_scale) {
2078 			ret = clk_set_parent(fde_clki->clk,
2079 				mclk->ufs_fde_max_clki->clk);
2080 			if (ret) {
2081 				dev_info(hba->dev,
2082 					"Failed to set fde clk mux, ret = %d\n",
2083 					ret);
2084 			}
2085 		}
2086 	} else {
2087 		if (clk_fde_scale) {
2088 			ret = clk_set_parent(fde_clki->clk,
2089 				mclk->ufs_fde_min_clki->clk);
2090 			if (ret) {
2091 				dev_info(hba->dev,
2092 					"Failed to set fde clk mux, ret = %d\n",
2093 					ret);
2094 				goto out;
2095 			}
2096 		}
2097 
2098 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
2099 		if (ret) {
2100 			dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
2101 				ret);
2102 			goto out;
2103 		}
2104 
2105 		if (clk_bind_vcore) {
2106 			ret = regulator_set_voltage(reg, 0, INT_MAX);
2107 			if (ret) {
2108 				dev_info(hba->dev,
2109 					"failed to set vcore to MIN\n");
2110 			}
2111 		}
2112 	}
2113 
2114 out:
2115 	clk_disable_unprepare(clki->clk);
2116 
2117 	if (clk_fde_scale)
2118 		clk_disable_unprepare(fde_clki->clk);
2119 }
2120 
2121 /**
2122  * ufs_mtk_clk_scale - Internal clk scaling operation
2123  *
2124  * The MTK platform supports clock scaling by switching the parent of the ufs_sel mux.
2125  * ufs_sel feeds downstream into ufs_ck, which drives the UFS hardware directly.
2126  * The max and min clock rates of ufs_sel defined in the DTS should match the rates
2127  * of "ufs_sel_max_src" and "ufs_sel_min_src", respectively.
2128  * This prevents changing the rate of a PLL clock that is shared between modules.
2129  *
2130  * @hba: per adapter instance
2131  * @scale_up: True for scaling up and false for scaling down
2132  */
2133 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
2134 {
2135 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2136 	struct ufs_mtk_clk *mclk = &host->mclk;
2137 	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
2138 
2139 	if (host->clk_scale_up == scale_up)
2140 		goto out;
2141 
2142 	if (scale_up)
2143 		_ufs_mtk_clk_scale(hba, true);
2144 	else
2145 		_ufs_mtk_clk_scale(hba, false);
2146 
2147 	host->clk_scale_up = scale_up;
2148 
2149 	/* Must always be set before clk_set_rate() */
2150 	if (scale_up)
2151 		clki->curr_freq = clki->max_freq;
2152 	else
2153 		clki->curr_freq = clki->min_freq;
2154 out:
2155 	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
2156 }
2157 
2158 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
2159 				    unsigned long target_freq,
2160 				    enum ufs_notify_change_status status)
2161 {
2162 	if (!ufshcd_is_clkscaling_supported(hba))
2163 		return 0;
2164 
2165 	if (status == PRE_CHANGE) {
2166 		/* Switch parent before clk_set_rate() */
2167 		ufs_mtk_clk_scale(hba, scale_up);
2168 	} else {
2169 		/* Request interrupt latency QoS accordingly */
2170 		ufs_mtk_scale_perf(hba, scale_up);
2171 	}
2172 
2173 	return 0;
2174 }
2175 
2176 static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
2177 {
2178 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2179 
2180 	/* MCQ operation not permitted */
2181 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
2182 		return -EPERM;
2183 
2184 	return MAX_SUPP_MAC;
2185 }
2186 
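/*
 * Program the MediaTek-specific MCQ operation and runtime register
 * offsets (SQD/SQIS/CQD/CQIS) and their per-queue stride.
 */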
2187 static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
2188 {
2189 	struct ufshcd_mcq_opr_info_t *opr;
2190 	int i;
2191 
2192 	hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
2193 	hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
2194 	hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
2195 	hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;
2196 
2197 	for (i = 0; i < OPR_MAX; i++) {
2198 		opr = &hba->mcq_opr[i];
2199 		opr->stride = REG_UFS_MCQ_STRIDE;
2200 		opr->base = hba->mmio_base + opr->offset;
2201 	}
2202 
2203 	return 0;
2204 }
2205 
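/*
 * Derive the MCQ queue-configuration base address from the MCQ
 * capabilities register via MCQ_QUEUE_OFFSET(), provided the per-queue
 * interrupts were supplied.
 */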
2206 static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
2207 {
2208 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2209 
2210 	/* fail mcq initialization if interrupt is not filled properly */
2211 	if (!host->mcq_nr_intr) {
2212 		dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
2213 		return -EINVAL;
2214 	}
2215 
2216 	hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
2217 	return 0;
2218 }
2219 
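/*
 * Per-queue MCQ interrupt handler: acknowledge the completion queue
 * interrupt status and poll completed CQ entries for the owning
 * hardware queue.
 */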
2220 static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
2221 {
2222 	struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
2223 	struct ufs_hba *hba = mcq_intr_info->hba;
2224 	struct ufs_hw_queue *hwq;
2225 	u32 events;
2226 	int qid = mcq_intr_info->qid;
2227 
2228 	hwq = &hba->uhq[qid];
2229 
2230 	events = ufshcd_mcq_read_cqis(hba, qid);
2231 	if (events)
2232 		ufshcd_mcq_write_cqis(hba, events, qid);
2233 
2234 	if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
2235 		ufshcd_mcq_poll_cqe_lock(hba, hwq);
2236 
2237 	return IRQ_HANDLED;
2238 }
2239 
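/*
 * Request one interrupt per MCQ queue and bind each to its queue id;
 * abort if any IRQ number is invalid or the request fails.
 */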
2240 static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
2241 {
2242 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2243 	u32 irq, i;
2244 	int ret;
2245 
2246 	for (i = 0; i < host->mcq_nr_intr; i++) {
2247 		irq = host->mcq_intr_info[i].irq;
2248 		if (irq == MTK_MCQ_INVALID_IRQ) {
2249 			dev_err(hba->dev, "invalid irq. %d\n", i);
2250 			return -ENOPARAM;
2251 		}
2252 
2253 		host->mcq_intr_info[i].qid = i;
2254 		ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
2255 				       &host->mcq_intr_info[i]);
2256 
2257 		dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");
2258 
2259 		if (ret) {
2260 			dev_err(hba->dev, "Cannot request irq %d\n", ret);
2261 			return ret;
2262 		}
2263 	}
2264 	host->is_mcq_intr_enabled = true;
2265 
2266 	return 0;
2267 }
2268 
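/*
 * Configure MCQ interrupt options: on first use, clear the interrupt
 * enable field and optionally hook up the per-queue IRQs, then enable
 * multi-interrupt mode together with MCQ AH8 support.
 */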
2269 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
2270 {
2271 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2272 	int ret = 0;
2273 
2274 	if (!host->mcq_set_intr) {
2275 		/* Disable irq option register */
2276 		ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
2277 
2278 		if (irq) {
2279 			ret = ufs_mtk_config_mcq_irq(hba);
2280 			if (ret)
2281 				return ret;
2282 		}
2283 
2284 		host->mcq_set_intr = true;
2285 	}
2286 
2287 	ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
2288 	ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
2289 
2290 	return 0;
2291 }
2292 
2293 static int ufs_mtk_config_esi(struct ufs_hba *hba)
2294 {
2295 	return ufs_mtk_config_mcq(hba, true);
2296 }
2297 
2298 static void ufs_mtk_config_scsi_dev(struct scsi_device *sdev)
2299 {
2300 	struct ufs_hba *hba = shost_priv(sdev->host);
2301 
2302 	dev_dbg(hba->dev, "lu %llu scsi device configured", sdev->lun);
2303 	if (sdev->lun == 2)
2304 		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, sdev->request_queue);
2305 }
2306 
2307 /*
2308  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
2309  *
2310  * The variant operations configure the necessary controller and PHY
2311  * handshake during initialization.
2312  */
2313 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
2314 	.name                = "mediatek.ufshci",
2315 	.max_num_rtt         = MTK_MAX_NUM_RTT,
2316 	.init                = ufs_mtk_init,
2317 	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
2318 	.setup_clocks        = ufs_mtk_setup_clocks,
2319 	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
2320 	.link_startup_notify = ufs_mtk_link_startup_notify,
2321 	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
2322 	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
2323 	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
2324 	.suspend             = ufs_mtk_suspend,
2325 	.resume              = ufs_mtk_resume,
2326 	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
2327 	.device_reset        = ufs_mtk_device_reset,
2328 	.event_notify        = ufs_mtk_event_notify,
2329 	.config_scaling_param = ufs_mtk_config_scaling_param,
2330 	.clk_scale_notify    = ufs_mtk_clk_scale_notify,
2331 	/* mcq vops */
2332 	.get_hba_mac         = ufs_mtk_get_hba_mac,
2333 	.op_runtime_config   = ufs_mtk_op_runtime_config,
2334 	.mcq_config_resource = ufs_mtk_mcq_config_resource,
2335 	.config_esi          = ufs_mtk_config_esi,
2336 	.config_scsi_dev     = ufs_mtk_config_scsi_dev,
2337 };
2338 
2339 /**
2340  * ufs_mtk_probe - probe routine of the driver
2341  * @pdev: pointer to platform device handle
2342  *
2343  * Return: zero for success and non-zero for failure.
2344  */
2345 static int ufs_mtk_probe(struct platform_device *pdev)
2346 {
2347 	int err;
2348 	struct device *dev = &pdev->dev, *phy_dev = NULL;
2349 	struct device_node *reset_node, *phy_node = NULL;
2350 	struct platform_device *reset_pdev, *phy_pdev = NULL;
2351 	struct device_link *link;
2352 	struct ufs_hba *hba;
2353 	struct ufs_mtk_host *host;
2354 
2355 	reset_node = of_find_compatible_node(NULL, NULL,
2356 					     "ti,syscon-reset");
2357 	if (!reset_node) {
2358 		dev_notice(dev, "find ti,syscon-reset fail\n");
2359 		goto skip_reset;
2360 	}
2361 	reset_pdev = of_find_device_by_node(reset_node);
2362 	if (!reset_pdev) {
2363 		dev_notice(dev, "find reset_pdev fail\n");
2364 		goto skip_reset;
2365 	}
2366 	link = device_link_add(dev, &reset_pdev->dev,
2367 		DL_FLAG_AUTOPROBE_CONSUMER);
2368 	put_device(&reset_pdev->dev);
2369 	if (!link) {
2370 		dev_notice(dev, "add reset device_link fail\n");
2371 		goto skip_reset;
2372 	}
2373 	/* supplier is not probed */
2374 	if (link->status == DL_STATE_DORMANT) {
2375 		err = -EPROBE_DEFER;
2376 		goto out;
2377 	}
2378 
2379 skip_reset:
2380 	/* find phy node */
2381 	phy_node = of_parse_phandle(dev->of_node, "phys", 0);
2382 
2383 	if (phy_node) {
2384 		phy_pdev = of_find_device_by_node(phy_node);
2385 		if (!phy_pdev)
2386 			goto skip_phy;
2387 		phy_dev = &phy_pdev->dev;
2388 
2389 		pm_runtime_set_active(phy_dev);
2390 		pm_runtime_enable(phy_dev);
2391 		pm_runtime_get_sync(phy_dev);
2392 
2393 		put_device(phy_dev);
2394 		dev_info(dev, "phys node found\n");
2395 	} else {
2396 		dev_notice(dev, "phys node not found\n");
2397 	}
2398 
2399 skip_phy:
2400 	/* perform generic probe */
2401 	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
2402 	if (err) {
2403 		dev_err(dev, "probe failed %d\n", err);
2404 		goto out;
2405 	}
2406 
2407 	hba = platform_get_drvdata(pdev);
2408 	if (!hba)
2409 		goto out;
2410 
2411 	if (phy_node && phy_dev) {
2412 		host = ufshcd_get_variant(hba);
2413 		host->phy_dev = phy_dev;
2414 	}
2415 
2416 	/*
2417 	 * Because the default power setting of VSx (the upper layer of
2418 	 * VCCQ/VCCQ2) is HWLP, we need to prevent VCCQ/VCCQ2 from
2419 	 * entering LPM.
2420 	 */
2421 	ufs_mtk_dev_vreg_set_lpm(hba, false);
2422 
2423 out:
2424 	of_node_put(phy_node);
2425 	of_node_put(reset_node);
2426 	return err;
2427 }
2428 
2429 /**
2430  * ufs_mtk_remove - remove routine of the driver
2431  * @pdev: pointer to platform device handle
2434  */
2435 static void ufs_mtk_remove(struct platform_device *pdev)
2436 {
2437 	ufshcd_pltfrm_remove(pdev);
2438 }
2439 
2440 #ifdef CONFIG_PM_SLEEP
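/*
 * System PM: beyond the generic ufshcd handlers, put the device
 * regulators into LPM and, on platforms with RTFF, switch the MTCMOS
 * power domain off/on across system suspend/resume.
 */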
2441 static int ufs_mtk_system_suspend(struct device *dev)
2442 {
2443 	struct ufs_hba *hba = dev_get_drvdata(dev);
2444 	struct arm_smccc_res res;
2445 	int ret;
2446 
2447 	if (hba->shutting_down) {
2448 		ret = -EBUSY;
2449 		goto out;
2450 	}
2451 
2452 	ret = ufshcd_system_suspend(dev);
2453 	if (ret)
2454 		goto out;
2455 
2456 	if (pm_runtime_suspended(hba->dev))
2457 		goto out;
2458 
2459 	ufs_mtk_dev_vreg_set_lpm(hba, true);
2460 
2461 	if (ufs_mtk_is_rtff_mtcmos(hba))
2462 		ufs_mtk_mtcmos_ctrl(false, res);
2463 
2464 out:
2465 	return ret;
2466 }
2467 
2468 static int ufs_mtk_system_resume(struct device *dev)
2469 {
2470 	int ret = 0;
2471 	struct ufs_hba *hba = dev_get_drvdata(dev);
2472 	struct arm_smccc_res res;
2473 
2474 	if (pm_runtime_suspended(hba->dev))
2475 		goto out;
2476 
2477 	if (ufs_mtk_is_rtff_mtcmos(hba))
2478 		ufs_mtk_mtcmos_ctrl(true, res);
2479 
2480 	ufs_mtk_dev_vreg_set_lpm(hba, false);
2481 
2482 out:
2483 	ret = ufshcd_system_resume(dev);
2484 
2485 	return ret;
2486 }
2487 #endif
2488 
2489 #ifdef CONFIG_PM
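/*
 * Runtime PM mirrors the system PM flow and additionally drops or takes
 * a runtime PM reference on the UFS PHY device when one was found at
 * probe time.
 */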
2490 static int ufs_mtk_runtime_suspend(struct device *dev)
2491 {
2492 	struct ufs_hba *hba = dev_get_drvdata(dev);
2493 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2494 	struct arm_smccc_res res;
2495 	int ret = 0;
2496 
2497 	ret = ufshcd_runtime_suspend(dev);
2498 	if (ret)
2499 		return ret;
2500 
2501 	ufs_mtk_dev_vreg_set_lpm(hba, true);
2502 
2503 	if (ufs_mtk_is_rtff_mtcmos(hba))
2504 		ufs_mtk_mtcmos_ctrl(false, res);
2505 
2506 	if (host->phy_dev)
2507 		pm_runtime_put_sync(host->phy_dev);
2508 
2509 	return 0;
2510 }
2511 
2512 static int ufs_mtk_runtime_resume(struct device *dev)
2513 {
2514 	struct ufs_hba *hba = dev_get_drvdata(dev);
2515 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2516 	struct arm_smccc_res res;
2517 
2518 	if (ufs_mtk_is_rtff_mtcmos(hba))
2519 		ufs_mtk_mtcmos_ctrl(true, res);
2520 
2521 	if (host->phy_dev)
2522 		pm_runtime_get_sync(host->phy_dev);
2523 
2524 	ufs_mtk_dev_vreg_set_lpm(hba, false);
2525 
2526 	return ufshcd_runtime_resume(dev);
2527 }
2528 #endif
2529 
2530 static const struct dev_pm_ops ufs_mtk_pm_ops = {
2531 	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
2532 				ufs_mtk_system_resume)
2533 	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
2534 			   ufs_mtk_runtime_resume, NULL)
2535 	.prepare	 = ufshcd_suspend_prepare,
2536 	.complete	 = ufshcd_resume_complete,
2537 };
2538 
2539 static struct platform_driver ufs_mtk_pltform = {
2540 	.probe      = ufs_mtk_probe,
2541 	.remove = ufs_mtk_remove,
2542 	.driver = {
2543 		.name   = "ufshcd-mtk",
2544 		.pm     = &ufs_mtk_pm_ops,
2545 		.of_match_table = ufs_mtk_of_match,
2546 	},
2547 };
2548 
2549 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
2550 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
2551 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
2552 MODULE_LICENSE("GPL v2");
2553 
2554 module_platform_driver(ufs_mtk_pltform);
2555