xref: /linux/drivers/ufs/host/ufs-mediatek.c (revision 37d4cc69876f6ed981b54b07f0d07fc4d4bd9f13)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *	Stanley Chu <stanley.chu@mediatek.com>
6  *	Peter Wang <peter.wang@mediatek.com>
7  */
8 
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_device.h>
17 #include <linux/of_platform.h>
18 #include <linux/phy/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/regulator/consumer.h>
21 #include <linux/reset.h>
22 
23 #include <ufs/ufshcd.h>
24 #include "ufshcd-pltfrm.h"
25 #include <ufs/ufs_quirks.h>
26 #include <ufs/unipro.h>
27 
28 #include "ufs-mediatek.h"
29 #include "ufs-mediatek-sip.h"
30 
31 static int  ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
32 
33 #define CREATE_TRACE_POINTS
34 #include "ufs-mediatek-trace.h"
35 #undef CREATE_TRACE_POINTS
36 
37 #define MAX_SUPP_MAC 64
38 #define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
39 
40 static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
41 	{ .wmanufacturerid = UFS_ANY_VENDOR,
42 	  .model = UFS_ANY_MODEL,
43 	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
44 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
45 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
46 	  .model = "H9HQ21AFAMZDAR",
47 	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
48 	{}
49 };
50 
51 static const struct of_device_id ufs_mtk_of_match[] = {
52 	{ .compatible = "mediatek,mt8183-ufshci" },
53 	{},
54 };
55 MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
56 
57 /*
58  * Details of UIC Errors
59  */
60 static const char *const ufs_uic_err_str[] = {
61 	"PHY Adapter Layer",
62 	"Data Link Layer",
63 	"Network Link Layer",
64 	"Transport Link Layer",
65 	"DME"
66 };
67 
68 static const char *const ufs_uic_pa_err_str[] = {
69 	"PHY error on Lane 0",
70 	"PHY error on Lane 1",
71 	"PHY error on Lane 2",
72 	"PHY error on Lane 3",
73 	"Generic PHY Adapter Error. This should be the LINERESET indication"
74 };
75 
76 static const char *const ufs_uic_dl_err_str[] = {
77 	"NAC_RECEIVED",
78 	"TCx_REPLAY_TIMER_EXPIRED",
79 	"AFCx_REQUEST_TIMER_EXPIRED",
80 	"FCx_PROTECTION_TIMER_EXPIRED",
81 	"CRC_ERROR",
82 	"RX_BUFFER_OVERFLOW",
83 	"MAX_FRAME_LENGTH_EXCEEDED",
84 	"WRONG_SEQUENCE_NUMBER",
85 	"AFC_FRAME_SYNTAX_ERROR",
86 	"NAC_FRAME_SYNTAX_ERROR",
87 	"EOF_SYNTAX_ERROR",
88 	"FRAME_SYNTAX_ERROR",
89 	"BAD_CTRL_SYMBOL_TYPE",
90 	"PA_INIT_ERROR",
91 	"PA_ERROR_IND_RECEIVED",
92 	"PA_INIT"
93 };
94 
ufs_mtk_is_boost_crypt_enabled(struct ufs_hba * hba)95 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
96 {
97 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
98 
99 	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
100 }
101 
ufs_mtk_is_va09_supported(struct ufs_hba * hba)102 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
103 {
104 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
105 
106 	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
107 }
108 
ufs_mtk_is_broken_vcc(struct ufs_hba * hba)109 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
110 {
111 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
112 
113 	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
114 }
115 
ufs_mtk_is_pmc_via_fastauto(struct ufs_hba * hba)116 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
117 {
118 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
119 
120 	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
121 }
122 
ufs_mtk_is_tx_skew_fix(struct ufs_hba * hba)123 static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
124 {
125 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
126 
127 	return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX);
128 }
129 
ufs_mtk_is_rtff_mtcmos(struct ufs_hba * hba)130 static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
131 {
132 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
133 
134 	return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS);
135 }
136 
ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba * hba)137 static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
138 {
139 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
140 
141 	return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM);
142 }
143 
ufs_mtk_cfg_unipro_cg(struct ufs_hba * hba,bool enable)144 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
145 {
146 	u32 tmp;
147 
148 	if (enable) {
149 		ufshcd_dme_get(hba,
150 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
151 		tmp = tmp |
152 		      (1 << RX_SYMBOL_CLK_GATE_EN) |
153 		      (1 << SYS_CLK_GATE_EN) |
154 		      (1 << TX_CLK_GATE_EN);
155 		ufshcd_dme_set(hba,
156 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
157 
158 		ufshcd_dme_get(hba,
159 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
160 		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
161 		ufshcd_dme_set(hba,
162 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
163 	} else {
164 		ufshcd_dme_get(hba,
165 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
166 		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
167 			      (1 << SYS_CLK_GATE_EN) |
168 			      (1 << TX_CLK_GATE_EN));
169 		ufshcd_dme_set(hba,
170 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
171 
172 		ufshcd_dme_get(hba,
173 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
174 		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
175 		ufshcd_dme_set(hba,
176 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
177 	}
178 }
179 
ufs_mtk_crypto_enable(struct ufs_hba * hba)180 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
181 {
182 	struct arm_smccc_res res;
183 
184 	ufs_mtk_crypto_ctrl(res, 1);
185 	if (res.a0) {
186 		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
187 			 __func__, res.a0);
188 		hba->caps &= ~UFSHCD_CAP_CRYPTO;
189 	}
190 }
191 
ufs_mtk_host_reset(struct ufs_hba * hba)192 static void ufs_mtk_host_reset(struct ufs_hba *hba)
193 {
194 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
195 	struct arm_smccc_res res;
196 
197 	reset_control_assert(host->hci_reset);
198 	reset_control_assert(host->crypto_reset);
199 	reset_control_assert(host->unipro_reset);
200 	reset_control_assert(host->mphy_reset);
201 
202 	usleep_range(100, 110);
203 
204 	reset_control_deassert(host->unipro_reset);
205 	reset_control_deassert(host->crypto_reset);
206 	reset_control_deassert(host->hci_reset);
207 	reset_control_deassert(host->mphy_reset);
208 
209 	/* restore mphy setting aftre mphy reset */
210 	if (host->mphy_reset)
211 		ufs_mtk_mphy_ctrl(UFS_MPHY_RESTORE, res);
212 }
213 
ufs_mtk_init_reset_control(struct ufs_hba * hba,struct reset_control ** rc,char * str)214 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
215 				       struct reset_control **rc,
216 				       char *str)
217 {
218 	*rc = devm_reset_control_get(hba->dev, str);
219 	if (IS_ERR(*rc)) {
220 		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
221 			 str, PTR_ERR(*rc));
222 		*rc = NULL;
223 	}
224 }
225 
ufs_mtk_init_reset(struct ufs_hba * hba)226 static void ufs_mtk_init_reset(struct ufs_hba *hba)
227 {
228 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
229 
230 	ufs_mtk_init_reset_control(hba, &host->hci_reset,
231 				   "hci_rst");
232 	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
233 				   "unipro_rst");
234 	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
235 				   "crypto_rst");
236 	ufs_mtk_init_reset_control(hba, &host->mphy_reset,
237 				   "mphy_rst");
238 }
239 
ufs_mtk_hce_enable_notify(struct ufs_hba * hba,enum ufs_notify_change_status status)240 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
241 				     enum ufs_notify_change_status status)
242 {
243 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
244 
245 	if (status == PRE_CHANGE) {
246 		if (host->unipro_lpm) {
247 			hba->vps->hba_enable_delay_us = 0;
248 		} else {
249 			hba->vps->hba_enable_delay_us = 600;
250 			ufs_mtk_host_reset(hba);
251 		}
252 
253 		if (hba->caps & UFSHCD_CAP_CRYPTO)
254 			ufs_mtk_crypto_enable(hba);
255 
256 		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
257 			ufshcd_writel(hba, 0,
258 				      REG_AUTO_HIBERNATE_IDLE_TIMER);
259 			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
260 			hba->ahit = 0;
261 		}
262 
263 		/*
264 		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
265 		 * to prevent host hang issue
266 		 */
267 		ufshcd_writel(hba,
268 			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
269 			      REG_UFS_XOUFS_CTRL);
270 	}
271 
272 	return 0;
273 }
274 
ufs_mtk_bind_mphy(struct ufs_hba * hba)275 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
276 {
277 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
278 	struct device *dev = hba->dev;
279 	struct device_node *np = dev->of_node;
280 	int err = 0;
281 
282 	host->mphy = devm_of_phy_get_by_index(dev, np, 0);
283 
284 	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
285 		/*
286 		 * UFS driver might be probed before the phy driver does.
287 		 * In that case we would like to return EPROBE_DEFER code.
288 		 */
289 		err = -EPROBE_DEFER;
290 		dev_info(dev,
291 			 "%s: required phy hasn't probed yet. err = %d\n",
292 			__func__, err);
293 	} else if (IS_ERR(host->mphy)) {
294 		err = PTR_ERR(host->mphy);
295 		if (err != -ENODEV) {
296 			dev_info(dev, "%s: PHY get failed %d\n", __func__,
297 				 err);
298 		}
299 	}
300 
301 	if (err)
302 		host->mphy = NULL;
303 	/*
304 	 * Allow unbound mphy because not every platform needs specific
305 	 * mphy control.
306 	 */
307 	if (err == -ENODEV)
308 		err = 0;
309 
310 	return err;
311 }
312 
ufs_mtk_setup_ref_clk(struct ufs_hba * hba,bool on)313 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
314 {
315 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
316 	struct arm_smccc_res res;
317 	ktime_t timeout, time_checked;
318 	u32 value;
319 
320 	if (host->ref_clk_enabled == on)
321 		return 0;
322 
323 	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
324 
325 	if (on) {
326 		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
327 	} else {
328 		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
329 		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
330 	}
331 
332 	/* Wait for ack */
333 	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
334 	do {
335 		time_checked = ktime_get();
336 		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
337 
338 		/* Wait until ack bit equals to req bit */
339 		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
340 			goto out;
341 
342 		usleep_range(100, 200);
343 	} while (ktime_before(time_checked, timeout));
344 
345 	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
346 
347 	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
348 
349 	return -ETIMEDOUT;
350 
351 out:
352 	host->ref_clk_enabled = on;
353 	if (on)
354 		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
355 
356 	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
357 
358 	return 0;
359 }
360 
ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba * hba,u16 gating_us)361 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
362 					  u16 gating_us)
363 {
364 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
365 
366 	if (hba->dev_info.clk_gating_wait_us) {
367 		host->ref_clk_gating_wait_us =
368 			hba->dev_info.clk_gating_wait_us;
369 	} else {
370 		host->ref_clk_gating_wait_us = gating_us;
371 	}
372 
373 	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
374 }
375 
ufs_mtk_dbg_sel(struct ufs_hba * hba)376 static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
377 {
378 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
379 
380 	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
381 		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
382 		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
383 		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
384 		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
385 		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
386 	} else {
387 		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
388 	}
389 }
390 
ufs_mtk_wait_idle_state(struct ufs_hba * hba,unsigned long retry_ms)391 static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
392 			    unsigned long retry_ms)
393 {
394 	u64 timeout, time_checked;
395 	u32 val, sm;
396 	bool wait_idle;
397 
398 	/* cannot use plain ktime_get() in suspend */
399 	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
400 
401 	/* wait a specific time after check base */
402 	udelay(10);
403 	wait_idle = false;
404 
405 	do {
406 		time_checked = ktime_get_mono_fast_ns();
407 		ufs_mtk_dbg_sel(hba);
408 		val = ufshcd_readl(hba, REG_UFS_PROBE);
409 
410 		sm = val & 0x1f;
411 
412 		/*
413 		 * if state is in H8 enter and H8 enter confirm
414 		 * wait until return to idle state.
415 		 */
416 		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
417 			wait_idle = true;
418 			udelay(50);
419 			continue;
420 		} else if (!wait_idle)
421 			break;
422 
423 		if (wait_idle && (sm == VS_HCE_BASE))
424 			break;
425 	} while (time_checked < timeout);
426 
427 	if (wait_idle && sm != VS_HCE_BASE)
428 		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
429 }
430 
ufs_mtk_wait_link_state(struct ufs_hba * hba,u32 state,unsigned long max_wait_ms)431 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
432 				   unsigned long max_wait_ms)
433 {
434 	ktime_t timeout, time_checked;
435 	u32 val;
436 
437 	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
438 	do {
439 		time_checked = ktime_get();
440 		ufs_mtk_dbg_sel(hba);
441 		val = ufshcd_readl(hba, REG_UFS_PROBE);
442 		val = val >> 28;
443 
444 		if (val == state)
445 			return 0;
446 
447 		/* Sleep for max. 200us */
448 		usleep_range(100, 200);
449 	} while (ktime_before(time_checked, timeout));
450 
451 	return -ETIMEDOUT;
452 }
453 
ufs_mtk_mphy_power_on(struct ufs_hba * hba,bool on)454 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
455 {
456 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
457 	struct phy *mphy = host->mphy;
458 	struct arm_smccc_res res;
459 	int ret = 0;
460 
461 	if (!mphy || !(on ^ host->mphy_powered_on))
462 		return 0;
463 
464 	if (on) {
465 		if (ufs_mtk_is_va09_supported(hba)) {
466 			ret = regulator_enable(host->reg_va09);
467 			if (ret < 0)
468 				goto out;
469 			/* wait 200 us to stablize VA09 */
470 			usleep_range(200, 210);
471 			ufs_mtk_va09_pwr_ctrl(res, 1);
472 		}
473 		phy_power_on(mphy);
474 	} else {
475 		phy_power_off(mphy);
476 		if (ufs_mtk_is_va09_supported(hba)) {
477 			ufs_mtk_va09_pwr_ctrl(res, 0);
478 			ret = regulator_disable(host->reg_va09);
479 		}
480 	}
481 out:
482 	if (ret) {
483 		dev_info(hba->dev,
484 			 "failed to %s va09: %d\n",
485 			 on ? "enable" : "disable",
486 			 ret);
487 	} else {
488 		host->mphy_powered_on = on;
489 	}
490 
491 	return ret;
492 }
493 
ufs_mtk_get_host_clk(struct device * dev,const char * name,struct clk ** clk_out)494 static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
495 				struct clk **clk_out)
496 {
497 	struct clk *clk;
498 	int err = 0;
499 
500 	clk = devm_clk_get(dev, name);
501 	if (IS_ERR(clk))
502 		err = PTR_ERR(clk);
503 	else
504 		*clk_out = clk;
505 
506 	return err;
507 }
508 
ufs_mtk_boost_crypt(struct ufs_hba * hba,bool boost)509 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
510 {
511 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
512 	struct ufs_mtk_crypt_cfg *cfg;
513 	struct regulator *reg;
514 	int volt, ret;
515 
516 	if (!ufs_mtk_is_boost_crypt_enabled(hba))
517 		return;
518 
519 	cfg = host->crypt;
520 	volt = cfg->vcore_volt;
521 	reg = cfg->reg_vcore;
522 
523 	ret = clk_prepare_enable(cfg->clk_crypt_mux);
524 	if (ret) {
525 		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
526 			 ret);
527 		return;
528 	}
529 
530 	if (boost) {
531 		ret = regulator_set_voltage(reg, volt, INT_MAX);
532 		if (ret) {
533 			dev_info(hba->dev,
534 				 "failed to set vcore to %d\n", volt);
535 			goto out;
536 		}
537 
538 		ret = clk_set_parent(cfg->clk_crypt_mux,
539 				     cfg->clk_crypt_perf);
540 		if (ret) {
541 			dev_info(hba->dev,
542 				 "failed to set clk_crypt_perf\n");
543 			regulator_set_voltage(reg, 0, INT_MAX);
544 			goto out;
545 		}
546 	} else {
547 		ret = clk_set_parent(cfg->clk_crypt_mux,
548 				     cfg->clk_crypt_lp);
549 		if (ret) {
550 			dev_info(hba->dev,
551 				 "failed to set clk_crypt_lp\n");
552 			goto out;
553 		}
554 
555 		ret = regulator_set_voltage(reg, 0, INT_MAX);
556 		if (ret) {
557 			dev_info(hba->dev,
558 				 "failed to set vcore to MIN\n");
559 		}
560 	}
561 out:
562 	clk_disable_unprepare(cfg->clk_crypt_mux);
563 }
564 
ufs_mtk_init_host_clk(struct ufs_hba * hba,const char * name,struct clk ** clk)565 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
566 				 struct clk **clk)
567 {
568 	int ret;
569 
570 	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
571 	if (ret) {
572 		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
573 			 name, ret);
574 	}
575 
576 	return ret;
577 }
578 
ufs_mtk_init_boost_crypt(struct ufs_hba * hba)579 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
580 {
581 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
582 	struct ufs_mtk_crypt_cfg *cfg;
583 	struct device *dev = hba->dev;
584 	struct regulator *reg;
585 	u32 volt;
586 
587 	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
588 				   GFP_KERNEL);
589 	if (!host->crypt)
590 		goto disable_caps;
591 
592 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
593 	if (IS_ERR(reg)) {
594 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
595 			 PTR_ERR(reg));
596 		goto disable_caps;
597 	}
598 
599 	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
600 				 &volt)) {
601 		dev_info(dev, "failed to get boost-crypt-vcore-min");
602 		goto disable_caps;
603 	}
604 
605 	cfg = host->crypt;
606 	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
607 				  &cfg->clk_crypt_mux))
608 		goto disable_caps;
609 
610 	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
611 				  &cfg->clk_crypt_lp))
612 		goto disable_caps;
613 
614 	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
615 				  &cfg->clk_crypt_perf))
616 		goto disable_caps;
617 
618 	cfg->reg_vcore = reg;
619 	cfg->vcore_volt = volt;
620 	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
621 
622 disable_caps:
623 	return;
624 }
625 
ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba * hba)626 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
627 {
628 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
629 
630 	host->reg_va09 = regulator_get(hba->dev, "va09");
631 	if (IS_ERR(host->reg_va09))
632 		dev_info(hba->dev, "failed to get va09");
633 	else
634 		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
635 }
636 
ufs_mtk_init_host_caps(struct ufs_hba * hba)637 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
638 {
639 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
640 	struct device_node *np = hba->dev->of_node;
641 
642 	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
643 		ufs_mtk_init_boost_crypt(hba);
644 
645 	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
646 		ufs_mtk_init_va09_pwr_ctrl(hba);
647 
648 	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
649 		host->caps |= UFS_MTK_CAP_DISABLE_AH8;
650 
651 	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
652 		host->caps |= UFS_MTK_CAP_BROKEN_VCC;
653 
654 	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
655 		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
656 
657 	if (of_property_read_bool(np, "mediatek,ufs-tx-skew-fix"))
658 		host->caps |= UFS_MTK_CAP_TX_SKEW_FIX;
659 
660 	if (of_property_read_bool(np, "mediatek,ufs-disable-mcq"))
661 		host->caps |= UFS_MTK_CAP_DISABLE_MCQ;
662 
663 	if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
664 		host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
665 
666 	dev_info(hba->dev, "caps: 0x%x", host->caps);
667 }
668 
ufs_mtk_scale_perf(struct ufs_hba * hba,bool scale_up)669 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
670 {
671 	ufs_mtk_boost_crypt(hba, scale_up);
672 }
673 
ufs_mtk_pwr_ctrl(struct ufs_hba * hba,bool on)674 static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
675 {
676 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
677 
678 	if (on) {
679 		phy_power_on(host->mphy);
680 		ufs_mtk_setup_ref_clk(hba, on);
681 		if (!ufshcd_is_clkscaling_supported(hba))
682 			ufs_mtk_scale_perf(hba, on);
683 	} else {
684 		if (!ufshcd_is_clkscaling_supported(hba))
685 			ufs_mtk_scale_perf(hba, on);
686 		ufs_mtk_setup_ref_clk(hba, on);
687 		phy_power_off(host->mphy);
688 	}
689 }
690 
ufs_mtk_mcq_disable_irq(struct ufs_hba * hba)691 static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
692 {
693 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
694 	u32 irq, i;
695 
696 	if (!hba->mcq_enabled)
697 		return;
698 
699 	if (host->mcq_nr_intr == 0)
700 		return;
701 
702 	for (i = 0; i < host->mcq_nr_intr; i++) {
703 		irq = host->mcq_intr_info[i].irq;
704 		disable_irq(irq);
705 	}
706 	host->is_mcq_intr_enabled = false;
707 }
708 
ufs_mtk_mcq_enable_irq(struct ufs_hba * hba)709 static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
710 {
711 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
712 	u32 irq, i;
713 
714 	if (!hba->mcq_enabled)
715 		return;
716 
717 	if (host->mcq_nr_intr == 0)
718 		return;
719 
720 	if (host->is_mcq_intr_enabled == true)
721 		return;
722 
723 	for (i = 0; i < host->mcq_nr_intr; i++) {
724 		irq = host->mcq_intr_info[i].irq;
725 		enable_irq(irq);
726 	}
727 	host->is_mcq_intr_enabled = true;
728 }
729 
730 /**
731  * ufs_mtk_setup_clocks - enables/disable clocks
732  * @hba: host controller instance
733  * @on: If true, enable clocks else disable them.
734  * @status: PRE_CHANGE or POST_CHANGE notify
735  *
736  * Return: 0 on success, non-zero on failure.
737  */
ufs_mtk_setup_clocks(struct ufs_hba * hba,bool on,enum ufs_notify_change_status status)738 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
739 				enum ufs_notify_change_status status)
740 {
741 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
742 	bool clk_pwr_off = false;
743 	int ret = 0;
744 
745 	/*
746 	 * In case ufs_mtk_init() is not yet done, simply ignore.
747 	 * This ufs_mtk_setup_clocks() shall be called from
748 	 * ufs_mtk_init() after init is done.
749 	 */
750 	if (!host)
751 		return 0;
752 
753 	if (!on && status == PRE_CHANGE) {
754 		if (ufshcd_is_link_off(hba)) {
755 			clk_pwr_off = true;
756 		} else if (ufshcd_is_link_hibern8(hba) ||
757 			 (!ufshcd_can_hibern8_during_gating(hba) &&
758 			 ufshcd_is_auto_hibern8_enabled(hba))) {
759 			/*
760 			 * Gate ref-clk and poweroff mphy if link state is in
761 			 * OFF or Hibern8 by either Auto-Hibern8 or
762 			 * ufshcd_link_state_transition().
763 			 */
764 			ret = ufs_mtk_wait_link_state(hba,
765 						      VS_LINK_HIBERN8,
766 						      15);
767 			if (!ret)
768 				clk_pwr_off = true;
769 		}
770 
771 		if (clk_pwr_off)
772 			ufs_mtk_pwr_ctrl(hba, false);
773 		ufs_mtk_mcq_disable_irq(hba);
774 	} else if (on && status == POST_CHANGE) {
775 		ufs_mtk_pwr_ctrl(hba, true);
776 		ufs_mtk_mcq_enable_irq(hba);
777 	}
778 
779 	return ret;
780 }
781 
ufs_mtk_get_controller_version(struct ufs_hba * hba)782 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
783 {
784 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
785 	int ret, ver = 0;
786 
787 	if (host->hw_ver.major)
788 		return;
789 
790 	/* Set default (minimum) version anyway */
791 	host->hw_ver.major = 2;
792 
793 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
794 	if (!ret) {
795 		if (ver >= UFS_UNIPRO_VER_1_8) {
796 			host->hw_ver.major = 3;
797 			/*
798 			 * Fix HCI version for some platforms with
799 			 * incorrect version
800 			 */
801 			if (hba->ufs_version < ufshci_version(3, 0))
802 				hba->ufs_version = ufshci_version(3, 0);
803 		}
804 	}
805 }
806 
ufs_mtk_get_ufs_hci_version(struct ufs_hba * hba)807 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
808 {
809 	return hba->ufs_version;
810 }
811 
812 /**
813  * ufs_mtk_init_clocks - Init mtk driver private clocks
814  *
815  * @hba: per adapter instance
816  */
ufs_mtk_init_clocks(struct ufs_hba * hba)817 static void ufs_mtk_init_clocks(struct ufs_hba *hba)
818 {
819 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
820 	struct list_head *head = &hba->clk_list_head;
821 	struct ufs_mtk_clk *mclk = &host->mclk;
822 	struct ufs_clk_info *clki, *clki_tmp;
823 
824 	/*
825 	 * Find private clocks and store them in struct ufs_mtk_clk.
826 	 * Remove "ufs_sel_min_src" and "ufs_sel_min_src" from list to avoid
827 	 * being switched on/off in clock gating.
828 	 */
829 	list_for_each_entry_safe(clki, clki_tmp, head, list) {
830 		if (!strcmp(clki->name, "ufs_sel")) {
831 			host->mclk.ufs_sel_clki = clki;
832 		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
833 			host->mclk.ufs_sel_max_clki = clki;
834 			clk_disable_unprepare(clki->clk);
835 			list_del(&clki->list);
836 		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
837 			host->mclk.ufs_sel_min_clki = clki;
838 			clk_disable_unprepare(clki->clk);
839 			list_del(&clki->list);
840 		}
841 	}
842 
843 	if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
844 	    !mclk->ufs_sel_min_clki) {
845 		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
846 		dev_info(hba->dev,
847 			 "%s: Clk-scaling not ready. Feature disabled.",
848 			 __func__);
849 	}
850 }
851 
852 #define MAX_VCC_NAME 30
ufs_mtk_vreg_fix_vcc(struct ufs_hba * hba)853 static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
854 {
855 	struct ufs_vreg_info *info = &hba->vreg_info;
856 	struct device_node *np = hba->dev->of_node;
857 	struct device *dev = hba->dev;
858 	char vcc_name[MAX_VCC_NAME];
859 	struct arm_smccc_res res;
860 	int err, ver;
861 
862 	if (hba->vreg_info.vcc)
863 		return 0;
864 
865 	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
866 		ufs_mtk_get_vcc_num(res);
867 		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
868 			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
869 		else
870 			return -ENODEV;
871 	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
872 		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
873 		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
874 	} else {
875 		return 0;
876 	}
877 
878 	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
879 	if (err)
880 		return err;
881 
882 	err = ufshcd_get_vreg(dev, info->vcc);
883 	if (err)
884 		return err;
885 
886 	err = regulator_enable(info->vcc->reg);
887 	if (!err) {
888 		info->vcc->enabled = true;
889 		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
890 	}
891 
892 	return err;
893 }
894 
ufs_mtk_vreg_fix_vccqx(struct ufs_hba * hba)895 static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
896 {
897 	struct ufs_vreg_info *info = &hba->vreg_info;
898 	struct ufs_vreg **vreg_on, **vreg_off;
899 
900 	if (hba->dev_info.wspecversion >= 0x0300) {
901 		vreg_on = &info->vccq;
902 		vreg_off = &info->vccq2;
903 	} else {
904 		vreg_on = &info->vccq2;
905 		vreg_off = &info->vccq;
906 	}
907 
908 	if (*vreg_on)
909 		(*vreg_on)->always_on = true;
910 
911 	if (*vreg_off) {
912 		regulator_disable((*vreg_off)->reg);
913 		devm_kfree(hba->dev, (*vreg_off)->name);
914 		devm_kfree(hba->dev, *vreg_off);
915 		*vreg_off = NULL;
916 	}
917 }
918 
ufs_mtk_init_mcq_irq(struct ufs_hba * hba)919 static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
920 {
921 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
922 	struct platform_device *pdev;
923 	int i;
924 	int irq;
925 
926 	host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
927 	pdev = container_of(hba->dev, struct platform_device, dev);
928 
929 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
930 		goto failed;
931 
932 	for (i = 0; i < host->mcq_nr_intr; i++) {
933 		/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
934 		irq = platform_get_irq(pdev, i + 1);
935 		if (irq < 0) {
936 			host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
937 			goto failed;
938 		}
939 		host->mcq_intr_info[i].hba = hba;
940 		host->mcq_intr_info[i].irq = irq;
941 		dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
942 	}
943 
944 	return;
945 failed:
946        /* invalidate irq info */
947 	for (i = 0; i < host->mcq_nr_intr; i++)
948 		host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
949 
950 	host->mcq_nr_intr = 0;
951 }
952 
953 /**
954  * ufs_mtk_init - find other essential mmio bases
955  * @hba: host controller instance
956  *
957  * Binds PHY with controller and powers up PHY enabling clocks
958  * and regulators.
959  *
960  * Return: -EPROBE_DEFER if binding fails, returns negative error
961  * on phy power up failure and returns zero on success.
962  */
ufs_mtk_init(struct ufs_hba * hba)963 static int ufs_mtk_init(struct ufs_hba *hba)
964 {
965 	const struct of_device_id *id;
966 	struct device *dev = hba->dev;
967 	struct ufs_mtk_host *host;
968 	struct Scsi_Host *shost = hba->host;
969 	int err = 0;
970 	struct arm_smccc_res res;
971 
972 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
973 	if (!host) {
974 		err = -ENOMEM;
975 		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
976 		goto out;
977 	}
978 
979 	host->hba = hba;
980 	ufshcd_set_variant(hba, host);
981 
982 	id = of_match_device(ufs_mtk_of_match, dev);
983 	if (!id) {
984 		err = -EINVAL;
985 		goto out;
986 	}
987 
988 	/* Initialize host capability */
989 	ufs_mtk_init_host_caps(hba);
990 
991 	ufs_mtk_init_mcq_irq(hba);
992 
993 	err = ufs_mtk_bind_mphy(hba);
994 	if (err)
995 		goto out_variant_clear;
996 
997 	ufs_mtk_init_reset(hba);
998 
999 	/* backup mphy setting if mphy can reset */
1000 	if (host->mphy_reset)
1001 		ufs_mtk_mphy_ctrl(UFS_MPHY_BACKUP, res);
1002 
1003 	/* Enable runtime autosuspend */
1004 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
1005 
1006 	/* Enable clock-gating */
1007 	hba->caps |= UFSHCD_CAP_CLK_GATING;
1008 
1009 	/* Enable inline encryption */
1010 	hba->caps |= UFSHCD_CAP_CRYPTO;
1011 
1012 	/* Enable WriteBooster */
1013 	hba->caps |= UFSHCD_CAP_WB_EN;
1014 
1015 	/* Enable clk scaling*/
1016 	hba->caps |= UFSHCD_CAP_CLK_SCALING;
1017 
1018 	/* Set runtime pm delay to replace default */
1019 	shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
1020 
1021 	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
1022 	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
1023 	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
1024 	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
1025 
1026 	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
1027 		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1028 
1029 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
1030 		hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP;
1031 
1032 	ufs_mtk_init_clocks(hba);
1033 
1034 	/*
1035 	 * ufshcd_vops_init() is invoked after
1036 	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
1037 	 * phy clock setup is skipped.
1038 	 *
1039 	 * Enable phy clocks specifically here.
1040 	 */
1041 	ufs_mtk_mphy_power_on(hba, true);
1042 
1043 	if (ufs_mtk_is_rtff_mtcmos(hba)) {
1044 		/* First Restore here, to avoid backup unexpected value */
1045 		ufs_mtk_mtcmos_ctrl(false, res);
1046 
1047 		/* Power on to init */
1048 		ufs_mtk_mtcmos_ctrl(true, res);
1049 	}
1050 
1051 	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
1052 
1053 	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
1054 
1055 	goto out;
1056 
1057 out_variant_clear:
1058 	ufshcd_set_variant(hba, NULL);
1059 out:
1060 	return err;
1061 }
1062 
ufs_mtk_pmc_via_fastauto(struct ufs_hba * hba,struct ufs_pa_layer_attr * dev_req_params)1063 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
1064 				     struct ufs_pa_layer_attr *dev_req_params)
1065 {
1066 	if (!ufs_mtk_is_pmc_via_fastauto(hba))
1067 		return false;
1068 
1069 	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
1070 		return false;
1071 
1072 	if (dev_req_params->pwr_tx != FAST_MODE &&
1073 	    dev_req_params->gear_tx < UFS_HS_G4)
1074 		return false;
1075 
1076 	if (dev_req_params->pwr_rx != FAST_MODE &&
1077 	    dev_req_params->gear_rx < UFS_HS_G4)
1078 		return false;
1079 
1080 	return true;
1081 }
1082 
ufs_mtk_pre_pwr_change(struct ufs_hba * hba,struct ufs_pa_layer_attr * dev_max_params,struct ufs_pa_layer_attr * dev_req_params)1083 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
1084 				  struct ufs_pa_layer_attr *dev_max_params,
1085 				  struct ufs_pa_layer_attr *dev_req_params)
1086 {
1087 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1088 	struct ufs_host_params host_params;
1089 	int ret;
1090 
1091 	ufshcd_init_host_params(&host_params);
1092 	host_params.hs_rx_gear = UFS_HS_G5;
1093 	host_params.hs_tx_gear = UFS_HS_G5;
1094 
1095 	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
1096 	if (ret) {
1097 		pr_info("%s: failed to determine capabilities\n",
1098 			__func__);
1099 	}
1100 
1101 	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
1102 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
1103 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
1104 
1105 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
1106 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
1107 
1108 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
1109 			       dev_req_params->lane_tx);
1110 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
1111 			       dev_req_params->lane_rx);
1112 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
1113 			       dev_req_params->hs_rate);
1114 
1115 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
1116 			       PA_NO_ADAPT);
1117 
1118 		ret = ufshcd_uic_change_pwr_mode(hba,
1119 					FASTAUTO_MODE << 4 | FASTAUTO_MODE);
1120 
1121 		if (ret) {
1122 			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
1123 				__func__, ret);
1124 		}
1125 	}
1126 
1127 	if (host->hw_ver.major >= 3) {
1128 		ret = ufshcd_dme_configure_adapt(hba,
1129 					   dev_req_params->gear_tx,
1130 					   PA_INITIAL_ADAPT);
1131 	}
1132 
1133 	return ret;
1134 }
1135 
ufs_mtk_pwr_change_notify(struct ufs_hba * hba,enum ufs_notify_change_status stage,struct ufs_pa_layer_attr * dev_max_params,struct ufs_pa_layer_attr * dev_req_params)1136 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
1137 				     enum ufs_notify_change_status stage,
1138 				     struct ufs_pa_layer_attr *dev_max_params,
1139 				     struct ufs_pa_layer_attr *dev_req_params)
1140 {
1141 	int ret = 0;
1142 
1143 	switch (stage) {
1144 	case PRE_CHANGE:
1145 		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
1146 					     dev_req_params);
1147 		break;
1148 	case POST_CHANGE:
1149 		break;
1150 	default:
1151 		ret = -EINVAL;
1152 		break;
1153 	}
1154 
1155 	return ret;
1156 }
1157 
ufs_mtk_unipro_set_lpm(struct ufs_hba * hba,bool lpm)1158 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
1159 {
1160 	int ret;
1161 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1162 
1163 	ret = ufshcd_dme_set(hba,
1164 			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
1165 			     lpm ? 1 : 0);
1166 	if (!ret || !lpm) {
1167 		/*
1168 		 * Forcibly set as non-LPM mode if UIC commands is failed
1169 		 * to use default hba_enable_delay_us value for re-enabling
1170 		 * the host.
1171 		 */
1172 		host->unipro_lpm = lpm;
1173 	}
1174 
1175 	return ret;
1176 }
1177 
ufs_mtk_pre_link(struct ufs_hba * hba)1178 static int ufs_mtk_pre_link(struct ufs_hba *hba)
1179 {
1180 	int ret;
1181 	u32 tmp;
1182 
1183 	ufs_mtk_get_controller_version(hba);
1184 
1185 	ret = ufs_mtk_unipro_set_lpm(hba, false);
1186 	if (ret)
1187 		return ret;
1188 
1189 	/*
1190 	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
1191 	 * to make sure that both host and device TX LCC are disabled
1192 	 * once link startup is completed.
1193 	 */
1194 	ret = ufshcd_disable_host_tx_lcc(hba);
1195 	if (ret)
1196 		return ret;
1197 
1198 	/* disable deep stall */
1199 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
1200 	if (ret)
1201 		return ret;
1202 
1203 	tmp &= ~(1 << 6);
1204 
1205 	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
1206 
1207 	return ret;
1208 }
1209 
ufs_mtk_setup_clk_gating(struct ufs_hba * hba)1210 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
1211 {
1212 	u32 ah_ms;
1213 
1214 	if (ufshcd_is_clkgating_allowed(hba)) {
1215 		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
1216 			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
1217 					  hba->ahit);
1218 		else
1219 			ah_ms = 10;
1220 		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
1221 	}
1222 }
1223 
ufs_mtk_post_link(struct ufs_hba * hba)1224 static void ufs_mtk_post_link(struct ufs_hba *hba)
1225 {
1226 	/* enable unipro clock gating feature */
1227 	ufs_mtk_cfg_unipro_cg(hba, true);
1228 
1229 	/* will be configured during probe hba */
1230 	if (ufshcd_is_auto_hibern8_supported(hba))
1231 		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
1232 			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
1233 
1234 	ufs_mtk_setup_clk_gating(hba);
1235 }
1236 
ufs_mtk_link_startup_notify(struct ufs_hba * hba,enum ufs_notify_change_status stage)1237 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
1238 				       enum ufs_notify_change_status stage)
1239 {
1240 	int ret = 0;
1241 
1242 	switch (stage) {
1243 	case PRE_CHANGE:
1244 		ret = ufs_mtk_pre_link(hba);
1245 		break;
1246 	case POST_CHANGE:
1247 		ufs_mtk_post_link(hba);
1248 		break;
1249 	default:
1250 		ret = -EINVAL;
1251 		break;
1252 	}
1253 
1254 	return ret;
1255 }
1256 
ufs_mtk_device_reset(struct ufs_hba * hba)1257 static int ufs_mtk_device_reset(struct ufs_hba *hba)
1258 {
1259 	struct arm_smccc_res res;
1260 
1261 	/* disable hba before device reset */
1262 	ufshcd_hba_stop(hba);
1263 
1264 	ufs_mtk_device_reset_ctrl(0, res);
1265 
1266 	/*
1267 	 * The reset signal is active low. UFS devices shall detect
1268 	 * more than or equal to 1us of positive or negative RST_n
1269 	 * pulse width.
1270 	 *
1271 	 * To be on safe side, keep the reset low for at least 10us.
1272 	 */
1273 	usleep_range(10, 15);
1274 
1275 	ufs_mtk_device_reset_ctrl(1, res);
1276 
1277 	/* Some devices may need time to respond to rst_n */
1278 	usleep_range(10000, 15000);
1279 
1280 	dev_info(hba->dev, "device reset done\n");
1281 
1282 	return 0;
1283 }
1284 
ufs_mtk_link_set_hpm(struct ufs_hba * hba)1285 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
1286 {
1287 	int err;
1288 
1289 	err = ufshcd_hba_enable(hba);
1290 	if (err)
1291 		return err;
1292 
1293 	err = ufs_mtk_unipro_set_lpm(hba, false);
1294 	if (err)
1295 		return err;
1296 
1297 	err = ufshcd_uic_hibern8_exit(hba);
1298 	if (err)
1299 		return err;
1300 
1301 	/* Check link state to make sure exit h8 success */
1302 	ufs_mtk_wait_idle_state(hba, 5);
1303 	err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1304 	if (err) {
1305 		dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
1306 		return err;
1307 	}
1308 	ufshcd_set_link_active(hba);
1309 
1310 	err = ufshcd_make_hba_operational(hba);
1311 	if (err)
1312 		return err;
1313 
1314 	if (hba->mcq_enabled) {
1315 		ufs_mtk_config_mcq(hba, false);
1316 		ufshcd_mcq_make_queues_operational(hba);
1317 		ufshcd_mcq_config_mac(hba, hba->nutrs);
1318 		ufshcd_mcq_enable(hba);
1319 	}
1320 
1321 	return 0;
1322 }
1323 
ufs_mtk_link_set_lpm(struct ufs_hba * hba)1324 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
1325 {
1326 	int err;
1327 
1328 	/* Disable reset confirm feature by UniPro */
1329 	ufshcd_writel(hba,
1330 		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
1331 		      REG_UFS_XOUFS_CTRL);
1332 
1333 	err = ufs_mtk_unipro_set_lpm(hba, true);
1334 	if (err) {
1335 		/* Resume UniPro state for following error recovery */
1336 		ufs_mtk_unipro_set_lpm(hba, false);
1337 		return err;
1338 	}
1339 
1340 	return 0;
1341 }
1342 
ufs_mtk_vccqx_set_lpm(struct ufs_hba * hba,bool lpm)1343 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
1344 {
1345 	struct ufs_vreg *vccqx = NULL;
1346 
1347 	if (hba->vreg_info.vccq)
1348 		vccqx = hba->vreg_info.vccq;
1349 	else
1350 		vccqx = hba->vreg_info.vccq2;
1351 
1352 	regulator_set_mode(vccqx->reg,
1353 			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
1354 }
1355 
ufs_mtk_vsx_set_lpm(struct ufs_hba * hba,bool lpm)1356 static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
1357 {
1358 	struct arm_smccc_res res;
1359 
1360 	ufs_mtk_device_pwr_ctrl(!lpm,
1361 				(unsigned long)hba->dev_info.wspecversion,
1362 				res);
1363 }
1364 
ufs_mtk_dev_vreg_set_lpm(struct ufs_hba * hba,bool lpm)1365 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
1366 {
1367 	bool skip_vccqx = false;
1368 
1369 	/* Prevent entering LPM when device is still active */
1370 	if (lpm && ufshcd_is_ufs_dev_active(hba))
1371 		return;
1372 
1373 	/* Skip vccqx lpm control and control vsx only */
1374 	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
1375 		skip_vccqx = true;
1376 
1377 	/* VCC is always-on, control vsx only */
1378 	if (!hba->vreg_info.vcc)
1379 		skip_vccqx = true;
1380 
1381 	/* Broken vcc keep vcc always on, most case control vsx only */
1382 	if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) {
1383 		/* Some device vccqx/vsx can enter lpm */
1384 		if (ufs_mtk_is_allow_vccqx_lpm(hba))
1385 			skip_vccqx = false;
1386 		else /* control vsx only */
1387 			skip_vccqx = true;
1388 	}
1389 
1390 	if (lpm) {
1391 		if (!skip_vccqx)
1392 			ufs_mtk_vccqx_set_lpm(hba, lpm);
1393 		ufs_mtk_vsx_set_lpm(hba, lpm);
1394 	} else {
1395 		ufs_mtk_vsx_set_lpm(hba, lpm);
1396 		if (!skip_vccqx)
1397 			ufs_mtk_vccqx_set_lpm(hba, lpm);
1398 	}
1399 }
1400 
ufs_mtk_auto_hibern8_disable(struct ufs_hba * hba)1401 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
1402 {
1403 	int ret;
1404 
1405 	/* disable auto-hibern8 */
1406 	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
1407 
1408 	/* wait host return to idle state when auto-hibern8 off */
1409 	ufs_mtk_wait_idle_state(hba, 5);
1410 
1411 	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1412 	if (ret)
1413 		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
1414 }
1415 
ufs_mtk_suspend(struct ufs_hba * hba,enum ufs_pm_op pm_op,enum ufs_notify_change_status status)1416 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
1417 	enum ufs_notify_change_status status)
1418 {
1419 	int err;
1420 	struct arm_smccc_res res;
1421 
1422 	if (status == PRE_CHANGE) {
1423 		if (ufshcd_is_auto_hibern8_supported(hba))
1424 			ufs_mtk_auto_hibern8_disable(hba);
1425 		return 0;
1426 	}
1427 
1428 	if (ufshcd_is_link_hibern8(hba)) {
1429 		err = ufs_mtk_link_set_lpm(hba);
1430 		if (err)
1431 			goto fail;
1432 	}
1433 
1434 	if (!ufshcd_is_link_active(hba)) {
1435 		/*
1436 		 * Make sure no error will be returned to prevent
1437 		 * ufshcd_suspend() re-enabling regulators while vreg is still
1438 		 * in low-power mode.
1439 		 */
1440 		err = ufs_mtk_mphy_power_on(hba, false);
1441 		if (err)
1442 			goto fail;
1443 	}
1444 
1445 	if (ufshcd_is_link_off(hba))
1446 		ufs_mtk_device_reset_ctrl(0, res);
1447 
1448 	ufs_mtk_sram_pwr_ctrl(false, res);
1449 
1450 	return 0;
1451 fail:
1452 	/*
1453 	 * Set link as off state enforcedly to trigger
1454 	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
1455 	 * for completed host reset.
1456 	 */
1457 	ufshcd_set_link_off(hba);
1458 	return -EAGAIN;
1459 }
1460 
ufs_mtk_resume(struct ufs_hba * hba,enum ufs_pm_op pm_op)1461 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1462 {
1463 	int err;
1464 	struct arm_smccc_res res;
1465 
1466 	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1467 		ufs_mtk_dev_vreg_set_lpm(hba, false);
1468 
1469 	ufs_mtk_sram_pwr_ctrl(true, res);
1470 
1471 	err = ufs_mtk_mphy_power_on(hba, true);
1472 	if (err)
1473 		goto fail;
1474 
1475 	if (ufshcd_is_link_hibern8(hba)) {
1476 		err = ufs_mtk_link_set_hpm(hba);
1477 		if (err)
1478 			goto fail;
1479 	}
1480 
1481 	return 0;
1482 fail:
1483 	return ufshcd_link_recovery(hba);
1484 }
1485 
ufs_mtk_dbg_register_dump(struct ufs_hba * hba)1486 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
1487 {
1488 	/* Dump ufshci register 0x140 ~ 0x14C */
1489 	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
1490 			 "XOUFS Ctrl (0x140): ");
1491 
1492 	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
1493 
1494 	/* Dump ufshci register 0x2200 ~ 0x22AC */
1495 	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
1496 			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
1497 			 "MPHY Ctrl (0x2200): ");
1498 
1499 	/* Direct debugging information to REG_MTK_PROBE */
1500 	ufs_mtk_dbg_sel(hba);
1501 	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
1502 }
1503 
ufs_mtk_apply_dev_quirks(struct ufs_hba * hba)1504 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1505 {
1506 	struct ufs_dev_info *dev_info = &hba->dev_info;
1507 	u16 mid = dev_info->wmanufacturerid;
1508 
1509 	if (mid == UFS_VENDOR_SAMSUNG) {
1510 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1511 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
1512 	} else if (mid == UFS_VENDOR_MICRON) {
1513 		/* Only for the host which have TX skew issue */
1514 		if (ufs_mtk_is_tx_skew_fix(hba) &&
1515 			(STR_PRFX_EQUAL("MT128GBCAV2U31", dev_info->model) ||
1516 			STR_PRFX_EQUAL("MT256GBCAV4U31", dev_info->model) ||
1517 			STR_PRFX_EQUAL("MT512GBCAV8U31", dev_info->model) ||
1518 			STR_PRFX_EQUAL("MT256GBEAX4U40", dev_info->model) ||
1519 			STR_PRFX_EQUAL("MT512GAYAX4U40", dev_info->model) ||
1520 			STR_PRFX_EQUAL("MT001TAYAX8U40", dev_info->model))) {
1521 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8);
1522 		}
1523 	}
1524 
1525 	/*
1526 	 * Decide waiting time before gating reference clock and
1527 	 * after ungating reference clock according to vendors'
1528 	 * requirements.
1529 	 */
1530 	if (mid == UFS_VENDOR_SAMSUNG)
1531 		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
1532 	else if (mid == UFS_VENDOR_SKHYNIX)
1533 		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
1534 	else if (mid == UFS_VENDOR_TOSHIBA)
1535 		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
1536 	else
1537 		ufs_mtk_setup_ref_clk_wait_us(hba,
1538 					      REFCLK_DEFAULT_WAIT_US);
1539 	return 0;
1540 }
1541 
ufs_mtk_fixup_dev_quirks(struct ufs_hba * hba)1542 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1543 {
1544 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1545 
1546 	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1547 	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1548 		hba->vreg_info.vcc->always_on = true;
1549 		/*
1550 		 * VCC will be kept always-on thus we don't
1551 		 * need any delay during regulator operations
1552 		 */
1553 		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1554 			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1555 	}
1556 
1557 	ufs_mtk_vreg_fix_vcc(hba);
1558 	ufs_mtk_vreg_fix_vccqx(hba);
1559 }
1560 
ufs_mtk_event_notify(struct ufs_hba * hba,enum ufs_event_type evt,void * data)1561 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1562 				 enum ufs_event_type evt, void *data)
1563 {
1564 	unsigned int val = *(u32 *)data;
1565 	unsigned long reg;
1566 	u8 bit;
1567 
1568 	trace_ufs_mtk_event(evt, val);
1569 
1570 	/* Print details of UIC Errors */
1571 	if (evt <= UFS_EVT_DME_ERR) {
1572 		dev_info(hba->dev,
1573 			 "Host UIC Error Code (%s): %08x\n",
1574 			 ufs_uic_err_str[evt], val);
1575 		reg = val;
1576 	}
1577 
1578 	if (evt == UFS_EVT_PA_ERR) {
1579 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
1580 			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
1581 	}
1582 
1583 	if (evt == UFS_EVT_DL_ERR) {
1584 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
1585 			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
1586 	}
1587 }
1588 
ufs_mtk_config_scaling_param(struct ufs_hba * hba,struct devfreq_dev_profile * profile,struct devfreq_simple_ondemand_data * data)1589 static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
1590 				struct devfreq_dev_profile *profile,
1591 				struct devfreq_simple_ondemand_data *data)
1592 {
1593 	/* Customize min gear in clk scaling */
1594 	hba->clk_scaling.min_gear = UFS_HS_G4;
1595 
1596 	hba->vps->devfreq_profile.polling_ms = 200;
1597 	hba->vps->ondemand_data.upthreshold = 50;
1598 	hba->vps->ondemand_data.downdifferential = 20;
1599 }
1600 
1601 /**
1602  * ufs_mtk_clk_scale - Internal clk scaling operation
1603  *
1604  * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
1605  * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
1606  * Max and min clocks rate of ufs_sel defined in dts should match rate of
1607  * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
1608  * This prevent changing rate of pll clock that is shared between modules.
1609  *
1610  * @hba: per adapter instance
1611  * @scale_up: True for scaling up and false for scaling down
1612  */
ufs_mtk_clk_scale(struct ufs_hba * hba,bool scale_up)1613 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
1614 {
1615 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1616 	struct ufs_mtk_clk *mclk = &host->mclk;
1617 	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
1618 	int ret = 0;
1619 
1620 	ret = clk_prepare_enable(clki->clk);
1621 	if (ret) {
1622 		dev_info(hba->dev,
1623 			 "clk_prepare_enable() fail, ret: %d\n", ret);
1624 		return;
1625 	}
1626 
1627 	if (scale_up) {
1628 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
1629 		clki->curr_freq = clki->max_freq;
1630 	} else {
1631 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
1632 		clki->curr_freq = clki->min_freq;
1633 	}
1634 
1635 	if (ret) {
1636 		dev_info(hba->dev,
1637 			 "Failed to set ufs_sel_clki, ret: %d\n", ret);
1638 	}
1639 
1640 	clk_disable_unprepare(clki->clk);
1641 
1642 	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
1643 }
1644 
ufs_mtk_clk_scale_notify(struct ufs_hba * hba,bool scale_up,enum ufs_notify_change_status status)1645 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
1646 				    enum ufs_notify_change_status status)
1647 {
1648 	if (!ufshcd_is_clkscaling_supported(hba))
1649 		return 0;
1650 
1651 	if (status == PRE_CHANGE) {
1652 		/* Switch parent before clk_set_rate() */
1653 		ufs_mtk_clk_scale(hba, scale_up);
1654 	} else {
1655 		/* Request interrupt latency QoS accordingly */
1656 		ufs_mtk_scale_perf(hba, scale_up);
1657 	}
1658 
1659 	return 0;
1660 }
1661 
ufs_mtk_get_hba_mac(struct ufs_hba * hba)1662 static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
1663 {
1664 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1665 
1666 	/* MCQ operation not permitted */
1667 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
1668 		return -EPERM;
1669 
1670 	return MAX_SUPP_MAC;
1671 }
1672 
ufs_mtk_op_runtime_config(struct ufs_hba * hba)1673 static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
1674 {
1675 	struct ufshcd_mcq_opr_info_t *opr;
1676 	int i;
1677 
1678 	hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
1679 	hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
1680 	hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
1681 	hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;
1682 
1683 	for (i = 0; i < OPR_MAX; i++) {
1684 		opr = &hba->mcq_opr[i];
1685 		opr->stride = REG_UFS_MCQ_STRIDE;
1686 		opr->base = hba->mmio_base + opr->offset;
1687 	}
1688 
1689 	return 0;
1690 }
1691 
ufs_mtk_mcq_config_resource(struct ufs_hba * hba)1692 static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
1693 {
1694 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1695 
1696 	/* fail mcq initialization if interrupt is not filled properly */
1697 	if (!host->mcq_nr_intr) {
1698 		dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
1699 		return -EINVAL;
1700 	}
1701 
1702 	hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
1703 	return 0;
1704 }
1705 
ufs_mtk_mcq_intr(int irq,void * __intr_info)1706 static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
1707 {
1708 	struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
1709 	struct ufs_hba *hba = mcq_intr_info->hba;
1710 	struct ufs_hw_queue *hwq;
1711 	u32 events;
1712 	int qid = mcq_intr_info->qid;
1713 
1714 	hwq = &hba->uhq[qid];
1715 
1716 	events = ufshcd_mcq_read_cqis(hba, qid);
1717 	if (events)
1718 		ufshcd_mcq_write_cqis(hba, events, qid);
1719 
1720 	if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
1721 		ufshcd_mcq_poll_cqe_lock(hba, hwq);
1722 
1723 	return IRQ_HANDLED;
1724 }
1725 
ufs_mtk_config_mcq_irq(struct ufs_hba * hba)1726 static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
1727 {
1728 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1729 	u32 irq, i;
1730 	int ret;
1731 
1732 	for (i = 0; i < host->mcq_nr_intr; i++) {
1733 		irq = host->mcq_intr_info[i].irq;
1734 		if (irq == MTK_MCQ_INVALID_IRQ) {
1735 			dev_err(hba->dev, "invalid irq. %d\n", i);
1736 			return -ENOPARAM;
1737 		}
1738 
1739 		host->mcq_intr_info[i].qid = i;
1740 		ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
1741 				       &host->mcq_intr_info[i]);
1742 
1743 		dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");
1744 
1745 		if (ret) {
1746 			dev_err(hba->dev, "Cannot request irq %d\n", ret);
1747 			return ret;
1748 		}
1749 	}
1750 
1751 	return 0;
1752 }
1753 
ufs_mtk_config_mcq(struct ufs_hba * hba,bool irq)1754 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
1755 {
1756 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1757 	int ret = 0;
1758 
1759 	if (!host->mcq_set_intr) {
1760 		/* Disable irq option register */
1761 		ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
1762 
1763 		if (irq) {
1764 			ret = ufs_mtk_config_mcq_irq(hba);
1765 			if (ret)
1766 				return ret;
1767 		}
1768 
1769 		host->mcq_set_intr = true;
1770 	}
1771 
1772 	ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
1773 	ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
1774 
1775 	return 0;
1776 }
1777 
ufs_mtk_config_esi(struct ufs_hba * hba)1778 static int ufs_mtk_config_esi(struct ufs_hba *hba)
1779 {
1780 	return ufs_mtk_config_mcq(hba, true);
1781 }
1782 
1783 /*
1784  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
1785  *
1786  * The variant operations configure the necessary controller and PHY
1787  * handshake during initialization.
1788  */
1789 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
1790 	.name                = "mediatek.ufshci",
1791 	.max_num_rtt         = MTK_MAX_NUM_RTT,
1792 	.init                = ufs_mtk_init,
1793 	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
1794 	.setup_clocks        = ufs_mtk_setup_clocks,
1795 	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
1796 	.link_startup_notify = ufs_mtk_link_startup_notify,
1797 	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
1798 	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
1799 	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
1800 	.suspend             = ufs_mtk_suspend,
1801 	.resume              = ufs_mtk_resume,
1802 	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
1803 	.device_reset        = ufs_mtk_device_reset,
1804 	.event_notify        = ufs_mtk_event_notify,
1805 	.config_scaling_param = ufs_mtk_config_scaling_param,
1806 	.clk_scale_notify    = ufs_mtk_clk_scale_notify,
1807 	/* mcq vops */
1808 	.get_hba_mac         = ufs_mtk_get_hba_mac,
1809 	.op_runtime_config   = ufs_mtk_op_runtime_config,
1810 	.mcq_config_resource = ufs_mtk_mcq_config_resource,
1811 	.config_esi          = ufs_mtk_config_esi,
1812 };
1813 
1814 /**
1815  * ufs_mtk_probe - probe routine of the driver
1816  * @pdev: pointer to Platform device handle
1817  *
1818  * Return: zero for success and non-zero for failure.
1819  */
ufs_mtk_probe(struct platform_device * pdev)1820 static int ufs_mtk_probe(struct platform_device *pdev)
1821 {
1822 	int err;
1823 	struct device *dev = &pdev->dev;
1824 	struct device_node *reset_node;
1825 	struct platform_device *reset_pdev;
1826 	struct device_link *link;
1827 
1828 	reset_node = of_find_compatible_node(NULL, NULL,
1829 					     "ti,syscon-reset");
1830 	if (!reset_node) {
1831 		dev_notice(dev, "find ti,syscon-reset fail\n");
1832 		goto skip_reset;
1833 	}
1834 	reset_pdev = of_find_device_by_node(reset_node);
1835 	if (!reset_pdev) {
1836 		dev_notice(dev, "find reset_pdev fail\n");
1837 		goto skip_reset;
1838 	}
1839 	link = device_link_add(dev, &reset_pdev->dev,
1840 		DL_FLAG_AUTOPROBE_CONSUMER);
1841 	put_device(&reset_pdev->dev);
1842 	if (!link) {
1843 		dev_notice(dev, "add reset device_link fail\n");
1844 		goto skip_reset;
1845 	}
1846 	/* supplier is not probed */
1847 	if (link->status == DL_STATE_DORMANT) {
1848 		err = -EPROBE_DEFER;
1849 		goto out;
1850 	}
1851 
1852 skip_reset:
1853 	/* perform generic probe */
1854 	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1855 
1856 out:
1857 	if (err)
1858 		dev_err(dev, "probe failed %d\n", err);
1859 
1860 	of_node_put(reset_node);
1861 	return err;
1862 }
1863 
1864 /**
1865  * ufs_mtk_remove - set driver_data of the device to NULL
1866  * @pdev: pointer to platform device handle
1867  *
1868  * Always return 0
1869  */
ufs_mtk_remove(struct platform_device * pdev)1870 static void ufs_mtk_remove(struct platform_device *pdev)
1871 {
1872 	struct ufs_hba *hba =  platform_get_drvdata(pdev);
1873 
1874 	pm_runtime_get_sync(&(pdev)->dev);
1875 	ufshcd_remove(hba);
1876 }
1877 
1878 #ifdef CONFIG_PM_SLEEP
ufs_mtk_system_suspend(struct device * dev)1879 static int ufs_mtk_system_suspend(struct device *dev)
1880 {
1881 	struct ufs_hba *hba = dev_get_drvdata(dev);
1882 	struct arm_smccc_res res;
1883 	int ret;
1884 
1885 	ret = ufshcd_system_suspend(dev);
1886 	if (ret)
1887 		return ret;
1888 
1889 	ufs_mtk_dev_vreg_set_lpm(hba, true);
1890 
1891 	if (ufs_mtk_is_rtff_mtcmos(hba))
1892 		ufs_mtk_mtcmos_ctrl(false, res);
1893 
1894 	return 0;
1895 }
1896 
ufs_mtk_system_resume(struct device * dev)1897 static int ufs_mtk_system_resume(struct device *dev)
1898 {
1899 	struct ufs_hba *hba = dev_get_drvdata(dev);
1900 	struct arm_smccc_res res;
1901 
1902 	ufs_mtk_dev_vreg_set_lpm(hba, false);
1903 
1904 	if (ufs_mtk_is_rtff_mtcmos(hba))
1905 		ufs_mtk_mtcmos_ctrl(true, res);
1906 
1907 	return ufshcd_system_resume(dev);
1908 }
1909 #endif
1910 
1911 #ifdef CONFIG_PM
ufs_mtk_runtime_suspend(struct device * dev)1912 static int ufs_mtk_runtime_suspend(struct device *dev)
1913 {
1914 	struct ufs_hba *hba = dev_get_drvdata(dev);
1915 	struct arm_smccc_res res;
1916 	int ret = 0;
1917 
1918 	ret = ufshcd_runtime_suspend(dev);
1919 	if (ret)
1920 		return ret;
1921 
1922 	ufs_mtk_dev_vreg_set_lpm(hba, true);
1923 
1924 	if (ufs_mtk_is_rtff_mtcmos(hba))
1925 		ufs_mtk_mtcmos_ctrl(false, res);
1926 
1927 	return 0;
1928 }
1929 
ufs_mtk_runtime_resume(struct device * dev)1930 static int ufs_mtk_runtime_resume(struct device *dev)
1931 {
1932 	struct ufs_hba *hba = dev_get_drvdata(dev);
1933 	struct arm_smccc_res res;
1934 
1935 	if (ufs_mtk_is_rtff_mtcmos(hba))
1936 		ufs_mtk_mtcmos_ctrl(true, res);
1937 
1938 	ufs_mtk_dev_vreg_set_lpm(hba, false);
1939 
1940 	return ufshcd_runtime_resume(dev);
1941 }
1942 #endif
1943 
1944 static const struct dev_pm_ops ufs_mtk_pm_ops = {
1945 	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
1946 				ufs_mtk_system_resume)
1947 	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
1948 			   ufs_mtk_runtime_resume, NULL)
1949 	.prepare	 = ufshcd_suspend_prepare,
1950 	.complete	 = ufshcd_resume_complete,
1951 };
1952 
1953 static struct platform_driver ufs_mtk_pltform = {
1954 	.probe      = ufs_mtk_probe,
1955 	.remove_new = ufs_mtk_remove,
1956 	.driver = {
1957 		.name   = "ufshcd-mtk",
1958 		.pm     = &ufs_mtk_pm_ops,
1959 		.of_match_table = ufs_mtk_of_match,
1960 	},
1961 };
1962 
1963 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
1964 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
1965 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
1966 MODULE_LICENSE("GPL v2");
1967 
1968 module_platform_driver(ufs_mtk_pltform);
1969