// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon Hixxxx UFS Driver
 *
 * Copyright (c) 2016-2017 Linaro Ltd.
 * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
 */

#include <linux/time.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/unipro.h>
#include "ufs-hisi.h"
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>

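/*
 * Poll the M-PHY TX FSM state of both lanes until each reports
 * TX_FSM_HIBERN8 or HBRN8_POLL_TOUT_MS expires. Returns 0 on success,
 * an error from the DME accesses, or -1 if the lanes never reached
 * HIBERN8.
 */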
static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
{
	int err = 0;
	u32 tx_fsm_val_0 = 0;
	u32 tx_fsm_val_1 = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
				     &tx_fsm_val_0);
		err |= ufshcd_dme_get(hba,
		    UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
		if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
			tx_fsm_val_1 == TX_FSM_HIBERN8))
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * We might have been scheduled out for a long time during polling,
	 * so check the state again.
	 */
	if (time_after(jiffies, timeout)) {
		err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
				     &tx_fsm_val_0);
		err |= ufshcd_dme_get(hba,
		 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
	}

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
			__func__, err);
	} else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
			 tx_fsm_val_1 != TX_FSM_HIBERN8) {
		err = -1;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
			__func__, tx_fsm_val_0, tx_fsm_val_1);
	}

	return err;
}

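/*
 * Select the ABB clock as the M-PHY reference clock source: gate the
 * reference clock, switch the source, drop the refclk isolation and
 * re-enable the clock.
 */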
static void ufs_hisi_clk_init(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
	if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
		mdelay(1);
	/* use abb clk */
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
	/* open mphy ref clk */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
}

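/*
 * Bring the UFS host controller and PHY out of reset: power up the PSW
 * MTCMOS, program the cfg/ref clock frequencies, bypass the clock
 * gates, drop the isolation cells and toggle the device reset line.
 */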
static void ufs_hisi_soc_init(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
	u32 reg;

	if (!IS_ERR(host->rst))
		reset_control_assert(host->rst);

	/* HC_PSW powerup */
	ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
	udelay(10);
	/* notify PWR ready */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
	ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
		UFS_DEVICE_RESET_CTRL);

	reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
	reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
	/* set cfg clk freq */
	ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
	/* set ref clk freq */
	ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
	/* bypass ufs clk gate */
	ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
						 CLOCK_GATE_BYPASS);
	ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);

	/* open psw clk */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
	/* disable ufshc iso */
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
	/* disable phy iso */
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
	/* notify that isolation is disabled */
	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);

	/* disable lp_reset_n */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
	mdelay(1);

	ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
		UFS_DEVICE_RESET_CTRL);

	msleep(20);

	/*
	 * Enable the linereset recovery fix and the rx_reset/tx_reset beat;
	 * enable the ref_clk_en override (bit 5) with an override value of
	 * 1 (bit 4), together with the corresponding mask bits.
	 */
	ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);

	if (!IS_ERR(host->rst))
		reset_control_deassert(host->rst);
}

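/*
 * Program vendor-specific Unipro and M-PHY attributes (raw MIB
 * addresses) before link startup: temporarily disable the M-PHY,
 * configure rate, override and timing attributes, wait for both TX
 * lanes to enter HIBERN8, then disable auto-hibernate, host TX LCC and
 * VS_Mk2ExtnSupport.
 */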
static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
	int err;
	uint32_t value;
	uint32_t reg;

	/* Unipro VS_mphy_disable */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
	/* PA_HSSeries */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
	/* MPHY CBRATESEL */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
	/* MPHY CBOVRCTRL2 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
	/* MPHY CBOVRCTRL3 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);

	if (host->caps & UFS_HISI_CAP_PHY10nm) {
		/* MPHY CBOVRCTRL4 */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
		/* MPHY CBOVRCTRL5 */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
	}

	/* Unipro VS_MphyCfgUpdt */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
	/* MPHY RXOVRCTRL4 rx0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
	/* MPHY RXOVRCTRL4 rx1 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
	/* MPHY RXOVRCTRL5 rx0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
	/* MPHY RXOVRCTRL5 rx1 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
	/* MPHY RXSQCONTROL rx0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
	/* MPHY RXSQCONTROL rx1 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
	/* Unipro VS_MphyCfgUpdt */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);

	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);

	if (host->caps & UFS_HISI_CAP_PHY10nm) {
		/* RX_Hibern8Time_Capability */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
		/* RX_Hibern8Time_Capability */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
		/* RX_Min_ActivateTime */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
		/* RX_Min_ActivateTime */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
	} else {
		/* Tactive RX */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
		/* Tactive RX */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
	}

	/* Gear3 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
	/* Gear3 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
	/* Gear2 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
	/* Gear2 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
	/* Gear1 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
	/* Gear1 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
	/* Thibernate Tx */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
	/* Thibernate Tx */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);

	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
	/* Unipro VS_mphy_disable */
	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
	if (value != 0x1)
		dev_info(hba->dev,
		    "Warning: Unipro VS_mphy_disable is 0x%x\n", value);

	/* Unipro VS_mphy_disable */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
	err = ufs_hisi_check_hibern8(hba);
	if (err)
		dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");

	if (!(host->caps & UFS_HISI_CAP_PHY10nm))
		ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);

	/* disable auto H8 */
	reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
	reg = reg & (~UFS_AHIT_AH8ITV_MASK);
	ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* Unipro PA_Local_TX_LCC_Enable */
	ufshcd_disable_host_tx_lcc(hba);
	/* close Unipro VS_Mk2ExtnSupport */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
	if (value != 0) {
		/* Ensure close success */
		dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
	}

	return err;
}

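/*
 * After link startup: tune the DL credit/ack thresholds, stop
 * bypassing the UFS clock gates and enable the received-symbol
 * debug counter.
 */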
static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	/* Unipro DL_AFC0CreditThreshold */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
	/* Unipro DL_TC0OutAckThreshold */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
	/* Unipro DL_TC0TXFCThreshold */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);

	/* do not bypass the ufs clk gate */
	ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
						CLOCK_GATE_BYPASS);
	ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
						UFS_SYSCTRL);

	/* select received symbol cnt */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
	/* reset counter0 and enable */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);

	return 0;
}

static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
					  enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_hisi_link_startup_pre_change(hba);
		break;
	case POST_CHANGE:
		err = ufs_hisi_link_startup_post_change(hba);
		break;
	default:
		break;
	}

	return err;
}

static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
{
	ufshcd_init_pwr_dev_param(hisi_param);
}

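/*
 * Program vendor attributes before a power mode change: 10nm PHY
 * (Boston) timing overrides, the VS_DebugSaveConfigTime device quirk,
 * and the PA_PWRModeUserData and DME protection/replay/AFC request
 * timeout values.
 */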
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	if (host->caps & UFS_HISI_CAP_PHY10nm) {
		/*
		 * The Boston platform needs SaveConfigTime set to 0x13 and
		 * the sync lengths raised to their maximum value.
		 */
		/* VS_DebugSaveConfigTime */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
		/* g1 sync length */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
		/* g2 sync length */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
		/* g3 sync length */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
		/* PA_Hibern8Time */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
		/* PA_Tactivate */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
	}

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
		pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
		/* VS_DebugSaveConfigTime */
		ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
		/* sync length */
		ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
	}

	/* update */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
	/* PA_TxSkip */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
	/* PA_PWRModeUserData0 = 8191, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), SZ_8K - 1);
	/* PA_PWRModeUserData1 = 65535, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), SZ_64K - 1);
	/* PA_PWRModeUserData2 = 32767, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), SZ_32K - 1);
	/* DME_FC0ProtectionTimeOutVal = 8191, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), SZ_8K - 1);
	/* DME_TC0ReplayTimeOutVal = 65535, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), SZ_64K - 1);
	/* DME_AFC0ReqTimeOutVal = 32767, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), SZ_32K - 1);
	/* PA_PWRModeUserData3 = 8191, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), SZ_8K - 1);
	/* PA_PWRModeUserData4 = 65535, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), SZ_64K - 1);
	/* PA_PWRModeUserData5 = 32767, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), SZ_32K - 1);
	/* DME_FC1ProtectionTimeOutVal = 8191, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), SZ_8K - 1);
	/* DME_TC1ReplayTimeOutVal = 65535, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), SZ_64K - 1);
	/* DME_AFC1ReqTimeOutVal = 32767, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), SZ_32K - 1);
}

static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status status,
				       struct ufs_pa_layer_attr *dev_max_params,
				       struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_dev_params ufs_hisi_cap;
	int ret = 0;

	if (!dev_req_params) {
		dev_err(hba->dev,
			    "%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_hisi_set_dev_cap(&ufs_hisi_cap);
		ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
					       dev_max_params, dev_req_params);
		if (ret) {
			dev_err(hba->dev,
			    "%s: failed to determine capabilities\n", __func__);
			goto out;
		}

		ufs_hisi_pwr_change_pre_change(hba);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static int ufs_hisi_suspend_prepare(struct device *dev)
{
	/* RPM and SPM are different; refer to ufs_hisi_suspend(). */
	return __ufshcd_suspend_prepare(dev, false);
}

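/*
 * Only the POST_CHANGE phase of a system suspend is handled here;
 * runtime suspend is a no-op. Gate the M-PHY reference clock and
 * clear the ref_dig_clk override in the PHY PCS.
 */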
static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
	enum ufs_notify_change_status status)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE)
		return 0;

	if (pm_op == UFS_RUNTIME_PM)
		return 0;

	if (host->in_suspend) {
		WARN_ON(1);
		return 0;
	}

	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
	udelay(10);
	/* set ref_dig_clk override of PHY PCS to 0 */
	ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);

	host->in_suspend = true;

	return 0;
}

static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	if (!host->in_suspend)
		return 0;

	/* set ref_dig_clk override of PHY PCS to 1 */
	ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
	udelay(10);
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);

	host->in_suspend = false;
	return 0;
}

static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
{
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);

	/* get resource of ufs sys ctrl */
	host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
	return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
}

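/*
 * Default PM levels: runtime PM keeps the device active with the link
 * in HIBERN8 (UFS_PM_LVL_1), while system suspend puts the device to
 * sleep with the link in HIBERN8 (UFS_PM_LVL_3).
 */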
static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
{
	hba->rpm_lvl = UFS_PM_LVL_1;
	hba->spm_lvl = UFS_PM_LVL_3;
}

/**
 * ufs_hisi_init_common - allocate and initialize HiSi host private data
 * @hba: host controller instance
 */
static int ufs_hisi_init_common(struct ufs_hba *hba)
{
	int err = 0;
	struct device *dev = hba->dev;
	struct ufs_hisi_host *host;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	host->rst = devm_reset_control_get(dev, "rst");
	if (IS_ERR(host->rst)) {
		dev_err(dev, "%s: failed to get reset control\n", __func__);
		err = PTR_ERR(host->rst);
		goto error;
	}

	ufs_hisi_set_pm_lvl(hba);

	err = ufs_hisi_get_resource(host);
	if (err)
		goto error;

	return 0;

error:
	ufshcd_set_variant(hba, NULL);
	return err;
}

static int ufs_hi3660_init(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;

	ret = ufs_hisi_init_common(hba);
	if (ret) {
		dev_err(dev, "%s: ufs common init fail\n", __func__);
		return ret;
	}

	ufs_hisi_clk_init(hba);

	ufs_hisi_soc_init(hba);

	return 0;
}

static int ufs_hi3670_init(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_hisi_host *host;

	ret = ufs_hisi_init_common(hba);
	if (ret) {
		dev_err(dev, "%s: ufs common init fail\n", __func__);
		return ret;
	}

	ufs_hisi_clk_init(hba);

	ufs_hisi_soc_init(hba);

	/* Add cap for 10nm PHY variant on HI3670 SoC */
	host = ufshcd_get_variant(hba);
	host->caps |= UFS_HISI_CAP_PHY10nm;

	return 0;
}

static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
	.name = "hi3660",
	.init = ufs_hi3660_init,
	.link_startup_notify = ufs_hisi_link_startup_notify,
	.pwr_change_notify = ufs_hisi_pwr_change_notify,
	.suspend = ufs_hisi_suspend,
	.resume = ufs_hisi_resume,
};

static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
	.name = "hi3670",
	.init = ufs_hi3670_init,
	.link_startup_notify = ufs_hisi_link_startup_notify,
	.pwr_change_notify = ufs_hisi_pwr_change_notify,
	.suspend = ufs_hisi_suspend,
	.resume = ufs_hisi_resume,
};

static const struct of_device_id ufs_hisi_of_match[] = {
	{ .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
	{ .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
	{},
};

MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);

static int ufs_hisi_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;

	of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);

	return ufshcd_pltfrm_init(pdev, of_id->data);
}

static int ufs_hisi_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	ufshcd_remove(hba);
	return 0;
}

static const struct dev_pm_ops ufs_hisi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufs_hisi_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_hisi_pltform = {
	.probe	= ufs_hisi_probe,
	.remove	= ufs_hisi_remove,
	.driver	= {
		.name	= "ufshcd-hisi",
		.pm	= &ufs_hisi_pm_ops,
		.of_match_table = ufs_hisi_of_match,
	},
};
module_platform_driver(ufs_hisi_pltform);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ufshcd-hisi");
MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");