// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon Hixxxx UFS Driver
 *
 * Copyright (c) 2016-2017 Linaro Ltd.
 * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
 */

#include <linux/time.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/unipro.h>
#include "ufs-hisi.h"
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>

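/*
 * Poll the M-PHY TX FSM state of both lanes until each reports HIBERN8,
 * giving up after HBRN8_POLL_TOUT_MS. Returns 0 on success, an error from
 * ufshcd_dme_get() on DME access failure, or -1 if the lanes never reach
 * HIBERN8.
 */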
static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
{
	int err = 0;
	u32 tx_fsm_val_0 = 0;
	u32 tx_fsm_val_1 = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
				     &tx_fsm_val_0);
		err |= ufshcd_dme_get(hba,
			UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
		if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
			tx_fsm_val_1 == TX_FSM_HIBERN8))
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * We might have been scheduled out for a long time during polling,
	 * so check the state again.
	 */
	if (time_after(jiffies, timeout)) {
		err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
				     &tx_fsm_val_0);
		err |= ufshcd_dme_get(hba,
			UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
	}

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
			__func__, err);
	} else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
		   tx_fsm_val_1 != TX_FSM_HIBERN8) {
		err = -1;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
			__func__, tx_fsm_val_0, tx_fsm_val_1);
	}

	return err;
}

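/*
 * Switch the M-PHY reference clock source: gate the reference clock, select
 * the ABB clock, drop the reference-clock isolation and ungate the clock
 * again.
 */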
static void ufs_hisi_clk_init(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
	if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
		mdelay(1);
	/* use abb clk */
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
	/* open mphy ref clk */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
}

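/*
 * Bring the UFS host controller out of reset: power up the PSW MTCMOS,
 * configure the cfg/ref clocks, drop the isolation bits, release lp_reset_n
 * and toggle the device reset line before deasserting the controller reset.
 */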
static void ufs_hisi_soc_init(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
	u32 reg;

	if (!IS_ERR(host->rst))
		reset_control_assert(host->rst);

	/* HC_PSW powerup */
	ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
	udelay(10);
	/* notify PWR ready */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
	ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
		UFS_DEVICE_RESET_CTRL);

	reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
	reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
	/* set cfg clk freq */
	ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
	/* set ref clk freq */
	ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
	/* bypass ufs clk gate */
	ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
		CLOCK_GATE_BYPASS);
	ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);

	/* open psw clk */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
	/* disable ufshc iso */
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
	/* disable phy iso */
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
	/* notify iso disable */
	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);

	/* disable lp_reset_n */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
	mdelay(1);

	ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
		UFS_DEVICE_RESET_CTRL);

	msleep(20);

	/*
	 * Enable the fix for linereset recovery and enable rx_reset/tx_reset
	 * beat; enable ref_clk_en override (bit 5) with override value 1
	 * (bit 4), with mask.
	 */
	ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);

	if (!IS_ERR(host->rst))
		reset_control_deassert(host->rst);
}

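/*
 * Program the UniPro and M-PHY vendor attributes that must be in place
 * before link startup: rate/override settings, sync lengths and activate
 * times (with 10nm-PHY-specific values where applicable), then re-enable
 * the M-PHY, wait for both TX lanes to reach HIBERN8 and disable
 * auto-hibernate and host TX LCC.
 */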
static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
	int err;
	uint32_t value;
	uint32_t reg;

	/* Unipro VS_mphy_disable */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
	/* PA_HSSeries */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
	/* MPHY CBRATESEL */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
	/* MPHY CBOVRCTRL2 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
	/* MPHY CBOVRCTRL3 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);

	if (host->caps & UFS_HISI_CAP_PHY10nm) {
		/* MPHY CBOVRCTRL4 */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
		/* MPHY CBOVRCTRL5 */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
	}

	/* Unipro VS_MphyCfgUpdt */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
	/* MPHY RXOVRCTRL4 rx0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
	/* MPHY RXOVRCTRL4 rx1 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
	/* MPHY RXOVRCTRL5 rx0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
	/* MPHY RXOVRCTRL5 rx1 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
	/* MPHY RXSQCONTROL rx0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
	/* MPHY RXSQCONTROL rx1 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
	/* Unipro VS_MphyCfgUpdt */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);

	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);

	if (host->caps & UFS_HISI_CAP_PHY10nm) {
		/* RX_Hibern8Time_Capability */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
		/* RX_Hibern8Time_Capability */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
		/* RX_Min_ActivateTime */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
		/* RX_Min_ActivateTime */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
	} else {
		/* Tactive RX */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
		/* Tactive RX */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
	}

	/* Gear3 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
	/* Gear3 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
	/* Gear2 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
	/* Gear2 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
	/* Gear1 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
	/* Gear1 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
	/* Thibernate Tx */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
	/* Thibernate Tx */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);

	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
	/* Unipro VS_mphy_disable */
	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
	if (value != 0x1)
		dev_info(hba->dev,
			 "Warning: Unipro VS_mphy_disable is 0x%x\n", value);

	/* Unipro VS_mphy_disable */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
	err = ufs_hisi_check_hibern8(hba);
	if (err)
		dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");

	if (!(host->caps & UFS_HISI_CAP_PHY10nm))
		ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);

	/* disable auto H8 */
	reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
	reg = reg & (~UFS_AHIT_AH8ITV_MASK);
	ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* Unipro PA_Local_TX_LCC_Enable */
	ufshcd_disable_host_tx_lcc(hba);
	/* close Unipro VS_Mk2ExtnSupport */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
	if (value != 0) {
		/* Ensure the close succeeded */
		dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
	}

	return err;
}

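/*
 * After the link is up, lower the UniPro DL credit/ACK thresholds, stop
 * bypassing the UFS clock gates and arm the received-symbol counter.
 */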
static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	/* Unipro DL_AFC0CreditThreshold */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
	/* Unipro DL_TC0OutAckThreshold */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
	/* Unipro DL_TC0TXFCThreshold */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);

	/* do not bypass ufs clk gate */
	ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
		CLOCK_GATE_BYPASS);
	ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
		UFS_SYSCTRL);

	/* select received symbol cnt */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
	/* reset counter0 and enable */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);

	return 0;
}

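/* Dispatch link-startup pre/post work depending on the notification phase. */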
static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_hisi_link_startup_pre_change(hba);
		break;
	case POST_CHANGE:
		err = ufs_hisi_link_startup_post_change(hba);
		break;
	default:
		break;
	}

	return err;
}

static void ufs_hisi_set_dev_cap(struct ufs_host_params *host_params)
{
	ufshcd_init_host_params(host_params);
}

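/*
 * Vendor attribute tweaks applied before a power mode change: larger sync
 * lengths and timers on the 10nm PHY, the VS_DebugSaveConfigTime quirk for
 * affected devices, and non-zero PA_PWRModeUserData / DME timeout values.
 */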
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	if (host->caps & UFS_HISI_CAP_PHY10nm) {
		/*
		 * The Boston platform needs SaveConfigTime set to 0x13 and
		 * the sync lengths changed to their maximum value.
		 */
		/* VS_DebugSaveConfigTime */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
		/* g1 sync length */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
		/* g2 sync length */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
		/* g3 sync length */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
		/* PA_Hibern8Time */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
		/* PA_Tactivate */
		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
	}

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
		pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
		/* VS_DebugSaveConfigTime */
		ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
		/* sync length */
		ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
	}

	/* update */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
	/* PA_TxSkip */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
	/* PA_PWRModeUserData0 = 8191, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), SZ_8K - 1);
	/* PA_PWRModeUserData1 = 65535, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), SZ_64K - 1);
	/* PA_PWRModeUserData2 = 32767, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), SZ_32K - 1);
	/* DME_FC0ProtectionTimeOutVal = 8191, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), SZ_8K - 1);
	/* DME_TC0ReplayTimeOutVal = 65535, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), SZ_64K - 1);
	/* DME_AFC0ReqTimeOutVal = 32767, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), SZ_32K - 1);
	/* PA_PWRModeUserData3 = 8191, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), SZ_8K - 1);
	/* PA_PWRModeUserData4 = 65535, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), SZ_64K - 1);
	/* PA_PWRModeUserData5 = 32767, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), SZ_32K - 1);
	/* DME_FC1ProtectionTimeOutVal = 8191, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), SZ_8K - 1);
	/* DME_TC1ReplayTimeOutVal = 65535, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), SZ_64K - 1);
	/* DME_AFC1ReqTimeOutVal = 32767, default is 0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), SZ_32K - 1);
}

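/*
 * Power mode change notification: on PRE_CHANGE, negotiate the power
 * parameters against the host capabilities and apply the vendor attribute
 * setup above; POST_CHANGE needs no extra handling.
 */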
static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status,
				      struct ufs_pa_layer_attr *dev_max_params,
				      struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_host_params host_params;
	int ret = 0;

	if (!dev_req_params) {
		dev_err(hba->dev,
			"%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_hisi_set_dev_cap(&host_params);
		ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
		if (ret) {
			dev_err(hba->dev,
				"%s: failed to determine capabilities\n", __func__);
			goto out;
		}

		ufs_hisi_pwr_change_pre_change(hba);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static int ufs_hisi_suspend_prepare(struct device *dev)
{
	/* RPM and SPM are different. Refer to ufs_hisi_suspend(). */
	return __ufshcd_suspend_prepare(dev, false);
}

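/*
 * System suspend only (runtime PM returns early): gate the PHY reference
 * clock and clear the ref_dig_clk override so the PHY PCS is fully idle.
 */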
static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			    enum ufs_notify_change_status status)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE)
		return 0;

	if (pm_op == UFS_RUNTIME_PM)
		return 0;

	if (host->in_suspend) {
		WARN_ON(1);
		return 0;
	}

	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
	udelay(10);
	/* set ref_dig_clk override of PHY PCS to 0 */
	ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);

	host->in_suspend = true;

	return 0;
}

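/*
 * Undo ufs_hisi_suspend(): restore the ref_dig_clk override and re-enable
 * the PHY reference clock.
 */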
static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	if (!host->in_suspend)
		return 0;

	/* set ref_dig_clk override of PHY PCS to 1 */
	ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
	udelay(10);
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);

	host->in_suspend = false;
	return 0;
}

static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
{
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);

	/* get resource of ufs sys ctrl */
	host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
	return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
}

static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
{
	hba->rpm_lvl = UFS_PM_LVL_1;
	hba->spm_lvl = UFS_PM_LVL_3;
}

/**
 * ufs_hisi_init_common - allocate and attach the HiSilicon variant data
 * @hba: host controller instance
 *
 * Allocates the ufs_hisi_host structure, acquires the "rst" reset control,
 * selects the default PM levels and maps the UFS system controller
 * registers.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int ufs_hisi_init_common(struct ufs_hba *hba)
{
	int err = 0;
	struct device *dev = hba->dev;
	struct ufs_hisi_host *host;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	host->rst = devm_reset_control_get(dev, "rst");
	if (IS_ERR(host->rst)) {
		dev_err(dev, "%s: failed to get reset control\n", __func__);
		err = PTR_ERR(host->rst);
		goto error;
	}

	ufs_hisi_set_pm_lvl(hba);

	err = ufs_hisi_get_resource(host);
	if (err)
		goto error;

	return 0;

error:
	ufshcd_set_variant(hba, NULL);
	return err;
}

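/*
 * Per-SoC init hooks: both run the common init followed by the clock and
 * SoC bring-up; the HI3670 additionally flags its 10nm PHY so the
 * 10nm-specific attribute values above are used.
 */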
static int ufs_hi3660_init(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;

	ret = ufs_hisi_init_common(hba);
	if (ret) {
		dev_err(dev, "%s: ufs common init fail\n", __func__);
		return ret;
	}

	ufs_hisi_clk_init(hba);

	ufs_hisi_soc_init(hba);

	return 0;
}

static int ufs_hi3670_init(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_hisi_host *host;

	ret = ufs_hisi_init_common(hba);
	if (ret) {
		dev_err(dev, "%s: ufs common init fail\n", __func__);
		return ret;
	}

	ufs_hisi_clk_init(hba);

	ufs_hisi_soc_init(hba);

	/* Add cap for 10nm PHY variant on HI3670 SoC */
	host = ufshcd_get_variant(hba);
	host->caps |= UFS_HISI_CAP_PHY10nm;

	return 0;
}

static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
	.name = "hi3660",
	.init = ufs_hi3660_init,
	.link_startup_notify = ufs_hisi_link_startup_notify,
	.pwr_change_notify = ufs_hisi_pwr_change_notify,
	.suspend = ufs_hisi_suspend,
	.resume = ufs_hisi_resume,
};

static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
	.name = "hi3670",
	.init = ufs_hi3670_init,
	.link_startup_notify = ufs_hisi_link_startup_notify,
	.pwr_change_notify = ufs_hisi_pwr_change_notify,
	.suspend = ufs_hisi_suspend,
	.resume = ufs_hisi_resume,
};

static const struct of_device_id ufs_hisi_of_match[] = {
	{ .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
	{ .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
	{},
};

MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);

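/*
 * Look up the matching variant ops from the OF table and hand off to the
 * generic UFS platform glue.
 */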
static int ufs_hisi_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;

	of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);

	return ufshcd_pltfrm_init(pdev, of_id->data);
}

static void ufs_hisi_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	ufshcd_remove(hba);
}

static const struct dev_pm_ops ufs_hisi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufs_hisi_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_hisi_pltform = {
	.probe	= ufs_hisi_probe,
	.remove_new = ufs_hisi_remove,
	.driver = {
		.name	= "ufshcd-hisi",
		.pm	= &ufs_hisi_pm_ops,
		.of_match_table = ufs_hisi_of_match,
	},
};
module_platform_driver(ufs_hisi_pltform);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ufshcd-hisi");
MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");