xref: /linux/drivers/ufs/host/ufs-amd-versal2.c (revision 7eb7f5723df50a7d5564aa609e4c147f669a5cb4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2025 Advanced Micro Devices, Inc.
4  *
5  * Authors: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
6  */
7 
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/firmware/xlnx-zynqmp.h>
11 #include <linux/irqreturn.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/platform_device.h>
15 #include <linux/reset.h>
16 #include <ufs/unipro.h>
17 
18 #include "ufshcd-dwc.h"
19 #include "ufshcd-pltfrm.h"
20 #include "ufshci-dwc.h"
21 
/* PHY modes */
#define UFSHCD_DWC_PHY_MODE_ROM         0

/* FAST_FLAGS(): bypass the RX-AFE offset calibration (ATT/CTLE) */
#define MPHY_FAST_RX_AFE_CAL		BIT(2)
/* FW_CALIB_CCFG(): firmware calibration configuration enable */
#define MPHY_FW_CALIB_CFG_VAL		BIT(8)

/* RX_OVRD_IN_1(): rx_req override enable / value; RX_PCS_OUT(): ack bit */
#define MPHY_RX_OVRD_EN			BIT(3)
#define MPHY_RX_OVRD_VAL		BIT(2)
#define MPHY_RX_ACK_MASK		BIT(0)

/* Poll budget for PHY/firmware status loops (iterations, ~1-5 us apart) */
#define TIMEOUT_MICROSEC	1000000
/* Per-controller private data for the AMD Versal Gen 2 UFS host */
struct ufs_versal2_host {
	struct ufs_hba *hba;		/* back-pointer to the core HBA */
	struct reset_control *rstc;	/* "host" reset line */
	struct reset_control *rstphy;	/* "phy" reset line */
	u32 phy_mode;			/* currently always UFSHCD_DWC_PHY_MODE_ROM */
	unsigned long host_clk;		/* "core" clock rate, used for HCLKDIV */
	/* RX-AFE compensation values read from firmware calibration fuses */
	u8 attcompval0;
	u8 attcompval1;
	u8 ctlecompval0;
	u8 ctlecompval1;
};
45 
/*
 * ufs_versal2_phy_reg_write - write a 16-bit value to an M-PHY register
 * @hba: per-adapter instance
 * @addr: 16-bit M-PHY register address
 * @val: 16-bit value to write
 *
 * Programs the address/data bytes through the CBC indirect-access DME
 * attributes, selects write mode and latches the config update.
 *
 * The attribute array is built on the stack each call: the original used a
 * function-local static that was patched at runtime, which is not reentrant
 * and would race if two controllers (or contexts) issued PHY writes at once.
 *
 * Return: 0 on success, negative DME error code otherwise.
 */
static int ufs_versal2_phy_reg_write(struct ufs_hba *hba, u32 addr, u32 val)
{
	struct ufshcd_dme_attr_val phy_write_attrs[] = {
		{ UIC_ARG_MIB(CBCREGADDRLSB), (u8)addr, DME_LOCAL },
		{ UIC_ARG_MIB(CBCREGADDRMSB), (u8)(addr >> 8), DME_LOCAL },
		{ UIC_ARG_MIB(CBCREGWRLSB), (u8)val, DME_LOCAL },
		{ UIC_ARG_MIB(CBCREGWRMSB), (u8)(val >> 8), DME_LOCAL },
		{ UIC_ARG_MIB(CBCREGRDWRSEL), 1, DME_LOCAL },
		{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
	};

	return ufshcd_dwc_dme_set_attrs(hba, phy_write_attrs, ARRAY_SIZE(phy_write_attrs));
}
64 
/*
 * ufs_versal2_phy_reg_read - read a 16-bit value from an M-PHY register
 * @hba: per-adapter instance
 * @addr: 16-bit M-PHY register address
 * @val: output; receives the 16-bit register value (LSB | MSB << 8)
 *
 * Programs the address via the CBC indirect-access attributes in read mode,
 * then fetches the two result bytes with ufshcd_dme_get().
 *
 * As with the write path, the attribute array is per-call (automatic) rather
 * than a runtime-patched static, so concurrent callers cannot corrupt each
 * other's address bytes.
 *
 * Return: 0 on success, negative DME error code otherwise.
 */
static int ufs_versal2_phy_reg_read(struct ufs_hba *hba, u32 addr, u32 *val)
{
	u32 mib_val;
	int ret;
	struct ufshcd_dme_attr_val phy_read_attrs[] = {
		{ UIC_ARG_MIB(CBCREGADDRLSB), (u8)addr, DME_LOCAL },
		{ UIC_ARG_MIB(CBCREGADDRMSB), (u8)(addr >> 8), DME_LOCAL },
		{ UIC_ARG_MIB(CBCREGRDWRSEL), 0, DME_LOCAL },
		{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
	};

	ret = ufshcd_dwc_dme_set_attrs(hba, phy_read_attrs, ARRAY_SIZE(phy_read_attrs));
	if (ret)
		return ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDLSB), &mib_val);
	if (ret)
		return ret;

	*val = mib_val;
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDMSB), &mib_val);
	if (ret)
		return ret;

	*val |= (mib_val << 8);

	return 0;
}
96 
ufs_versal2_enable_phy(struct ufs_hba * hba)97 static int ufs_versal2_enable_phy(struct ufs_hba *hba)
98 {
99 	u32 offset, reg;
100 	int ret;
101 
102 	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0);
103 	if (ret)
104 		return ret;
105 
106 	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
107 	if (ret)
108 		return ret;
109 
110 	/* Check Tx/Rx FSM states */
111 	for (offset = 0; offset < 2; offset++) {
112 		u32 time_left, mibsel;
113 
114 		time_left = TIMEOUT_MICROSEC;
115 		mibsel = UIC_ARG_MIB_SEL(MTX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(offset));
116 		do {
117 			ret = ufshcd_dme_get(hba, mibsel, &reg);
118 			if (ret)
119 				return ret;
120 
121 			if (reg == TX_STATE_HIBERN8 || reg == TX_STATE_SLEEP ||
122 			    reg == TX_STATE_LSBURST)
123 				break;
124 
125 			time_left--;
126 			usleep_range(1, 5);
127 		} while (time_left);
128 
129 		if (!time_left) {
130 			dev_err(hba->dev, "Invalid Tx FSM state.\n");
131 			return -ETIMEDOUT;
132 		}
133 
134 		time_left = TIMEOUT_MICROSEC;
135 		mibsel = UIC_ARG_MIB_SEL(MRX_FSM_STATE, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(offset));
136 		do {
137 			ret = ufshcd_dme_get(hba, mibsel, &reg);
138 			if (ret)
139 				return ret;
140 
141 			if (reg == RX_STATE_HIBERN8 || reg == RX_STATE_SLEEP ||
142 			    reg == RX_STATE_LSBURST)
143 				break;
144 
145 			time_left--;
146 			usleep_range(1, 5);
147 		} while (time_left);
148 
149 		if (!time_left) {
150 			dev_err(hba->dev, "Invalid Rx FSM state.\n");
151 			return -ETIMEDOUT;
152 		}
153 	}
154 
155 	return 0;
156 }
157 
ufs_versal2_setup_phy(struct ufs_hba * hba)158 static int ufs_versal2_setup_phy(struct ufs_hba *hba)
159 {
160 	struct ufs_versal2_host *host = ufshcd_get_variant(hba);
161 	int ret;
162 	u32 reg;
163 
164 	/* Bypass RX-AFE offset calibrations (ATT/CTLE) */
165 	ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(0), &reg);
166 	if (ret)
167 		return ret;
168 
169 	reg |= MPHY_FAST_RX_AFE_CAL;
170 	ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(0), reg);
171 	if (ret)
172 		return ret;
173 
174 	ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(1), &reg);
175 	if (ret)
176 		return ret;
177 
178 	reg |= MPHY_FAST_RX_AFE_CAL;
179 	ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(1), reg);
180 	if (ret)
181 		return ret;
182 
183 	/* Program ATT and CTLE compensation values */
184 	if (host->attcompval0) {
185 		ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(0), host->attcompval0);
186 		if (ret)
187 			return ret;
188 	}
189 
190 	if (host->attcompval1) {
191 		ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(1), host->attcompval1);
192 		if (ret)
193 			return ret;
194 	}
195 
196 	if (host->ctlecompval0) {
197 		ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(0), host->ctlecompval0);
198 		if (ret)
199 			return ret;
200 	}
201 
202 	if (host->ctlecompval1) {
203 		ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(1), host->ctlecompval1);
204 		if (ret)
205 			return ret;
206 	}
207 
208 	ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(0), &reg);
209 	if (ret)
210 		return ret;
211 
212 	reg |= MPHY_FW_CALIB_CFG_VAL;
213 	ret = ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(0), reg);
214 	if (ret)
215 		return ret;
216 
217 	ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(1), &reg);
218 	if (ret)
219 		return ret;
220 
221 	reg |= MPHY_FW_CALIB_CFG_VAL;
222 	return ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(1), reg);
223 }
224 
/*
 * ufs_versal2_phy_init - bring the M-PHY out of reset and configure it
 * @hba: per-adapter instance
 *
 * Sequence: wait for the firmware Tx/Rx configuration handshake, program
 * the RMMI attributes, deassert the PHY reset, wait for SRAM init, then
 * run the AFE setup and FSM checks.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ufs_versal2_phy_init(struct ufs_hba *hba)
{
	struct ufs_versal2_host *host = ufshcd_get_variant(hba);
	u32 time_left;
	bool is_ready;
	int ret;
	/* RMMI configuration: un-gate the reference clock and select C10 mode */
	static const struct ufshcd_dme_attr_val rmmi_attrs[] = {
		{ UIC_ARG_MIB(CBREFCLKCTRL2), CBREFREFCLK_GATE_OVR_EN, DME_LOCAL },
		{ UIC_ARG_MIB(CBCRCTRL), 1, DME_LOCAL },
		{ UIC_ARG_MIB(CBC10DIRECTCONF2), 1, DME_LOCAL },
		{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
	};

	/*
	 * Wait for Tx/Rx config_rdy.
	 * NOTE(review): the loop exits when is_ready becomes FALSE and the
	 * timeout message says "busy", so the firmware flag apparently reads
	 * as "configuration in progress" rather than "ready" — confirm against
	 * the zynqmp_pm_is_mphy_tx_rx_config_ready() contract.
	 */
	time_left = TIMEOUT_MICROSEC;
	do {
		time_left--;
		ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&is_ready);
		if (ret)
			return ret;

		if (!is_ready)
			break;

		usleep_range(1, 5);
	} while (time_left);

	if (!time_left) {
		dev_err(hba->dev, "Tx/Rx configuration signal busy.\n");
		return -ETIMEDOUT;
	}

	ret = ufshcd_dwc_dme_set_attrs(hba, rmmi_attrs, ARRAY_SIZE(rmmi_attrs));
	if (ret)
		return ret;

	/* Release the PHY reset only after the RMMI attributes are in place */
	ret = reset_control_deassert(host->rstphy);
	if (ret) {
		dev_err(hba->dev, "ufsphy reset deassert failed, err = %d\n", ret);
		return ret;
	}

	/* Wait for SRAM init done (here is_ready == true means done) */
	time_left = TIMEOUT_MICROSEC;
	do {
		time_left--;
		ret = zynqmp_pm_is_sram_init_done(&is_ready);
		if (ret)
			return ret;

		if (is_ready)
			break;

		usleep_range(1, 5);
	} while (time_left);

	if (!time_left) {
		dev_err(hba->dev, "SRAM initialization failed.\n");
		return -ETIMEDOUT;
	}

	ret = ufs_versal2_setup_phy(hba);
	if (ret)
		return ret;

	return ufs_versal2_enable_phy(hba);
}
292 
ufs_versal2_init(struct ufs_hba * hba)293 static int ufs_versal2_init(struct ufs_hba *hba)
294 {
295 	struct ufs_versal2_host *host;
296 	struct device *dev = hba->dev;
297 	struct ufs_clk_info *clki;
298 	int ret;
299 	u32 cal;
300 
301 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
302 	if (!host)
303 		return -ENOMEM;
304 
305 	host->hba = hba;
306 	ufshcd_set_variant(hba, host);
307 
308 	host->phy_mode = UFSHCD_DWC_PHY_MODE_ROM;
309 
310 	list_for_each_entry(clki, &hba->clk_list_head, list) {
311 		if (!strcmp(clki->name, "core"))
312 			host->host_clk = clk_get_rate(clki->clk);
313 	}
314 
315 	host->rstc = devm_reset_control_get_exclusive(dev, "host");
316 	if (IS_ERR(host->rstc)) {
317 		dev_err(dev, "failed to get reset ctrl: host\n");
318 		return PTR_ERR(host->rstc);
319 	}
320 
321 	host->rstphy = devm_reset_control_get_exclusive(dev, "phy");
322 	if (IS_ERR(host->rstphy)) {
323 		dev_err(dev, "failed to get reset ctrl: phy\n");
324 		return PTR_ERR(host->rstphy);
325 	}
326 
327 	ret = reset_control_assert(host->rstc);
328 	if (ret) {
329 		dev_err(hba->dev, "host reset assert failed, err = %d\n", ret);
330 		return ret;
331 	}
332 
333 	ret = reset_control_assert(host->rstphy);
334 	if (ret) {
335 		dev_err(hba->dev, "phy reset assert failed, err = %d\n", ret);
336 		return ret;
337 	}
338 
339 	ret = zynqmp_pm_set_sram_bypass();
340 	if (ret) {
341 		dev_err(dev, "Bypass SRAM interface failed, err = %d\n", ret);
342 		return ret;
343 	}
344 
345 	ret = reset_control_deassert(host->rstc);
346 	if (ret)
347 		dev_err(hba->dev, "host reset deassert failed, err = %d\n", ret);
348 
349 	ret = zynqmp_pm_get_ufs_calibration_values(&cal);
350 	if (ret) {
351 		dev_err(dev, "failed to read calibration values\n");
352 		return ret;
353 	}
354 
355 	host->attcompval0 = (u8)cal;
356 	host->attcompval1 = (u8)(cal >> 8);
357 	host->ctlecompval0 = (u8)(cal >> 16);
358 	host->ctlecompval1 = (u8)(cal >> 24);
359 
360 	hba->quirks |= UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;
361 
362 	return 0;
363 }
364 
ufs_versal2_hce_enable_notify(struct ufs_hba * hba,enum ufs_notify_change_status status)365 static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba,
366 					 enum ufs_notify_change_status status)
367 {
368 	int ret = 0;
369 
370 	if (status == PRE_CHANGE) {
371 		ret = ufs_versal2_phy_init(hba);
372 		if (ret)
373 			dev_err(hba->dev, "Phy init failed (%d)\n", ret);
374 	}
375 
376 	return ret;
377 }
378 
ufs_versal2_link_startup_notify(struct ufs_hba * hba,enum ufs_notify_change_status status)379 static int ufs_versal2_link_startup_notify(struct ufs_hba *hba,
380 					   enum ufs_notify_change_status status)
381 {
382 	struct ufs_versal2_host *host = ufshcd_get_variant(hba);
383 	int ret = 0;
384 
385 	switch (status) {
386 	case PRE_CHANGE:
387 		if (host->host_clk)
388 			ufshcd_writel(hba, host->host_clk / 1000000, DWC_UFS_REG_HCLKDIV);
389 
390 		break;
391 	case POST_CHANGE:
392 		ret = ufshcd_dwc_link_startup_notify(hba, status);
393 		break;
394 	default:
395 		ret = -EINVAL;
396 		break;
397 	}
398 
399 	return ret;
400 }
401 
/*
 * ufs_versal2_phy_ratesel - drive the rx_req override handshake per lane
 * @hba: per-adapter instance
 * @activelanes: number of lanes to operate on
 * @rx_req: value (0 or 1) to force on the rx_req override
 *
 * For each lane: enables the RX override with the requested rx_req value,
 * then polls the PCS output until the ack bit matches rx_req.  Callers
 * toggle rx_req 1 then 0 to pulse the handshake.
 *
 * Return: 0 on success, -ETIMEDOUT if the ack never matches, or a
 * negative PHY register access error.
 */
static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_req)
{
	u32 time_left, reg, lane;
	int ret;

	for (lane = 0; lane < activelanes; lane++) {
		time_left = TIMEOUT_MICROSEC;
		ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
		if (ret)
			return ret;

		/* Enable the override and force rx_req to the requested level */
		reg |= MPHY_RX_OVRD_EN;
		if (rx_req)
			reg |= MPHY_RX_OVRD_VAL;
		else
			reg &= ~MPHY_RX_OVRD_VAL;

		ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
		if (ret)
			return ret;

		/* Wait for the ack bit to reflect the requested rx_req value */
		do {
			ret = ufs_versal2_phy_reg_read(hba, RX_PCS_OUT(lane), &reg);
			if (ret)
				return ret;

			reg &= MPHY_RX_ACK_MASK;
			if (reg == rx_req)
				break;

			time_left--;
			usleep_range(1, 5);
		} while (time_left);

		if (!time_left) {
			dev_err(hba->dev, "Invalid Rx Ack value.\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}
444 
/*
 * ufs_versal2_pwr_change_notify - power mode change callback
 * @hba: per-adapter instance
 * @status: PRE_CHANGE or POST_CHANGE (only PRE_CHANGE acts)
 * @dev_max_params: maximum capabilities negotiated with the device
 * @dev_req_params: parameters to request; filled in by this callback
 *
 * Accepts the device maximum parameters, falls back to SLOW_MODE on
 * uncalibrated (all-zero fuse) parts, and for HS modes performs the PHY
 * rate selection handshake before the power mode switch.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status,
					 const struct ufs_pa_layer_attr *dev_max_params,
					 struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_versal2_host *host = ufshcd_get_variant(hba);
	u32 lane, reg, rate = 0;
	int ret = 0;

	if (status == PRE_CHANGE) {
		/* Request exactly what the device advertises as its maximum */
		memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr));

		/* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */
		if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 &&
		    !host->ctlecompval1) {
			dev_req_params->pwr_rx = SLOW_MODE;
			dev_req_params->pwr_tx = SLOW_MODE;
			return 0;
		}

		/* Rate selection is only needed for HS modes */
		if (dev_req_params->pwr_rx == SLOW_MODE || dev_req_params->pwr_rx == SLOWAUTO_MODE)
			return 0;

		/* rate: 0 = HS Rate A (default), 1 = HS Rate B */
		if (dev_req_params->hs_rate == PA_HS_MODE_B)
			rate = 1;

		 /* Select the rate */
		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), rate);
		if (ret)
			return ret;

		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
		if (ret)
			return ret;

		/* Pulse rx_req high then low to latch the rate on each lane */
		ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 1);
		if (ret)
			return ret;

		ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 0);
		if (ret)
			return ret;

		/*
		 * Remove rx_req override.
		 * NOTE(review): lane_tx bounds this RX-register loop (and the
		 * ratesel calls above); presumably Tx/Rx lane counts match
		 * here — confirm whether lane_rx was intended.
		 */
		for (lane = 0; lane < dev_req_params->lane_tx; lane++) {
			ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
			if (ret)
				return ret;

			reg &= ~MPHY_RX_OVRD_EN;
			ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
			if (ret)
				return ret;
		}

		/* Dual-lane HS: request initial adapt for the target gear */
		if (dev_req_params->lane_tx == UFS_LANE_2 && dev_req_params->lane_rx == UFS_LANE_2)
			ret = ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
							 PA_INITIAL_ADAPT);
	}

	return ret;
}
506 
507 static struct ufs_hba_variant_ops ufs_versal2_hba_vops = {
508 	.name			= "ufs-versal2-pltfm",
509 	.init			= ufs_versal2_init,
510 	.link_startup_notify	= ufs_versal2_link_startup_notify,
511 	.hce_enable_notify	= ufs_versal2_hce_enable_notify,
512 	.pwr_change_notify	= ufs_versal2_pwr_change_notify,
513 };
514 
/* Device-tree match table; .data carries the variant ops for the core */
static const struct of_device_id ufs_versal2_pltfm_match[] = {
	{
		.compatible = "amd,versal2-ufs",
		.data = &ufs_versal2_hba_vops,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, ufs_versal2_pltfm_match);
523 
ufs_versal2_probe(struct platform_device * pdev)524 static int ufs_versal2_probe(struct platform_device *pdev)
525 {
526 	struct device *dev = &pdev->dev;
527 	int ret;
528 
529 	/* Perform generic probe */
530 	ret = ufshcd_pltfrm_init(pdev, &ufs_versal2_hba_vops);
531 	if (ret)
532 		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", ret);
533 
534 	return ret;
535 }
536 
ufs_versal2_remove(struct platform_device * pdev)537 static void ufs_versal2_remove(struct platform_device *pdev)
538 {
539 	struct ufs_hba *hba = platform_get_drvdata(pdev);
540 
541 	pm_runtime_get_sync(&(pdev)->dev);
542 	ufshcd_remove(hba);
543 }
544 
/* System sleep and runtime PM both delegate to the ufshcd core helpers */
static const struct dev_pm_ops ufs_versal2_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
};
549 
550 static struct platform_driver ufs_versal2_pltfm = {
551 	.probe		= ufs_versal2_probe,
552 	.remove		= ufs_versal2_remove,
553 	.driver		= {
554 		.name	= "ufshcd-versal2",
555 		.pm	= &ufs_versal2_pm_ops,
556 		.of_match_table	= of_match_ptr(ufs_versal2_pltfm_match),
557 	},
558 };
559 
560 module_platform_driver(ufs_versal2_pltfm);
561 
562 MODULE_AUTHOR("Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>");
563 MODULE_DESCRIPTION("AMD Versal Gen 2 UFS Host Controller driver");
564 MODULE_LICENSE("GPL");
565