xref: /linux/drivers/phy/xilinx/phy-zynqmp.c (revision b8e4b0529d59a3ccd0b25a31d3cfc8b0f3b34068)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
4  *
5  * Copyright (C) 2018-2020 Xilinx Inc.
6  *
7  * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
8  * Author: Subbaraya Sundeep <sundeep.lkml@gmail.com>
9  * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
10  *
11  * This driver is tested for USB, SGMII, SATA and Display Port currently.
12  * PCIe should also work but that is experimental as of now.
13  */
14 
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/delay.h>
18 #include <linux/io.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/of.h>
22 #include <linux/phy/phy.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/slab.h>
26 
27 #include <dt-bindings/phy/phy.h>
28 
29 /*
30  * Lane Registers
31  */
32 
33 /* TX De-emphasis parameters */
34 #define L0_TX_ANA_TM_18			0x0048
35 #define L0_TX_ANA_TM_118		0x01d8
36 #define L0_TX_ANA_TM_118_FORCE_17_0	BIT(0)
37 
38 /* DN Resistor calibration code parameters */
39 #define L0_TXPMA_ST_3			0x0b0c
40 #define L0_DN_CALIB_CODE		0x3f
41 
42 /* PMA control parameters */
43 #define L0_TXPMD_TM_45			0x0cb4
44 #define L0_TXPMD_TM_48			0x0cc0
45 #define L0_TXPMD_TM_45_OVER_DP_MAIN	BIT(0)
46 #define L0_TXPMD_TM_45_ENABLE_DP_MAIN	BIT(1)
47 #define L0_TXPMD_TM_45_OVER_DP_POST1	BIT(2)
48 #define L0_TXPMD_TM_45_ENABLE_DP_POST1	BIT(3)
49 #define L0_TXPMD_TM_45_OVER_DP_POST2	BIT(4)
50 #define L0_TXPMD_TM_45_ENABLE_DP_POST2	BIT(5)
51 
52 /* PCS control parameters */
53 #define L0_TM_DIG_6			0x106c
54 #define L0_TM_DIS_DESCRAMBLE_DECODER	0x0f
55 #define L0_TX_DIG_61			0x00f4
56 #define L0_TM_DISABLE_SCRAMBLE_ENCODER	0x0f
57 
58 /* PLL Test Mode register parameters */
59 #define L0_TM_PLL_DIG_37		0x2094
60 #define L0_TM_COARSE_CODE_LIMIT		0x10
61 
62 /* PLL SSC step size offsets */
63 #define L0_PLL_SS_STEPS_0_LSB		0x2368
64 #define L0_PLL_SS_STEPS_1_MSB		0x236c
65 #define L0_PLL_SS_STEP_SIZE_0_LSB	0x2370
66 #define L0_PLL_SS_STEP_SIZE_1		0x2374
67 #define L0_PLL_SS_STEP_SIZE_2		0x2378
68 #define L0_PLL_SS_STEP_SIZE_3_MSB	0x237c
69 #define L0_PLL_STATUS_READ_1		0x23e4
70 
71 /* SSC step size parameters */
72 #define STEP_SIZE_0_MASK		0xff
73 #define STEP_SIZE_1_MASK		0xff
74 #define STEP_SIZE_2_MASK		0xff
75 #define STEP_SIZE_3_MASK		0x3
76 #define STEP_SIZE_SHIFT			8
77 #define FORCE_STEP_SIZE			0x10
78 #define FORCE_STEPS			0x20
79 #define STEPS_0_MASK			0xff
80 #define STEPS_1_MASK			0x07
81 
82 /* Reference clock selection parameters */
83 #define L0_Ln_REF_CLK_SEL(n)		(0x2860 + (n) * 4)
84 #define L0_REF_CLK_LCL_SEL		BIT(7)
85 #define L0_REF_CLK_SEL_MASK		0x9f
86 
87 /* Calibration digital logic parameters */
88 #define L3_TM_CALIB_DIG19		0xec4c
89 #define L3_CALIB_DONE_STATUS		0xef14
90 #define L3_TM_CALIB_DIG18		0xec48
91 #define L3_TM_CALIB_DIG19_NSW		0x07
92 #define L3_TM_CALIB_DIG18_NSW		0xe0
93 #define L3_TM_OVERRIDE_NSW_CODE         0x20
94 #define L3_CALIB_DONE			0x02
95 #define L3_NSW_SHIFT			5
96 #define L3_NSW_PIPE_SHIFT		4
97 #define L3_NSW_CALIB_SHIFT		3
98 
99 #define PHY_REG_OFFSET			0x4000
100 
101 /*
102  * Global Registers
103  */
104 
105 /* Refclk selection parameters */
106 #define PLL_REF_SEL(n)			(0x10000 + (n) * 4)
107 #define PLL_FREQ_MASK			0x1f
108 #define PLL_STATUS_LOCKED		0x10
109 
110 /* Inter Connect Matrix parameters */
111 #define ICM_CFG0			0x10010
112 #define ICM_CFG1			0x10014
113 #define ICM_CFG0_L0_MASK		0x07
114 #define ICM_CFG0_L1_MASK		0x70
115 #define ICM_CFG1_L2_MASK		0x07
116 #define ICM_CFG2_L3_MASK		0x70
117 #define ICM_CFG_SHIFT			4
118 
119 /* Inter Connect Matrix allowed protocols */
120 #define ICM_PROTOCOL_PD			0x0
121 #define ICM_PROTOCOL_PCIE		0x1
122 #define ICM_PROTOCOL_SATA		0x2
123 #define ICM_PROTOCOL_USB		0x3
124 #define ICM_PROTOCOL_DP			0x4
125 #define ICM_PROTOCOL_SGMII		0x5
126 
/*
 * Human-readable names for the ICM protocol values above, indexed by
 * ICM_PROTOCOL_*. Used only for the debugfs status output.
 */
static const char *const xpsgtr_icm_str[] = {
	[ICM_PROTOCOL_PD] = "none",
	[ICM_PROTOCOL_PCIE] = "PCIe",
	[ICM_PROTOCOL_SATA] = "SATA",
	[ICM_PROTOCOL_USB] = "USB",
	[ICM_PROTOCOL_DP] = "DisplayPort",
	[ICM_PROTOCOL_SGMII] = "SGMII",
};
135 
136 /* Test Mode common reset control  parameters */
137 #define TM_CMN_RST			0x10018
138 #define TM_CMN_RST_EN			0x1
139 #define TM_CMN_RST_SET			0x2
140 #define TM_CMN_RST_MASK			0x3
141 
142 /* Bus width parameters */
143 #define TX_PROT_BUS_WIDTH		0x10040
144 #define RX_PROT_BUS_WIDTH		0x10044
145 #define PROT_BUS_WIDTH_10		0x0
146 #define PROT_BUS_WIDTH_20		0x1
147 #define PROT_BUS_WIDTH_40		0x2
148 #define PROT_BUS_WIDTH_SHIFT(n)		((n) * 2)
149 #define PROT_BUS_WIDTH_MASK(n)		GENMASK((n) * 2 + 1, (n) * 2)
150 
151 /* Number of GT lanes */
152 #define NUM_LANES			4
153 
154 /* SIOU SATA control register */
155 #define SATA_CONTROL_OFFSET		0x0100
156 
157 /* Total number of controllers */
158 #define CONTROLLERS_PER_LANE		5
159 
160 /* Timeout values */
161 #define TIMEOUT_US			1000
162 
163 struct xpsgtr_dev;
164 
/**
 * struct xpsgtr_ssc - structure to hold SSC settings for a lane
 * @refclk_rate: PLL reference clock frequency, in Hz
 * @pll_ref_clk: value to be written to register for corresponding ref clk rate
 * @steps: number of steps of SSC (Spread Spectrum Clock)
 * @step_size: step size of each step
 */
struct xpsgtr_ssc {
	u32 refclk_rate;
	u8  pll_ref_clk;
	u32 steps;
	u32 step_size;
};
178 
/**
 * struct xpsgtr_phy - representation of a lane
 * @phy: pointer to the kernel PHY device
 * @instance: instance of the protocol type (such as the lane within a
 *            protocol, or the USB/Ethernet controller)
 * @lane: lane number (0 to NUM_LANES - 1)
 * @protocol: protocol in which the lane operates (ICM_PROTOCOL_*)
 * @skip_phy_init: skip phy_init() if true (set on runtime resume when the
 *                 ICM configuration survived the suspend, USB only)
 * @dev: pointer to the xpsgtr_dev instance
 * @refclk: reference clock index (index into xpsgtr_dev.clk/refclk_sscs)
 */
struct xpsgtr_phy {
	struct phy *phy;
	u8 instance;
	u8 lane;
	u8 protocol;
	bool skip_phy_init;
	struct xpsgtr_dev *dev;
	unsigned int refclk;
};
199 
/**
 * struct xpsgtr_dev - representation of a ZynMP GT device
 * @dev: pointer to device
 * @serdes: serdes base address
 * @siou: siou base address
 * @gtr_mutex: mutex for locking
 * @phys: PHY lanes
 * @refclk_sscs: spread spectrum settings for the reference clocks
 *               (NULL entry means the corresponding clock was not provided)
 * @clk: reference clocks
 * @tx_term_fix: fix for GT issue, applied once on the first phy_init()
 * @saved_icm_cfg0: stored value of ICM CFG0 register
 * @saved_icm_cfg1: stored value of ICM CFG1 register
 */
struct xpsgtr_dev {
	struct device *dev;
	void __iomem *serdes;
	void __iomem *siou;
	struct mutex gtr_mutex; /* mutex for locking */
	struct xpsgtr_phy phys[NUM_LANES];
	const struct xpsgtr_ssc *refclk_sscs[NUM_LANES];
	struct clk *clk[NUM_LANES];
	bool tx_term_fix;
	unsigned int saved_icm_cfg0;
	unsigned int saved_icm_cfg1;
};
225 
226 /*
227  * Configuration Data
228  */
229 
/*
 * Lookup table to hold all settings needed for a ref clock frequency.
 * Columns: refclk_rate (Hz), pll_ref_clk register value, SSC steps,
 * SSC step size. Matched against the measured clock rate within 100 ppm
 * in xpsgtr_get_ref_clocks().
 */
static const struct xpsgtr_ssc ssc_lookup[] = {
	{  19200000, 0x05,  608, 264020 },
	{  20000000, 0x06,  634, 243454 },
	{  24000000, 0x07,  760, 168973 },
	{  26000000, 0x08,  824, 143860 },
	{  27000000, 0x09,  856,  86551 },
	{  38400000, 0x0a, 1218,  65896 },
	{  40000000, 0x0b,  634, 243454 },
	{  52000000, 0x0c,  824, 143860 },
	{ 100000000, 0x0d, 1058,  87533 },
	{ 108000000, 0x0e,  856,  86551 },
	{ 125000000, 0x0f,  992, 119497 },
	{ 135000000, 0x10, 1070,  55393 },
	{ 150000000, 0x11,  792, 187091 }
};
246 
247 /*
248  * I/O Accessors
249  */
250 
251 static inline u32 xpsgtr_read(struct xpsgtr_dev *gtr_dev, u32 reg)
252 {
253 	return readl(gtr_dev->serdes + reg);
254 }
255 
256 static inline void xpsgtr_write(struct xpsgtr_dev *gtr_dev, u32 reg, u32 value)
257 {
258 	writel(value, gtr_dev->serdes + reg);
259 }
260 
261 static inline void xpsgtr_clr_set(struct xpsgtr_dev *gtr_dev, u32 reg,
262 				  u32 clr, u32 set)
263 {
264 	u32 value = xpsgtr_read(gtr_dev, reg);
265 
266 	value &= ~clr;
267 	value |= set;
268 	xpsgtr_write(gtr_dev, reg, value);
269 }
270 
271 static inline u32 xpsgtr_read_phy(struct xpsgtr_phy *gtr_phy, u32 reg)
272 {
273 	void __iomem *addr = gtr_phy->dev->serdes
274 			   + gtr_phy->lane * PHY_REG_OFFSET + reg;
275 
276 	return readl(addr);
277 }
278 
279 static inline void xpsgtr_write_phy(struct xpsgtr_phy *gtr_phy,
280 				    u32 reg, u32 value)
281 {
282 	void __iomem *addr = gtr_phy->dev->serdes
283 			   + gtr_phy->lane * PHY_REG_OFFSET + reg;
284 
285 	writel(value, addr);
286 }
287 
288 static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
289 				      u32 reg, u32 clr, u32 set)
290 {
291 	void __iomem *addr = gtr_phy->dev->serdes
292 			   + gtr_phy->lane * PHY_REG_OFFSET + reg;
293 
294 	writel((readl(addr) & ~clr) | set, addr);
295 }
296 
297 /*
298  * Hardware Configuration
299  */
300 
301 /* Wait for the PLL to lock (with a timeout). */
302 static int xpsgtr_wait_pll_lock(struct phy *phy)
303 {
304 	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
305 	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
306 	unsigned int timeout = TIMEOUT_US;
307 	u8 protocol = gtr_phy->protocol;
308 	int ret;
309 
310 	dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n");
311 
312 	/*
313 	 * For DP and PCIe, only the instance 0 PLL is used. Switch to that phy
314 	 * so we wait on the right PLL.
315 	 */
316 	if ((protocol == ICM_PROTOCOL_DP || protocol == ICM_PROTOCOL_PCIE) &&
317 	    gtr_phy->instance) {
318 		int i;
319 
320 		for (i = 0; i < NUM_LANES; i++) {
321 			gtr_phy = &gtr_dev->phys[i];
322 
323 			if (gtr_phy->protocol == protocol && !gtr_phy->instance)
324 				goto got_phy;
325 		}
326 
327 		return -EBUSY;
328 	}
329 
330 got_phy:
331 	while (1) {
332 		u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);
333 
334 		if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED) {
335 			ret = 0;
336 			break;
337 		}
338 
339 		if (--timeout == 0) {
340 			ret = -ETIMEDOUT;
341 			break;
342 		}
343 
344 		udelay(1);
345 	}
346 
347 	if (ret == -ETIMEDOUT)
348 		dev_err(gtr_dev->dev,
349 			"lane %u (protocol %u, instance %u): PLL lock timeout\n",
350 			gtr_phy->lane, gtr_phy->protocol, gtr_phy->instance);
351 
352 	return ret;
353 }
354 
/*
 * Configure PLL and spread-sprectrum clock.
 *
 * Programs the PLL reference clock selection for the lane, the lane clock
 * sharing mux, and the five SSC registers (step size split across four
 * registers, step count across two) from the lane's SSC lookup entry.
 * Callers must have matched a valid refclk (refclk_sscs[] non-NULL, enforced
 * in xpsgtr_xlate()).
 */
static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
{
	const struct xpsgtr_ssc *ssc;
	u32 step_size;

	ssc = gtr_phy->dev->refclk_sscs[gtr_phy->refclk];
	step_size = ssc->step_size;

	xpsgtr_clr_set(gtr_phy->dev, PLL_REF_SEL(gtr_phy->lane),
		       PLL_FREQ_MASK, ssc->pll_ref_clk);

	/*
	 * Enable lane clock sharing, if required: when the refclk index
	 * matches the lane, use the local reference clock; otherwise route
	 * the other lane's clock in via the selection bit for that refclk.
	 */
	if (gtr_phy->refclk == gtr_phy->lane)
		xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
			       L0_REF_CLK_SEL_MASK, L0_REF_CLK_LCL_SEL);
	else
		xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
			       L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);

	/* SSC step size [7:0] */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
			   STEP_SIZE_0_MASK, step_size & STEP_SIZE_0_MASK);

	/* SSC step size [15:8] */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_1,
			   STEP_SIZE_1_MASK, step_size & STEP_SIZE_1_MASK);

	/* SSC step size [23:16] */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_2,
			   STEP_SIZE_2_MASK, step_size & STEP_SIZE_2_MASK);

	/* SSC steps [7:0] */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_0_LSB,
			   STEPS_0_MASK, ssc->steps & STEPS_0_MASK);

	/* SSC steps [10:8] (STEP_SIZE_SHIFT is reused here as an 8-bit shift) */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_1_MSB,
			   STEPS_1_MASK,
			   (ssc->steps >> STEP_SIZE_SHIFT) & STEPS_1_MASK);

	/* SSC step size [25:24], plus the force-enable bits for size/steps */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_3_MSB,
			   STEP_SIZE_3_MASK, (step_size & STEP_SIZE_3_MASK) |
			   FORCE_STEP_SIZE | FORCE_STEPS);
}
404 
/*
 * Configure the lane protocol in the Inter Connect Matrix.
 *
 * Lanes 0/1 live in ICM_CFG0 and lanes 2/3 in ICM_CFG1. The ICM_CFG0_L0/L1
 * masks are deliberately reused for ICM_CFG1: both registers share the same
 * per-lane field layout (low nibble for the even lane, bits [6:4] for the
 * odd lane).
 */
static void xpsgtr_lane_set_protocol(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	u8 protocol = gtr_phy->protocol;

	switch (gtr_phy->lane) {
	case 0:
		xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L0_MASK, protocol);
		break;
	case 1:
		xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L1_MASK,
			       protocol << ICM_CFG_SHIFT);
		break;
	case 2:
		xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L0_MASK, protocol);
		break;
	case 3:
		xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L1_MASK,
			       protocol << ICM_CFG_SHIFT);
		break;
	default:
		/* We already checked 0 <= lane <= 3 */
		break;
	}
}
431 
/*
 * Bypass (de)scrambler and 8b/10b decoder and encoder.
 * Used by the SATA and SGMII init paths, which handle coding themselves.
 */
static void xpsgtr_bypass_scrambler_8b10b(struct xpsgtr_phy *gtr_phy)
{
	/* RX side: disable descrambler and 8b/10b decoder. */
	xpsgtr_write_phy(gtr_phy, L0_TM_DIG_6, L0_TM_DIS_DESCRAMBLE_DECODER);
	/* TX side: disable scrambler and 8b/10b encoder. */
	xpsgtr_write_phy(gtr_phy, L0_TX_DIG_61, L0_TM_DISABLE_SCRAMBLE_ENCODER);
}
438 
/*
 * DP-specific initialization: take override control of the main and
 * post-cursor drive settings, and force TX de-emphasis override.
 *
 * NOTE(review): L0_TXPMD_TM_45_ENABLE_DP_POST1 is not set here even though
 * OVER_DP_POST1 is — presumably intentional (post1 overridden to disabled),
 * but worth confirming against the GT register documentation.
 */
static void xpsgtr_phy_init_dp(struct xpsgtr_phy *gtr_phy)
{
	xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_45,
			 L0_TXPMD_TM_45_OVER_DP_MAIN |
			 L0_TXPMD_TM_45_ENABLE_DP_MAIN |
			 L0_TXPMD_TM_45_OVER_DP_POST1 |
			 L0_TXPMD_TM_45_OVER_DP_POST2 |
			 L0_TXPMD_TM_45_ENABLE_DP_POST2);
	xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_118,
			 L0_TX_ANA_TM_118_FORCE_17_0);
}
451 
452 /* SATA-specific initialization. */
453 static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
454 {
455 	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
456 
457 	xpsgtr_bypass_scrambler_8b10b(gtr_phy);
458 
459 	writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
460 }
461 
462 /* SGMII-specific initialization. */
463 static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
464 {
465 	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
466 	u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane);
467 	u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane);
468 
469 	/* Set SGMII protocol TX and RX bus width to 10 bits. */
470 	xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val);
471 	xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val);
472 
473 	xpsgtr_bypass_scrambler_8b10b(gtr_phy);
474 }
475 
/*
 * Configure TX de-emphasis and margining for DP.
 *
 * @pre and @voltage index the 4x4 lookup tables below; 0xff entries mark
 * pre-emphasis/voltage-swing combinations that are not valid in DP.
 * NOTE(review): no bounds check is done here — assumes the DP consumer
 * passes pre < 4 and voltage < 4 (per the DP link training levels); confirm
 * against the caller.
 */
static void xpsgtr_phy_configure_dp(struct xpsgtr_phy *gtr_phy, unsigned int pre,
				    unsigned int voltage)
{
	static const u8 voltage_swing[4][4] = {
		{ 0x2a, 0x27, 0x24, 0x20 },
		{ 0x27, 0x23, 0x20, 0xff },
		{ 0x24, 0x20, 0xff, 0xff },
		{ 0xff, 0xff, 0xff, 0xff }
	};
	static const u8 pre_emphasis[4][4] = {
		{ 0x02, 0x02, 0x02, 0x02 },
		{ 0x01, 0x01, 0x01, 0xff },
		{ 0x00, 0x00, 0xff, 0xff },
		{ 0xff, 0xff, 0xff, 0xff }
	};

	xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_48, voltage_swing[pre][voltage]);
	xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_18, pre_emphasis[pre][voltage]);
}
496 
497 /*
498  * PHY Operations
499  */
500 
501 static bool xpsgtr_phy_init_required(struct xpsgtr_phy *gtr_phy)
502 {
503 	/*
504 	 * As USB may save the snapshot of the states during hibernation, doing
505 	 * phy_init() will put the USB controller into reset, resulting in the
506 	 * losing of the saved snapshot. So try to avoid phy_init() for USB
507 	 * except when gtr_phy->skip_phy_init is false (this happens when FPD is
508 	 * shutdown during suspend or when gt lane is changed from current one)
509 	 */
510 	if (gtr_phy->protocol == ICM_PROTOCOL_USB && gtr_phy->skip_phy_init)
511 		return false;
512 	else
513 		return true;
514 }
515 
516 /*
517  * There is a functional issue in the GT. The TX termination resistance can be
518  * out of spec due to a issue in the calibration logic. This is the workaround
519  * to fix it, required for XCZU9EG silicon.
520  */
521 static int xpsgtr_phy_tx_term_fix(struct xpsgtr_phy *gtr_phy)
522 {
523 	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
524 	u32 timeout = TIMEOUT_US;
525 	u32 nsw;
526 
527 	/* Enabling Test Mode control for CMN Rest */
528 	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
529 
530 	/* Set Test Mode reset */
531 	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);
532 
533 	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18, 0x00);
534 	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, L3_TM_OVERRIDE_NSW_CODE);
535 
536 	/*
537 	 * As a part of work around sequence for PMOS calibration fix,
538 	 * we need to configure any lane ICM_CFG to valid protocol. This
539 	 * will deassert the CMN_Resetn signal.
540 	 */
541 	xpsgtr_lane_set_protocol(gtr_phy);
542 
543 	/* Clear Test Mode reset */
544 	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
545 
546 	dev_dbg(gtr_dev->dev, "calibrating...\n");
547 
548 	do {
549 		u32 reg = xpsgtr_read(gtr_dev, L3_CALIB_DONE_STATUS);
550 
551 		if ((reg & L3_CALIB_DONE) == L3_CALIB_DONE)
552 			break;
553 
554 		if (!--timeout) {
555 			dev_err(gtr_dev->dev, "calibration time out\n");
556 			return -ETIMEDOUT;
557 		}
558 
559 		udelay(1);
560 	} while (timeout > 0);
561 
562 	dev_dbg(gtr_dev->dev, "calibration done\n");
563 
564 	/* Reading NMOS Register Code */
565 	nsw = xpsgtr_read(gtr_dev, L0_TXPMA_ST_3) & L0_DN_CALIB_CODE;
566 
567 	/* Set Test Mode reset */
568 	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);
569 
570 	/* Writing NMOS register values back [5:3] */
571 	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, nsw >> L3_NSW_CALIB_SHIFT);
572 
573 	/* Writing NMOS register value [2:0] */
574 	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18,
575 		     ((nsw & L3_TM_CALIB_DIG19_NSW) << L3_NSW_SHIFT) |
576 		     (1 << L3_NSW_PIPE_SHIFT));
577 
578 	/* Clear Test Mode reset */
579 	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
580 
581 	return 0;
582 }
583 
584 static int xpsgtr_phy_init(struct phy *phy)
585 {
586 	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
587 	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
588 	int ret = 0;
589 
590 	mutex_lock(&gtr_dev->gtr_mutex);
591 
592 	/* Configure and enable the clock when peripheral phy_init call */
593 	if (clk_prepare_enable(gtr_dev->clk[gtr_phy->refclk]))
594 		goto out;
595 
596 	/* Skip initialization if not required. */
597 	if (!xpsgtr_phy_init_required(gtr_phy))
598 		goto out;
599 
600 	if (gtr_dev->tx_term_fix) {
601 		ret = xpsgtr_phy_tx_term_fix(gtr_phy);
602 		if (ret < 0)
603 			goto out;
604 
605 		gtr_dev->tx_term_fix = false;
606 	}
607 
608 	/* Enable coarse code saturation limiting logic. */
609 	xpsgtr_write_phy(gtr_phy, L0_TM_PLL_DIG_37, L0_TM_COARSE_CODE_LIMIT);
610 
611 	/*
612 	 * Configure the PLL, the lane protocol, and perform protocol-specific
613 	 * initialization.
614 	 */
615 	xpsgtr_configure_pll(gtr_phy);
616 	xpsgtr_lane_set_protocol(gtr_phy);
617 
618 	switch (gtr_phy->protocol) {
619 	case ICM_PROTOCOL_DP:
620 		xpsgtr_phy_init_dp(gtr_phy);
621 		break;
622 
623 	case ICM_PROTOCOL_SATA:
624 		xpsgtr_phy_init_sata(gtr_phy);
625 		break;
626 
627 	case ICM_PROTOCOL_SGMII:
628 		xpsgtr_phy_init_sgmii(gtr_phy);
629 		break;
630 	}
631 
632 out:
633 	mutex_unlock(&gtr_dev->gtr_mutex);
634 	return ret;
635 }
636 
637 static int xpsgtr_phy_exit(struct phy *phy)
638 {
639 	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
640 	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
641 
642 	gtr_phy->skip_phy_init = false;
643 
644 	/* Ensure that disable clock only, which configure for lane */
645 	clk_disable_unprepare(gtr_dev->clk[gtr_phy->refclk]);
646 
647 	return 0;
648 }
649 
/* phy_ops .power_on handler: wait for PLL lock unless init was skipped. */
static int xpsgtr_phy_power_on(struct phy *phy)
{
	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);

	/* Nothing to wait for when initialization was skipped. */
	if (!xpsgtr_phy_init_required(gtr_phy))
		return 0;

	return xpsgtr_wait_pll_lock(phy);
}
660 
661 static int xpsgtr_phy_configure(struct phy *phy, union phy_configure_opts *opts)
662 {
663 	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
664 
665 	if (gtr_phy->protocol != ICM_PROTOCOL_DP)
666 		return 0;
667 
668 	xpsgtr_phy_configure_dp(gtr_phy, opts->dp.pre[0], opts->dp.voltage[0]);
669 
670 	return 0;
671 }
672 
/* PHY operations exposed to the generic PHY framework. */
static const struct phy_ops xpsgtr_phyops = {
	.init		= xpsgtr_phy_init,
	.exit		= xpsgtr_phy_exit,
	.power_on	= xpsgtr_phy_power_on,
	.configure	= xpsgtr_phy_configure,
	.owner		= THIS_MODULE,
};
680 
681 /*
682  * OF Xlate Support
683  */
684 
685 /* Set the lane protocol and instance based on the PHY type and instance number. */
686 static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type,
687 				unsigned int phy_instance)
688 {
689 	unsigned int num_phy_types;
690 
691 	switch (phy_type) {
692 	case PHY_TYPE_SATA:
693 		num_phy_types = 2;
694 		gtr_phy->protocol = ICM_PROTOCOL_SATA;
695 		break;
696 	case PHY_TYPE_USB3:
697 		num_phy_types = 2;
698 		gtr_phy->protocol = ICM_PROTOCOL_USB;
699 		break;
700 	case PHY_TYPE_DP:
701 		num_phy_types = 2;
702 		gtr_phy->protocol = ICM_PROTOCOL_DP;
703 		break;
704 	case PHY_TYPE_PCIE:
705 		num_phy_types = 4;
706 		gtr_phy->protocol = ICM_PROTOCOL_PCIE;
707 		break;
708 	case PHY_TYPE_SGMII:
709 		num_phy_types = 4;
710 		gtr_phy->protocol = ICM_PROTOCOL_SGMII;
711 		break;
712 	default:
713 		return -EINVAL;
714 	}
715 
716 	if (phy_instance >= num_phy_types)
717 		return -EINVAL;
718 
719 	gtr_phy->instance = phy_instance;
720 	return 0;
721 }
722 
/*
 * Valid combinations of controllers and lanes (Interconnect Matrix). Each
 * "instance" represents one controller for a lane. For PCIe and DP, the
 * "instance" is the logical lane in the link. For SATA, USB, and SGMII,
 * the instance is the index of the controller.
 *
 * This information is only used to validate the devicetree reference, and is
 * not used when programming the hardware.
 *
 * Row index is the lane; column index follows the protocol order in the
 * header comment below (PCIe, SATA, USB, DP, SGMII); the cell is the single
 * instance that protocol may use on that lane.
 */
static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = {
	/* PCIe, SATA, USB, DP, SGMII */
	{ 0, 0, 0, 1, 0 }, /* Lane 0 */
	{ 1, 1, 0, 0, 1 }, /* Lane 1 */
	{ 2, 0, 0, 1, 2 }, /* Lane 2 */
	{ 3, 1, 1, 0, 3 }, /* Lane 3 */
};
739 
/*
 * Translate OF phandle and args to PHY instance.
 *
 * The 4-cell specifier is: lane number, PHY type (PHY_TYPE_*), protocol
 * instance, and reference clock index. The lane's PHY mutex is held (via
 * guard(), released automatically on every return path) while the lane
 * type and refclk are written, so concurrent xlate calls cannot leave the
 * lane half-configured. The ICM matrix is consulted last to reject
 * protocol/instance combinations the hardware does not route to this lane.
 */
static struct phy *xpsgtr_xlate(struct device *dev,
				const struct of_phandle_args *args)
{
	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
	struct xpsgtr_phy *gtr_phy;
	unsigned int phy_instance;
	unsigned int phy_lane;
	unsigned int phy_type;
	unsigned int refclk;
	unsigned int i;
	int ret;

	if (args->args_count != 4) {
		dev_err(dev, "Invalid number of cells in 'phy' property\n");
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Get the PHY parameters from the OF arguments and derive the lane
	 * type.
	 */
	phy_lane = args->args[0];
	if (phy_lane >= ARRAY_SIZE(gtr_dev->phys)) {
		dev_err(dev, "Invalid lane number %u\n", phy_lane);
		return ERR_PTR(-ENODEV);
	}

	gtr_phy = &gtr_dev->phys[phy_lane];
	phy_type = args->args[1];
	phy_instance = args->args[2];

	/* Scoped lock: released automatically on all return paths below. */
	guard(mutex)(&gtr_phy->phy->mutex);
	ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
	if (ret < 0) {
		dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
		return ERR_PTR(ret);
	}

	refclk = args->args[3];
	if (refclk >= ARRAY_SIZE(gtr_dev->refclk_sscs) ||
	    !gtr_dev->refclk_sscs[refclk]) {
		dev_err(dev, "Invalid reference clock number %u\n", refclk);
		return ERR_PTR(-EINVAL);
	}

	gtr_phy->refclk = refclk;

	/*
	 * Ensure that the Interconnect Matrix is obeyed, i.e a given lane type
	 * is allowed to operate on the lane.
	 */
	for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
		if (icm_matrix[phy_lane][i] == gtr_phy->instance)
			return gtr_phy->phy;
	}

	return ERR_PTR(-EINVAL);
}
799 
800 /*
801  * DebugFS
802  */
803 
804 static int xpsgtr_status_read(struct seq_file *seq, void *data)
805 {
806 	struct device *dev = seq->private;
807 	struct xpsgtr_phy *gtr_phy = dev_get_drvdata(dev);
808 	struct clk *clk;
809 	u32 pll_status;
810 
811 	mutex_lock(&gtr_phy->phy->mutex);
812 	pll_status = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);
813 	clk = gtr_phy->dev->clk[gtr_phy->refclk];
814 
815 	seq_printf(seq, "Lane:            %u\n", gtr_phy->lane);
816 	seq_printf(seq, "Protocol:        %s\n",
817 		   xpsgtr_icm_str[gtr_phy->protocol]);
818 	seq_printf(seq, "Instance:        %u\n", gtr_phy->instance);
819 	seq_printf(seq, "Reference clock: %u (%pC)\n", gtr_phy->refclk, clk);
820 	seq_printf(seq, "Reference rate:  %lu\n", clk_get_rate(clk));
821 	seq_printf(seq, "PLL locked:      %s\n",
822 		   pll_status & PLL_STATUS_LOCKED ? "yes" : "no");
823 
824 	mutex_unlock(&gtr_phy->phy->mutex);
825 	return 0;
826 }
827 
828 /*
829  * Power Management
830  */
831 
832 static int xpsgtr_runtime_suspend(struct device *dev)
833 {
834 	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
835 
836 	/* Save the snapshot ICM_CFG registers. */
837 	gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
838 	gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
839 
840 	return 0;
841 }
842 
843 static int xpsgtr_runtime_resume(struct device *dev)
844 {
845 	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
846 	unsigned int icm_cfg0, icm_cfg1;
847 	unsigned int i;
848 	bool skip_phy_init;
849 
850 	icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
851 	icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
852 
853 	/* Return if no GT lanes got configured before suspend. */
854 	if (!gtr_dev->saved_icm_cfg0 && !gtr_dev->saved_icm_cfg1)
855 		return 0;
856 
857 	/* Check if the ICM configurations changed after suspend. */
858 	if (icm_cfg0 == gtr_dev->saved_icm_cfg0 &&
859 	    icm_cfg1 == gtr_dev->saved_icm_cfg1)
860 		skip_phy_init = true;
861 	else
862 		skip_phy_init = false;
863 
864 	/* Update the skip_phy_init for all gtr_phy instances. */
865 	for (i = 0; i < ARRAY_SIZE(gtr_dev->phys); i++)
866 		gtr_dev->phys[i].skip_phy_init = skip_phy_init;
867 
868 	return 0;
869 }
870 
871 static DEFINE_RUNTIME_DEV_PM_OPS(xpsgtr_pm_ops, xpsgtr_runtime_suspend,
872 				 xpsgtr_runtime_resume, NULL);
873 /*
874  * Probe & Platform Driver
875  */
876 
877 static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
878 {
879 	unsigned int refclk;
880 
881 	for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
882 		unsigned long rate;
883 		unsigned int i;
884 		struct clk *clk;
885 		char name[8];
886 
887 		snprintf(name, sizeof(name), "ref%u", refclk);
888 		clk = devm_clk_get_optional(gtr_dev->dev, name);
889 		if (IS_ERR(clk)) {
890 			return dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
891 					     "Failed to get ref clock %u\n",
892 					     refclk);
893 		}
894 
895 		if (!clk)
896 			continue;
897 
898 		gtr_dev->clk[refclk] = clk;
899 
900 		/*
901 		 * Get the spread spectrum (SSC) settings for the reference
902 		 * clock rate.
903 		 */
904 		rate = clk_get_rate(clk);
905 
906 		for (i = 0 ; i < ARRAY_SIZE(ssc_lookup); i++) {
907 			/* Allow an error of 100 ppm */
908 			unsigned long error = ssc_lookup[i].refclk_rate / 10000;
909 
910 			if (abs(rate - ssc_lookup[i].refclk_rate) < error) {
911 				gtr_dev->refclk_sscs[refclk] = &ssc_lookup[i];
912 				break;
913 			}
914 		}
915 
916 		if (i == ARRAY_SIZE(ssc_lookup)) {
917 			dev_err(gtr_dev->dev,
918 				"Invalid rate %lu for reference clock %u\n",
919 				rate, refclk);
920 			return -EINVAL;
921 		}
922 	}
923 
924 	return 0;
925 }
926 
/*
 * Probe: map the serdes/siou register spaces, resolve reference clocks,
 * create one PHY per lane, register the PHY provider, and bring up runtime
 * PM with the device held active.
 */
static int xpsgtr_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct xpsgtr_dev *gtr_dev;
	struct phy_provider *provider;
	unsigned int port;
	int ret;

	gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
	if (!gtr_dev)
		return -ENOMEM;

	gtr_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, gtr_dev);

	mutex_init(&gtr_dev->gtr_mutex);

	/* The TX termination workaround only applies to the original psgtr. */
	if (of_device_is_compatible(np, "xlnx,zynqmp-psgtr"))
		gtr_dev->tx_term_fix =
			of_property_read_bool(np, "xlnx,tx-termination-fix");

	/* Acquire resources. */
	gtr_dev->serdes = devm_platform_ioremap_resource_byname(pdev, "serdes");
	if (IS_ERR(gtr_dev->serdes))
		return PTR_ERR(gtr_dev->serdes);

	gtr_dev->siou = devm_platform_ioremap_resource_byname(pdev, "siou");
	if (IS_ERR(gtr_dev->siou))
		return PTR_ERR(gtr_dev->siou);

	ret = xpsgtr_get_ref_clocks(gtr_dev);
	if (ret)
		return ret;

	/* Create PHYs. */
	for (port = 0; port < ARRAY_SIZE(gtr_dev->phys); ++port) {
		struct xpsgtr_phy *gtr_phy = &gtr_dev->phys[port];
		struct phy *phy;

		gtr_phy->lane = port;
		gtr_phy->dev = gtr_dev;

		phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
		if (IS_ERR(phy)) {
			dev_err(&pdev->dev, "failed to create PHY\n");
			return PTR_ERR(phy);
		}

		gtr_phy->phy = phy;
		phy_set_drvdata(phy, gtr_phy);
		/* Per-lane debugfs "status" file (see xpsgtr_status_read). */
		debugfs_create_devm_seqfile(&phy->dev, "status", phy->debugfs,
					    xpsgtr_status_read);
	}

	/* Register the PHY provider. */
	provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
	if (IS_ERR(provider)) {
		dev_err(&pdev->dev, "registering provider failed\n");
		return PTR_ERR(provider);
	}

	/* Enable runtime PM with the device initially active and referenced. */
	pm_runtime_set_active(gtr_dev->dev);
	pm_runtime_enable(gtr_dev->dev);

	ret = pm_runtime_resume_and_get(gtr_dev->dev);
	if (ret < 0) {
		pm_runtime_disable(gtr_dev->dev);
		return ret;
	}

	return 0;
}
999 
1000 static void xpsgtr_remove(struct platform_device *pdev)
1001 {
1002 	struct xpsgtr_dev *gtr_dev = platform_get_drvdata(pdev);
1003 
1004 	pm_runtime_disable(gtr_dev->dev);
1005 	pm_runtime_put_noidle(gtr_dev->dev);
1006 	pm_runtime_set_suspended(gtr_dev->dev);
1007 }
1008 
/* Devicetree match table: original psgtr and the v1.1 silicon revision. */
static const struct of_device_id xpsgtr_of_match[] = {
	{ .compatible = "xlnx,zynqmp-psgtr", },
	{ .compatible = "xlnx,zynqmp-psgtr-v1.1", },
	{},
};
MODULE_DEVICE_TABLE(of, xpsgtr_of_match);

static struct platform_driver xpsgtr_driver = {
	.probe = xpsgtr_probe,
	.remove_new = xpsgtr_remove,
	.driver = {
		.name = "xilinx-psgtr",
		.of_match_table	= xpsgtr_of_match,
		.pm =  pm_ptr(&xpsgtr_pm_ops),
	},
};

module_platform_driver(xpsgtr_driver);

MODULE_AUTHOR("Xilinx Inc.");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Xilinx ZynqMP High speed Gigabit Transceiver");
1031