xref: /linux/drivers/phy/xilinx/phy-zynqmp.c (revision d79c6840917097285e03a49f709321f5fb972750)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
4  *
5  * Copyright (C) 2018-2020 Xilinx Inc.
6  *
7  * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
8  * Author: Subbaraya Sundeep <sundeep.lkml@gmail.com>
9  * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
10  *
11  * This driver is tested for USB, SGMII, SATA and Display Port currently.
12  * PCIe should also work but that is experimental as of now.
13  */
14 
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/io.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/phy/phy.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/slab.h>
25 
26 #include <dt-bindings/phy/phy.h>
27 
28 /*
29  * Lane Registers
30  */
31 
32 /* TX De-emphasis parameters */
33 #define L0_TX_ANA_TM_18			0x0048
34 #define L0_TX_ANA_TM_118		0x01d8
35 #define L0_TX_ANA_TM_118_FORCE_17_0	BIT(0)
36 
37 /* DN Resistor calibration code parameters */
38 #define L0_TXPMA_ST_3			0x0b0c
39 #define L0_DN_CALIB_CODE		0x3f
40 
41 /* PMA control parameters */
42 #define L0_TXPMD_TM_45			0x0cb4
43 #define L0_TXPMD_TM_48			0x0cc0
44 #define L0_TXPMD_TM_45_OVER_DP_MAIN	BIT(0)
45 #define L0_TXPMD_TM_45_ENABLE_DP_MAIN	BIT(1)
46 #define L0_TXPMD_TM_45_OVER_DP_POST1	BIT(2)
47 #define L0_TXPMD_TM_45_ENABLE_DP_POST1	BIT(3)
48 #define L0_TXPMD_TM_45_OVER_DP_POST2	BIT(4)
49 #define L0_TXPMD_TM_45_ENABLE_DP_POST2	BIT(5)
50 
51 /* PCS control parameters */
52 #define L0_TM_DIG_6			0x106c
53 #define L0_TM_DIS_DESCRAMBLE_DECODER	0x0f
54 #define L0_TX_DIG_61			0x00f4
55 #define L0_TM_DISABLE_SCRAMBLE_ENCODER	0x0f
56 
57 /* PLL Test Mode register parameters */
58 #define L0_TM_PLL_DIG_37		0x2094
59 #define L0_TM_COARSE_CODE_LIMIT		0x10
60 
61 /* PLL SSC step size offsets */
62 #define L0_PLL_SS_STEPS_0_LSB		0x2368
63 #define L0_PLL_SS_STEPS_1_MSB		0x236c
64 #define L0_PLL_SS_STEP_SIZE_0_LSB	0x2370
65 #define L0_PLL_SS_STEP_SIZE_1		0x2374
66 #define L0_PLL_SS_STEP_SIZE_2		0x2378
67 #define L0_PLL_SS_STEP_SIZE_3_MSB	0x237c
68 #define L0_PLL_STATUS_READ_1		0x23e4
69 
70 /* SSC step size parameters */
71 #define STEP_SIZE_0_MASK		0xff
72 #define STEP_SIZE_1_MASK		0xff
73 #define STEP_SIZE_2_MASK		0xff
74 #define STEP_SIZE_3_MASK		0x3
75 #define STEP_SIZE_SHIFT			8
76 #define FORCE_STEP_SIZE			0x10
77 #define FORCE_STEPS			0x20
78 #define STEPS_0_MASK			0xff
79 #define STEPS_1_MASK			0x07
80 
81 /* Reference clock selection parameters */
82 #define L0_Ln_REF_CLK_SEL(n)		(0x2860 + (n) * 4)
83 #define L0_REF_CLK_LCL_SEL		BIT(7)
84 #define L0_REF_CLK_SEL_MASK		0x9f
85 
86 /* Calibration digital logic parameters */
87 #define L3_TM_CALIB_DIG19		0xec4c
88 #define L3_CALIB_DONE_STATUS		0xef14
89 #define L3_TM_CALIB_DIG18		0xec48
90 #define L3_TM_CALIB_DIG19_NSW		0x07
91 #define L3_TM_CALIB_DIG18_NSW		0xe0
92 #define L3_TM_OVERRIDE_NSW_CODE         0x20
93 #define L3_CALIB_DONE			0x02
94 #define L3_NSW_SHIFT			5
95 #define L3_NSW_PIPE_SHIFT		4
96 #define L3_NSW_CALIB_SHIFT		3
97 
98 #define PHY_REG_OFFSET			0x4000
99 
100 /*
101  * Global Registers
102  */
103 
104 /* Refclk selection parameters */
105 #define PLL_REF_SEL(n)			(0x10000 + (n) * 4)
106 #define PLL_FREQ_MASK			0x1f
107 #define PLL_STATUS_LOCKED		0x10
108 
109 /* Inter Connect Matrix parameters */
110 #define ICM_CFG0			0x10010
111 #define ICM_CFG1			0x10014
112 #define ICM_CFG0_L0_MASK		0x07
113 #define ICM_CFG0_L1_MASK		0x70
114 #define ICM_CFG1_L2_MASK		0x07
115 #define ICM_CFG2_L3_MASK		0x70
116 #define ICM_CFG_SHIFT			4
117 
118 /* Inter Connect Matrix allowed protocols */
119 #define ICM_PROTOCOL_PD			0x0
120 #define ICM_PROTOCOL_PCIE		0x1
121 #define ICM_PROTOCOL_SATA		0x2
122 #define ICM_PROTOCOL_USB		0x3
123 #define ICM_PROTOCOL_DP			0x4
124 #define ICM_PROTOCOL_SGMII		0x5
125 
126 /* Test Mode common reset control  parameters */
127 #define TM_CMN_RST			0x10018
128 #define TM_CMN_RST_EN			0x1
129 #define TM_CMN_RST_SET			0x2
130 #define TM_CMN_RST_MASK			0x3
131 
132 /* Bus width parameters */
133 #define TX_PROT_BUS_WIDTH		0x10040
134 #define RX_PROT_BUS_WIDTH		0x10044
135 #define PROT_BUS_WIDTH_10		0x0
136 #define PROT_BUS_WIDTH_20		0x1
137 #define PROT_BUS_WIDTH_40		0x2
138 #define PROT_BUS_WIDTH_SHIFT(n)		((n) * 2)
139 #define PROT_BUS_WIDTH_MASK(n)		GENMASK((n) * 2 + 1, (n) * 2)
140 
141 /* Number of GT lanes */
142 #define NUM_LANES			4
143 
144 /* SIOU SATA control register */
145 #define SATA_CONTROL_OFFSET		0x0100
146 
147 /* Total number of controllers */
148 #define CONTROLLERS_PER_LANE		5
149 
150 /* Timeout values */
151 #define TIMEOUT_US			1000
152 
153 struct xpsgtr_dev;
154 
/**
 * struct xpsgtr_ssc - structure to hold SSC settings for a lane
 * @refclk_rate: PLL reference clock frequency, in Hz
 * @pll_ref_clk: value to be written to register for corresponding ref clk rate
 * @steps: number of steps of SSC (Spread Spectrum Clock)
 * @step_size: step size of each step
 */
struct xpsgtr_ssc {
	u32 refclk_rate;
	u8  pll_ref_clk;
	u32 steps;
	u32 step_size;
};
168 
/**
 * struct xpsgtr_phy - representation of a lane
 * @phy: pointer to the kernel PHY device
 * @instance: instance of the protocol type (such as the lane within a
 *            protocol, or the USB/Ethernet controller)
 * @lane: lane number
 * @protocol: protocol in which the lane operates
 * @skip_phy_init: skip phy_init() if true
 * @dev: pointer to the xpsgtr_dev instance
 * @refclk: reference clock index (into the parent device's @clk and
 *          @refclk_sscs arrays)
 */
struct xpsgtr_phy {
	struct phy *phy;
	u8 instance;
	u8 lane;
	u8 protocol;
	bool skip_phy_init;
	struct xpsgtr_dev *dev;
	unsigned int refclk;
};
189 
/**
 * struct xpsgtr_dev - representation of a ZynqMP GT device
 * @dev: pointer to device
 * @serdes: serdes base address
 * @siou: siou base address
 * @gtr_mutex: mutex for locking
 * @phys: PHY lanes
 * @refclk_sscs: spread spectrum settings for the reference clocks
 *               (NULL for reference clocks that were not provided)
 * @clk: reference clocks (NULL for reference clocks that were not provided)
 * @tx_term_fix: fix for GT issue
 * @saved_icm_cfg0: stored value of ICM CFG0 register
 * @saved_icm_cfg1: stored value of ICM CFG1 register
 */
struct xpsgtr_dev {
	struct device *dev;
	void __iomem *serdes;
	void __iomem *siou;
	struct mutex gtr_mutex; /* mutex for locking */
	struct xpsgtr_phy phys[NUM_LANES];
	const struct xpsgtr_ssc *refclk_sscs[NUM_LANES];
	struct clk *clk[NUM_LANES];
	bool tx_term_fix;
	unsigned int saved_icm_cfg0;
	unsigned int saved_icm_cfg1;
};
215 
216 /*
217  * Configuration Data
218  */
219 
/* lookup table to hold all settings needed for a ref clock frequency */
static const struct xpsgtr_ssc ssc_lookup[] = {
	/* refclk_rate (Hz), pll_ref_clk, steps, step_size */
	{  19200000, 0x05,  608, 264020 },
	{  20000000, 0x06,  634, 243454 },
	{  24000000, 0x07,  760, 168973 },
	{  26000000, 0x08,  824, 143860 },
	{  27000000, 0x09,  856,  86551 },
	{  38400000, 0x0a, 1218,  65896 },
	{  40000000, 0x0b,  634, 243454 },
	{  52000000, 0x0c,  824, 143860 },
	{ 100000000, 0x0d, 1058,  87533 },
	{ 108000000, 0x0e,  856,  86551 },
	{ 125000000, 0x0f,  992, 119497 },
	{ 135000000, 0x10, 1070,  55393 },
	{ 150000000, 0x11,  792, 187091 }
};
236 
237 /*
238  * I/O Accessors
239  */
240 
/* Read a GT global register at offset @reg from the serdes base. */
static inline u32 xpsgtr_read(struct xpsgtr_dev *gtr_dev, u32 reg)
{
	return readl(gtr_dev->serdes + reg);
}
245 
/* Write @value to a GT global register at offset @reg from the serdes base. */
static inline void xpsgtr_write(struct xpsgtr_dev *gtr_dev, u32 reg, u32 value)
{
	writel(value, gtr_dev->serdes + reg);
}
250 
251 static inline void xpsgtr_clr_set(struct xpsgtr_dev *gtr_dev, u32 reg,
252 				  u32 clr, u32 set)
253 {
254 	u32 value = xpsgtr_read(gtr_dev, reg);
255 
256 	value &= ~clr;
257 	value |= set;
258 	xpsgtr_write(gtr_dev, reg, value);
259 }
260 
261 static inline u32 xpsgtr_read_phy(struct xpsgtr_phy *gtr_phy, u32 reg)
262 {
263 	void __iomem *addr = gtr_phy->dev->serdes
264 			   + gtr_phy->lane * PHY_REG_OFFSET + reg;
265 
266 	return readl(addr);
267 }
268 
269 static inline void xpsgtr_write_phy(struct xpsgtr_phy *gtr_phy,
270 				    u32 reg, u32 value)
271 {
272 	void __iomem *addr = gtr_phy->dev->serdes
273 			   + gtr_phy->lane * PHY_REG_OFFSET + reg;
274 
275 	writel(value, addr);
276 }
277 
278 static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
279 				      u32 reg, u32 clr, u32 set)
280 {
281 	void __iomem *addr = gtr_phy->dev->serdes
282 			   + gtr_phy->lane * PHY_REG_OFFSET + reg;
283 
284 	writel((readl(addr) & ~clr) | set, addr);
285 }
286 
287 /*
288  * Hardware Configuration
289  */
290 
/* Wait for the PLL to lock (with a timeout). */
static int xpsgtr_wait_pll_lock(struct phy *phy)
{
	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	unsigned int timeout = TIMEOUT_US;
	u8 protocol = gtr_phy->protocol;
	int ret;

	dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n");

	/*
	 * For DP and PCIe, only the instance 0 PLL is used. Switch to that phy
	 * so we wait on the right PLL.
	 */
	if ((protocol == ICM_PROTOCOL_DP || protocol == ICM_PROTOCOL_PCIE) &&
	    gtr_phy->instance) {
		int i;

		/* Find the lane carrying instance 0 of the same protocol. */
		for (i = 0; i < NUM_LANES; i++) {
			gtr_phy = &gtr_dev->phys[i];

			if (gtr_phy->protocol == protocol && !gtr_phy->instance)
				goto got_phy;
		}

		/* No instance 0 lane is configured for this protocol. */
		return -EBUSY;
	}

got_phy:
	/* Busy-wait in 1us steps, up to TIMEOUT_US, for the lock bit. */
	while (1) {
		u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);

		if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED) {
			ret = 0;
			break;
		}

		if (--timeout == 0) {
			ret = -ETIMEDOUT;
			break;
		}

		udelay(1);
	}

	if (ret == -ETIMEDOUT)
		dev_err(gtr_dev->dev,
			"lane %u (protocol %u, instance %u): PLL lock timeout\n",
			gtr_phy->lane, gtr_phy->protocol, gtr_phy->instance);

	return ret;
}
344 
/* Configure PLL and spread-spectrum clock. */
static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
{
	const struct xpsgtr_ssc *ssc;
	u32 step_size;

	/* SSC settings were matched to the ref clock rate at probe time. */
	ssc = gtr_phy->dev->refclk_sscs[gtr_phy->refclk];
	step_size = ssc->step_size;

	xpsgtr_clr_set(gtr_phy->dev, PLL_REF_SEL(gtr_phy->lane),
		       PLL_FREQ_MASK, ssc->pll_ref_clk);

	/* Enable lane clock sharing, if required */
	if (gtr_phy->refclk == gtr_phy->lane)
		xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
			       L0_REF_CLK_SEL_MASK, L0_REF_CLK_LCL_SEL);
	else
		xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
			       L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);

	/* SSC step size [7:0] */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
			   STEP_SIZE_0_MASK, step_size & STEP_SIZE_0_MASK);

	/* SSC step size [15:8] */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_1,
			   STEP_SIZE_1_MASK, step_size & STEP_SIZE_1_MASK);

	/* SSC step size [23:16] */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_2,
			   STEP_SIZE_2_MASK, step_size & STEP_SIZE_2_MASK);

	/* SSC steps [7:0] */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_0_LSB,
			   STEPS_0_MASK, ssc->steps & STEPS_0_MASK);

	/* SSC steps [10:8] (STEP_SIZE_SHIFT doubles as the 8-bit field shift) */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_1_MSB,
			   STEPS_1_MASK,
			   (ssc->steps >> STEP_SIZE_SHIFT) & STEPS_1_MASK);

	/* SSC step size [25:24], plus the force-enable bits */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_3_MSB,
			   STEP_SIZE_3_MASK, (step_size & STEP_SIZE_3_MASK) |
			   FORCE_STEP_SIZE | FORCE_STEPS);
}
394 
395 /* Configure the lane protocol. */
396 static void xpsgtr_lane_set_protocol(struct xpsgtr_phy *gtr_phy)
397 {
398 	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
399 	u8 protocol = gtr_phy->protocol;
400 
401 	switch (gtr_phy->lane) {
402 	case 0:
403 		xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L0_MASK, protocol);
404 		break;
405 	case 1:
406 		xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L1_MASK,
407 			       protocol << ICM_CFG_SHIFT);
408 		break;
409 	case 2:
410 		xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L0_MASK, protocol);
411 		break;
412 	case 3:
413 		xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L1_MASK,
414 			       protocol << ICM_CFG_SHIFT);
415 		break;
416 	default:
417 		/* We already checked 0 <= lane <= 3 */
418 		break;
419 	}
420 }
421 
/* Bypass (de)scrambler and 8b/10b decoder and encoder. */
static void xpsgtr_bypass_scrambler_8b10b(struct xpsgtr_phy *gtr_phy)
{
	/* RX side: disable the descrambler and 8b/10b decoder. */
	xpsgtr_write_phy(gtr_phy, L0_TM_DIG_6, L0_TM_DIS_DESCRAMBLE_DECODER);
	/* TX side: disable the scrambler and 8b/10b encoder. */
	xpsgtr_write_phy(gtr_phy, L0_TX_DIG_61, L0_TM_DISABLE_SCRAMBLE_ENCODER);
}
428 
/* DP-specific initialization. */
static void xpsgtr_phy_init_dp(struct xpsgtr_phy *gtr_phy)
{
	/*
	 * Enable overrides for the DP main and post cursors.
	 * NOTE(review): OVER_DP_POST1 is set without ENABLE_DP_POST1 —
	 * confirm this matches the intended TX parameter override sequence.
	 */
	xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_45,
			 L0_TXPMD_TM_45_OVER_DP_MAIN |
			 L0_TXPMD_TM_45_ENABLE_DP_MAIN |
			 L0_TXPMD_TM_45_OVER_DP_POST1 |
			 L0_TXPMD_TM_45_OVER_DP_POST2 |
			 L0_TXPMD_TM_45_ENABLE_DP_POST2);
	/* Force the TX analog override used for de-emphasis. */
	xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_118,
			 L0_TX_ANA_TM_118_FORCE_17_0);
}
441 
/* SATA-specific initialization. */
static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;

	xpsgtr_bypass_scrambler_8b10b(gtr_phy);

	/* Record in the SIOU which lane carries SATA. */
	writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
}
451 
/* SGMII-specific initialization. */
static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane);
	u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane);

	/* Set SGMII protocol TX and RX bus width to 10 bits. */
	xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val);
	xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val);

	/* Bypass the GT's (de)scrambler and 8b/10b coder for SGMII. */
	xpsgtr_bypass_scrambler_8b10b(gtr_phy);
}
465 
466 /* Configure TX de-emphasis and margining for DP. */
467 static void xpsgtr_phy_configure_dp(struct xpsgtr_phy *gtr_phy, unsigned int pre,
468 				    unsigned int voltage)
469 {
470 	static const u8 voltage_swing[4][4] = {
471 		{ 0x2a, 0x27, 0x24, 0x20 },
472 		{ 0x27, 0x23, 0x20, 0xff },
473 		{ 0x24, 0x20, 0xff, 0xff },
474 		{ 0xff, 0xff, 0xff, 0xff }
475 	};
476 	static const u8 pre_emphasis[4][4] = {
477 		{ 0x02, 0x02, 0x02, 0x02 },
478 		{ 0x01, 0x01, 0x01, 0xff },
479 		{ 0x00, 0x00, 0xff, 0xff },
480 		{ 0xff, 0xff, 0xff, 0xff }
481 	};
482 
483 	xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_48, voltage_swing[pre][voltage]);
484 	xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_18, pre_emphasis[pre][voltage]);
485 }
486 
487 /*
488  * PHY Operations
489  */
490 
491 static bool xpsgtr_phy_init_required(struct xpsgtr_phy *gtr_phy)
492 {
493 	/*
494 	 * As USB may save the snapshot of the states during hibernation, doing
495 	 * phy_init() will put the USB controller into reset, resulting in the
496 	 * losing of the saved snapshot. So try to avoid phy_init() for USB
497 	 * except when gtr_phy->skip_phy_init is false (this happens when FPD is
498 	 * shutdown during suspend or when gt lane is changed from current one)
499 	 */
500 	if (gtr_phy->protocol == ICM_PROTOCOL_USB && gtr_phy->skip_phy_init)
501 		return false;
502 	else
503 		return true;
504 }
505 
/*
 * There is a functional issue in the GT. The TX termination resistance can be
 * out of spec due to a issue in the calibration logic. This is the workaround
 * to fix it, required for XCZU9EG silicon.
 */
static int xpsgtr_phy_tx_term_fix(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	u32 timeout = TIMEOUT_US;
	u32 nsw;

	/* Enabling Test Mode control for CMN Rest */
	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);

	/* Set Test Mode reset */
	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);

	/* Arm the NSW code override before starting calibration. */
	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18, 0x00);
	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, L3_TM_OVERRIDE_NSW_CODE);

	/*
	 * As a part of work around sequence for PMOS calibration fix,
	 * we need to configure any lane ICM_CFG to valid protocol. This
	 * will deassert the CMN_Resetn signal.
	 */
	xpsgtr_lane_set_protocol(gtr_phy);

	/* Clear Test Mode reset */
	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);

	dev_dbg(gtr_dev->dev, "calibrating...\n");

	/* Busy-wait in 1us steps, up to TIMEOUT_US, for calibration done. */
	do {
		u32 reg = xpsgtr_read(gtr_dev, L3_CALIB_DONE_STATUS);

		if ((reg & L3_CALIB_DONE) == L3_CALIB_DONE)
			break;

		if (!--timeout) {
			dev_err(gtr_dev->dev, "calibration time out\n");
			return -ETIMEDOUT;
		}

		udelay(1);
	} while (timeout > 0);

	dev_dbg(gtr_dev->dev, "calibration done\n");

	/* Reading NMOS Register Code */
	nsw = xpsgtr_read(gtr_dev, L0_TXPMA_ST_3) & L0_DN_CALIB_CODE;

	/* Set Test Mode reset */
	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);

	/* Writing NMOS register values back [5:3] */
	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, nsw >> L3_NSW_CALIB_SHIFT);

	/*
	 * Writing NMOS register value [2:0].
	 * NOTE(review): L3_TM_CALIB_DIG19_NSW (0x07) is used as the low-3-bit
	 * mask for this DIG18 write despite its "DIG19" name — confirm
	 * against the GT register map.
	 */
	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18,
		     ((nsw & L3_TM_CALIB_DIG19_NSW) << L3_NSW_SHIFT) |
		     (1 << L3_NSW_PIPE_SHIFT));

	/* Clear Test Mode reset */
	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);

	return 0;
}
573 
574 static int xpsgtr_phy_init(struct phy *phy)
575 {
576 	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
577 	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
578 	int ret = 0;
579 
580 	mutex_lock(&gtr_dev->gtr_mutex);
581 
582 	/* Configure and enable the clock when peripheral phy_init call */
583 	if (clk_prepare_enable(gtr_dev->clk[gtr_phy->refclk]))
584 		goto out;
585 
586 	/* Skip initialization if not required. */
587 	if (!xpsgtr_phy_init_required(gtr_phy))
588 		goto out;
589 
590 	if (gtr_dev->tx_term_fix) {
591 		ret = xpsgtr_phy_tx_term_fix(gtr_phy);
592 		if (ret < 0)
593 			goto out;
594 
595 		gtr_dev->tx_term_fix = false;
596 	}
597 
598 	/* Enable coarse code saturation limiting logic. */
599 	xpsgtr_write_phy(gtr_phy, L0_TM_PLL_DIG_37, L0_TM_COARSE_CODE_LIMIT);
600 
601 	/*
602 	 * Configure the PLL, the lane protocol, and perform protocol-specific
603 	 * initialization.
604 	 */
605 	xpsgtr_configure_pll(gtr_phy);
606 	xpsgtr_lane_set_protocol(gtr_phy);
607 
608 	switch (gtr_phy->protocol) {
609 	case ICM_PROTOCOL_DP:
610 		xpsgtr_phy_init_dp(gtr_phy);
611 		break;
612 
613 	case ICM_PROTOCOL_SATA:
614 		xpsgtr_phy_init_sata(gtr_phy);
615 		break;
616 
617 	case ICM_PROTOCOL_SGMII:
618 		xpsgtr_phy_init_sgmii(gtr_phy);
619 		break;
620 	}
621 
622 out:
623 	mutex_unlock(&gtr_dev->gtr_mutex);
624 	return ret;
625 }
626 
static int xpsgtr_phy_exit(struct phy *phy)
{
	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;

	/* Force a full re-init the next time phy_init() runs on this lane. */
	gtr_phy->skip_phy_init = false;

	/* Disable only the reference clock that phy_init() enabled for this lane. */
	clk_disable_unprepare(gtr_dev->clk[gtr_phy->refclk]);

	return 0;
}
639 
static int xpsgtr_phy_power_on(struct phy *phy)
{
	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);

	/* Lanes that skipped phy_init() have no PLL lock to wait for. */
	if (!xpsgtr_phy_init_required(gtr_phy))
		return 0;

	return xpsgtr_wait_pll_lock(phy);
}
650 
651 static int xpsgtr_phy_configure(struct phy *phy, union phy_configure_opts *opts)
652 {
653 	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
654 
655 	if (gtr_phy->protocol != ICM_PROTOCOL_DP)
656 		return 0;
657 
658 	xpsgtr_phy_configure_dp(gtr_phy, opts->dp.pre[0], opts->dp.voltage[0]);
659 
660 	return 0;
661 }
662 
/* Per-lane PHY operations registered with the generic PHY framework. */
static const struct phy_ops xpsgtr_phyops = {
	.init		= xpsgtr_phy_init,
	.exit		= xpsgtr_phy_exit,
	.power_on	= xpsgtr_phy_power_on,
	.configure	= xpsgtr_phy_configure,
	.owner		= THIS_MODULE,
};
670 
671 /*
672  * OF Xlate Support
673  */
674 
675 /* Set the lane protocol and instance based on the PHY type and instance number. */
676 static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type,
677 				unsigned int phy_instance)
678 {
679 	unsigned int num_phy_types;
680 
681 	switch (phy_type) {
682 	case PHY_TYPE_SATA:
683 		num_phy_types = 2;
684 		gtr_phy->protocol = ICM_PROTOCOL_SATA;
685 		break;
686 	case PHY_TYPE_USB3:
687 		num_phy_types = 2;
688 		gtr_phy->protocol = ICM_PROTOCOL_USB;
689 		break;
690 	case PHY_TYPE_DP:
691 		num_phy_types = 2;
692 		gtr_phy->protocol = ICM_PROTOCOL_DP;
693 		break;
694 	case PHY_TYPE_PCIE:
695 		num_phy_types = 4;
696 		gtr_phy->protocol = ICM_PROTOCOL_PCIE;
697 		break;
698 	case PHY_TYPE_SGMII:
699 		num_phy_types = 4;
700 		gtr_phy->protocol = ICM_PROTOCOL_SGMII;
701 		break;
702 	default:
703 		return -EINVAL;
704 	}
705 
706 	if (phy_instance >= num_phy_types)
707 		return -EINVAL;
708 
709 	gtr_phy->instance = phy_instance;
710 	return 0;
711 }
712 
/*
 * Valid combinations of controllers and lanes (Interconnect Matrix). Each
 * "instance" represents one controller for a lane. For PCIe and DP, the
 * "instance" is the logical lane in the link. For SATA, USB, and SGMII,
 * the instance is the index of the controller.
 *
 * Rows are indexed by lane number, columns by protocol in the order of the
 * header comment below.
 *
 * This information is only used to validate the devicetree reference, and is
 * not used when programming the hardware.
 */
static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = {
	/* PCIe, SATA, USB, DP, SGMII */
	{ 0, 0, 0, 1, 0 }, /* Lane 0 */
	{ 1, 1, 0, 0, 1 }, /* Lane 1 */
	{ 2, 0, 0, 1, 2 }, /* Lane 2 */
	{ 3, 1, 1, 0, 3 }, /* Lane 3 */
};
729 
/* Translate OF phandle and args to PHY instance. */
static struct phy *xpsgtr_xlate(struct device *dev,
				const struct of_phandle_args *args)
{
	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
	struct xpsgtr_phy *gtr_phy;
	unsigned int phy_instance;
	unsigned int phy_lane;
	unsigned int phy_type;
	unsigned int refclk;
	unsigned int i;
	int ret;

	/* Expected DT cells: <lane type instance refclk>. */
	if (args->args_count != 4) {
		dev_err(dev, "Invalid number of cells in 'phy' property\n");
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Get the PHY parameters from the OF arguments and derive the lane
	 * type.
	 */
	phy_lane = args->args[0];
	if (phy_lane >= ARRAY_SIZE(gtr_dev->phys)) {
		dev_err(dev, "Invalid lane number %u\n", phy_lane);
		return ERR_PTR(-ENODEV);
	}

	gtr_phy = &gtr_dev->phys[phy_lane];
	phy_type = args->args[1];
	phy_instance = args->args[2];

	/* Hold the PHY device mutex while updating this lane's configuration. */
	guard(mutex)(&gtr_phy->phy->mutex);
	ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
	if (ret < 0) {
		dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
		return ERR_PTR(ret);
	}

	/* The refclk must have been provided (matched against ssc_lookup). */
	refclk = args->args[3];
	if (refclk >= ARRAY_SIZE(gtr_dev->refclk_sscs) ||
	    !gtr_dev->refclk_sscs[refclk]) {
		dev_err(dev, "Invalid reference clock number %u\n", refclk);
		return ERR_PTR(-EINVAL);
	}

	gtr_phy->refclk = refclk;

	/*
	 * Ensure that the Interconnect Matrix is obeyed, i.e a given lane type
	 * is allowed to operate on the lane.
	 *
	 * NOTE(review): the instance is accepted if it appears in any column
	 * of the lane's row, not only the column of the requested protocol —
	 * confirm this matches the intended ICM validation.
	 */
	for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
		if (icm_matrix[phy_lane][i] == gtr_phy->instance)
			return gtr_phy->phy;
	}

	return ERR_PTR(-EINVAL);
}
789 
790 /*
791  * Power Management
792  */
793 
/* Record the lane protocol config so resume can tell whether it was lost. */
static int xpsgtr_runtime_suspend(struct device *dev)
{
	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);

	/* Save the snapshot ICM_CFG registers. */
	gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
	gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);

	return 0;
}
804 
805 static int xpsgtr_runtime_resume(struct device *dev)
806 {
807 	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
808 	unsigned int icm_cfg0, icm_cfg1;
809 	unsigned int i;
810 	bool skip_phy_init;
811 
812 	icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
813 	icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
814 
815 	/* Return if no GT lanes got configured before suspend. */
816 	if (!gtr_dev->saved_icm_cfg0 && !gtr_dev->saved_icm_cfg1)
817 		return 0;
818 
819 	/* Check if the ICM configurations changed after suspend. */
820 	if (icm_cfg0 == gtr_dev->saved_icm_cfg0 &&
821 	    icm_cfg1 == gtr_dev->saved_icm_cfg1)
822 		skip_phy_init = true;
823 	else
824 		skip_phy_init = false;
825 
826 	/* Update the skip_phy_init for all gtr_phy instances. */
827 	for (i = 0; i < ARRAY_SIZE(gtr_dev->phys); i++)
828 		gtr_dev->phys[i].skip_phy_init = skip_phy_init;
829 
830 	return 0;
831 }
832 
833 static DEFINE_RUNTIME_DEV_PM_OPS(xpsgtr_pm_ops, xpsgtr_runtime_suspend,
834 				 xpsgtr_runtime_resume, NULL);
835 /*
836  * Probe & Platform Driver
837  */
838 
/* Look up each optional "refN" clock and match its rate to the SSC table. */
static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
{
	unsigned int refclk;

	for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
		unsigned long rate;
		unsigned int i;
		struct clk *clk;
		char name[8];

		/* Reference clocks are optional; absent ones stay NULL. */
		snprintf(name, sizeof(name), "ref%u", refclk);
		clk = devm_clk_get_optional(gtr_dev->dev, name);
		if (IS_ERR(clk)) {
			return dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
					     "Failed to get ref clock %u\n",
					     refclk);
		}

		if (!clk)
			continue;

		gtr_dev->clk[refclk] = clk;

		/*
		 * Get the spread spectrum (SSC) settings for the reference
		 * clock rate.
		 */
		rate = clk_get_rate(clk);

		for (i = 0 ; i < ARRAY_SIZE(ssc_lookup); i++) {
			/* Allow an error of 100 ppm */
			unsigned long error = ssc_lookup[i].refclk_rate / 10000;

			if (abs(rate - ssc_lookup[i].refclk_rate) < error) {
				gtr_dev->refclk_sscs[refclk] = &ssc_lookup[i];
				break;
			}
		}

		/* A provided clock whose rate has no SSC entry is unsupported. */
		if (i == ARRAY_SIZE(ssc_lookup)) {
			dev_err(gtr_dev->dev,
				"Invalid rate %lu for reference clock %u\n",
				rate, refclk);
			return -EINVAL;
		}
	}

	return 0;
}
888 
/* Map resources, create one PHY per lane, and register the PHY provider. */
static int xpsgtr_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct xpsgtr_dev *gtr_dev;
	struct phy_provider *provider;
	unsigned int port;
	int ret;

	gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
	if (!gtr_dev)
		return -ENOMEM;

	gtr_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, gtr_dev);

	mutex_init(&gtr_dev->gtr_mutex);

	/* The TX termination workaround is only honored on the base compatible. */
	if (of_device_is_compatible(np, "xlnx,zynqmp-psgtr"))
		gtr_dev->tx_term_fix =
			of_property_read_bool(np, "xlnx,tx-termination-fix");

	/* Acquire resources. */
	gtr_dev->serdes = devm_platform_ioremap_resource_byname(pdev, "serdes");
	if (IS_ERR(gtr_dev->serdes))
		return PTR_ERR(gtr_dev->serdes);

	gtr_dev->siou = devm_platform_ioremap_resource_byname(pdev, "siou");
	if (IS_ERR(gtr_dev->siou))
		return PTR_ERR(gtr_dev->siou);

	ret = xpsgtr_get_ref_clocks(gtr_dev);
	if (ret)
		return ret;

	/* Create PHYs. */
	for (port = 0; port < ARRAY_SIZE(gtr_dev->phys); ++port) {
		struct xpsgtr_phy *gtr_phy = &gtr_dev->phys[port];
		struct phy *phy;

		gtr_phy->lane = port;
		gtr_phy->dev = gtr_dev;

		phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
		if (IS_ERR(phy)) {
			dev_err(&pdev->dev, "failed to create PHY\n");
			return PTR_ERR(phy);
		}

		gtr_phy->phy = phy;
		phy_set_drvdata(phy, gtr_phy);
	}

	/* Register the PHY provider. */
	provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
	if (IS_ERR(provider)) {
		dev_err(&pdev->dev, "registering provider failed\n");
		return PTR_ERR(provider);
	}

	pm_runtime_set_active(gtr_dev->dev);
	pm_runtime_enable(gtr_dev->dev);

	/* Hold a runtime PM reference; dropped in xpsgtr_remove(). */
	ret = pm_runtime_resume_and_get(gtr_dev->dev);
	if (ret < 0) {
		pm_runtime_disable(gtr_dev->dev);
		return ret;
	}

	return 0;
}
959 
static void xpsgtr_remove(struct platform_device *pdev)
{
	struct xpsgtr_dev *gtr_dev = platform_get_drvdata(pdev);

	pm_runtime_disable(gtr_dev->dev);
	/* Drop the reference taken in probe without triggering a suspend. */
	pm_runtime_put_noidle(gtr_dev->dev);
	pm_runtime_set_suspended(gtr_dev->dev);
}
968 
/*
 * Match table. Note that only the plain "xlnx,zynqmp-psgtr" compatible
 * honors the "xlnx,tx-termination-fix" property (see xpsgtr_probe()).
 */
static const struct of_device_id xpsgtr_of_match[] = {
	{ .compatible = "xlnx,zynqmp-psgtr", },
	{ .compatible = "xlnx,zynqmp-psgtr-v1.1", },
	{},
};
MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
975 
/* Platform driver glue; PM ops provide the runtime suspend/resume hooks. */
static struct platform_driver xpsgtr_driver = {
	.probe = xpsgtr_probe,
	.remove_new = xpsgtr_remove,
	.driver = {
		.name = "xilinx-psgtr",
		.of_match_table	= xpsgtr_of_match,
		.pm =  pm_ptr(&xpsgtr_pm_ops),
	},
};
985 
986 module_platform_driver(xpsgtr_driver);
987 
988 MODULE_AUTHOR("Xilinx Inc.");
989 MODULE_LICENSE("GPL v2");
990 MODULE_DESCRIPTION("Xilinx ZynqMP High speed Gigabit Transceiver");
991