// SPDX-License-Identifier: GPL-2.0
/*
 * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
 *
 * Copyright (C) 2018-2020 Xilinx Inc.
 *
 * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
 * Author: Subbaraya Sundeep <sundeep.lkml@gmail.com>
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This driver is currently tested with USB, SGMII, SATA and DisplayPort.
 * PCIe should also work, but support is experimental as of now.
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <dt-bindings/phy/phy.h>

/*
 * Lane Registers
 */

/* TX De-emphasis parameters */
#define L0_TX_ANA_TM_18			0x0048
#define L0_TX_ANA_TM_118		0x01d8
#define L0_TX_ANA_TM_118_FORCE_17_0	BIT(0)

/* DN Resistor calibration code parameters */
#define L0_TXPMA_ST_3			0x0b0c
#define L0_DN_CALIB_CODE		0x3f

/* PMA control parameters */
#define L0_TXPMD_TM_45			0x0cb4
#define L0_TXPMD_TM_48			0x0cc0
#define L0_TXPMD_TM_45_OVER_DP_MAIN	BIT(0)
#define L0_TXPMD_TM_45_ENABLE_DP_MAIN	BIT(1)
#define L0_TXPMD_TM_45_OVER_DP_POST1	BIT(2)
#define L0_TXPMD_TM_45_ENABLE_DP_POST1	BIT(3)
#define L0_TXPMD_TM_45_OVER_DP_POST2	BIT(4)
#define L0_TXPMD_TM_45_ENABLE_DP_POST2	BIT(5)

/* PCS control parameters */
#define L0_TM_DIG_6			0x106c
#define L0_TM_DIS_DESCRAMBLE_DECODER	0x0f
#define L0_TX_DIG_61			0x00f4
#define L0_TM_DISABLE_SCRAMBLE_ENCODER	0x0f

/* PLL Test Mode register parameters */
#define L0_TM_PLL_DIG_37		0x2094
#define L0_TM_COARSE_CODE_LIMIT		0x10

/* PLL SSC step size offsets */
#define L0_PLL_SS_STEPS_0_LSB		0x2368
#define L0_PLL_SS_STEPS_1_MSB		0x236c
#define L0_PLL_SS_STEP_SIZE_0_LSB	0x2370
#define L0_PLL_SS_STEP_SIZE_1		0x2374
#define L0_PLL_SS_STEP_SIZE_2		0x2378
#define L0_PLL_SS_STEP_SIZE_3_MSB	0x237c
#define L0_PLL_STATUS_READ_1		0x23e4

/* SSC step size parameters */
#define STEP_SIZE_0_MASK		0xff
#define STEP_SIZE_1_MASK		0xff
#define STEP_SIZE_2_MASK		0xff
#define STEP_SIZE_3_MASK		0x3
#define STEP_SIZE_SHIFT			8
#define FORCE_STEP_SIZE			0x10
#define FORCE_STEPS			0x20
#define STEPS_0_MASK			0xff
#define STEPS_1_MASK			0x07
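
/*
 * The SSC step size is programmed as a 26-bit value split across four
 * registers: bits [7:0], [15:8] and [23:16] each fill an 8-bit register,
 * and bits [25:24] go into the MSB register together with the FORCE_*
 * override bits (see xpsgtr_configure_pll()).
 */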

/* Reference clock selection parameters */
#define L0_Ln_REF_CLK_SEL(n)		(0x2860 + (n) * 4)
#define L0_REF_CLK_LCL_SEL		BIT(7)
#define L0_REF_CLK_SEL_MASK		0x9f

/* Calibration digital logic parameters */
#define L3_TM_CALIB_DIG19		0xec4c
#define L3_CALIB_DONE_STATUS		0xef14
#define L3_TM_CALIB_DIG18		0xec48
#define L3_TM_CALIB_DIG19_NSW		0x07
#define L3_TM_CALIB_DIG18_NSW		0xe0
#define L3_TM_OVERRIDE_NSW_CODE		0x20
#define L3_CALIB_DONE			0x02
#define L3_NSW_SHIFT			5
#define L3_NSW_PIPE_SHIFT		4
#define L3_NSW_CALIB_SHIFT		3

#define PHY_REG_OFFSET			0x4000

/*
 * Global Registers
 */

/* Refclk selection parameters */
#define PLL_REF_SEL(n)			(0x10000 + (n) * 4)
#define PLL_FREQ_MASK			0x1f
#define PLL_STATUS_LOCKED		0x10

/* Inter Connect Matrix parameters */
#define ICM_CFG0			0x10010
#define ICM_CFG1			0x10014
#define ICM_CFG0_L0_MASK		0x07
#define ICM_CFG0_L1_MASK		0x70
#define ICM_CFG1_L2_MASK		0x07
#define ICM_CFG1_L3_MASK		0x70
#define ICM_CFG_SHIFT			4
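
/*
 * Each lane's protocol occupies a 3-bit field: lane 0 in ICM_CFG0[2:0],
 * lane 1 in ICM_CFG0[6:4], lane 2 in ICM_CFG1[2:0] and lane 3 in
 * ICM_CFG1[6:4] (see xpsgtr_lane_set_protocol()).
 */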

/* Inter Connect Matrix allowed protocols */
#define ICM_PROTOCOL_PD			0x0
#define ICM_PROTOCOL_PCIE		0x1
#define ICM_PROTOCOL_SATA		0x2
#define ICM_PROTOCOL_USB		0x3
#define ICM_PROTOCOL_DP			0x4
#define ICM_PROTOCOL_SGMII		0x5

static const char *const xpsgtr_icm_str[] = {
	[ICM_PROTOCOL_PD] = "none",
	[ICM_PROTOCOL_PCIE] = "PCIe",
	[ICM_PROTOCOL_SATA] = "SATA",
	[ICM_PROTOCOL_USB] = "USB",
	[ICM_PROTOCOL_DP] = "DisplayPort",
	[ICM_PROTOCOL_SGMII] = "SGMII",
};

/* Test Mode common reset control parameters */
#define TM_CMN_RST			0x10018
#define TM_CMN_RST_EN			0x1
#define TM_CMN_RST_SET			0x2
#define TM_CMN_RST_MASK			0x3

/* Bus width parameters */
#define TX_PROT_BUS_WIDTH		0x10040
#define RX_PROT_BUS_WIDTH		0x10044
#define PROT_BUS_WIDTH_10		0x0
#define PROT_BUS_WIDTH_20		0x1
#define PROT_BUS_WIDTH_40		0x2
#define PROT_BUS_WIDTH_SHIFT(n)		((n) * 2)
#define PROT_BUS_WIDTH_MASK(n)		GENMASK((n) * 2 + 1, (n) * 2)
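
/*
 * Each lane's bus width occupies a 2-bit field; for example, lane 2 uses
 * PROT_BUS_WIDTH_SHIFT(2) = 4 and PROT_BUS_WIDTH_MASK(2) = GENMASK(5, 4)
 * = 0x30 in both the TX and RX bus width registers.
 */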

/* Number of GT lanes */
#define NUM_LANES			4

/* SIOU SATA control register */
#define SATA_CONTROL_OFFSET		0x0100

/* Number of controllers (one per protocol) per lane */
#define CONTROLLERS_PER_LANE		5

/* Timeout values */
#define TIMEOUT_US			1000

/* Lane 0/1/2/3 offset */
#define DIG_8(n)		((0x4000 * (n)) + 0x1074)
#define ILL13(n)		((0x4000 * (n)) + 0x1994)
#define DIG_10(n)		((0x4000 * (n)) + 0x107c)
#define RST_DLY(n)		((0x4000 * (n)) + 0x19a4)
#define BYP_15(n)		((0x4000 * (n)) + 0x1038)
#define BYP_12(n)		((0x4000 * (n)) + 0x102c)
#define MISC3(n)		((0x4000 * (n)) + 0x19ac)
#define EQ11(n)			((0x4000 * (n)) + 0x1978)

static const u32 save_reg_address[] = {
	/* Lane 0/1/2/3 Register */
	DIG_8(0), ILL13(0), DIG_10(0), RST_DLY(0), BYP_15(0), BYP_12(0), MISC3(0), EQ11(0),
	DIG_8(1), ILL13(1), DIG_10(1), RST_DLY(1), BYP_15(1), BYP_12(1), MISC3(1), EQ11(1),
	DIG_8(2), ILL13(2), DIG_10(2), RST_DLY(2), BYP_15(2), BYP_12(2), MISC3(2), EQ11(2),
	DIG_8(3), ILL13(3), DIG_10(3), RST_DLY(3), BYP_15(3), BYP_12(3), MISC3(3), EQ11(3),
};
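
/*
 * Each lane's copy of these registers sits at a 0x4000 stride in the SERDES
 * address space; e.g. DIG_8(2) resolves to 0x4000 * 2 + 0x1074 = 0x9074.
 */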

struct xpsgtr_dev;

/**
 * struct xpsgtr_ssc - structure to hold SSC settings for a lane
 * @refclk_rate: PLL reference clock frequency
 * @pll_ref_clk: value to be written to register for corresponding ref clk rate
 * @steps: number of steps of SSC (Spread Spectrum Clock)
 * @step_size: step size of each step
 */
struct xpsgtr_ssc {
	u32 refclk_rate;
	u8  pll_ref_clk;
	u32 steps;
	u32 step_size;
};

/**
 * struct xpsgtr_phy - representation of a lane
 * @phy: pointer to the kernel PHY device
 * @instance: instance of the protocol type (such as the lane within a
 *            protocol, or the USB/Ethernet controller)
 * @lane: lane number
 * @protocol: protocol in which the lane operates
 * @skip_phy_init: skip phy_init() if true
 * @dev: pointer to the xpsgtr_dev instance
 * @refclk: reference clock index
 */
struct xpsgtr_phy {
	struct phy *phy;
	u8 instance;
	u8 lane;
	u8 protocol;
	bool skip_phy_init;
	struct xpsgtr_dev *dev;
	unsigned int refclk;
};

/**
 * struct xpsgtr_dev - representation of a ZynqMP GT device
 * @dev: pointer to device
 * @serdes: serdes base address
 * @siou: siou base address
 * @gtr_mutex: mutex for locking
 * @phys: PHY lanes
 * @clk: reference clocks
 * @tx_term_fix: fix for GT issue
 * @saved_icm_cfg0: stored value of ICM CFG0 register
 * @saved_icm_cfg1: stored value of ICM CFG1 register
 * @saved_regs: registers to be saved/restored during suspend/resume
 */
struct xpsgtr_dev {
	struct device *dev;
	void __iomem *serdes;
	void __iomem *siou;
	struct mutex gtr_mutex; /* mutex for locking */
	struct xpsgtr_phy phys[NUM_LANES];
	struct clk *clk[NUM_LANES];
	bool tx_term_fix;
	unsigned int saved_icm_cfg0;
	unsigned int saved_icm_cfg1;
	u32 *saved_regs;
};

/*
 * Configuration Data
 */

/* lookup table to hold all settings needed for a ref clock frequency */
static const struct xpsgtr_ssc ssc_lookup[] = {
	{  19200000, 0x05,  608, 264020 },
	{  20000000, 0x06,  634, 243454 },
	{  24000000, 0x07,  760, 168973 },
	{  26000000, 0x08,  824, 143860 },
	{  27000000, 0x09,  856,  86551 },
	{  38400000, 0x0a, 1218,  65896 },
	{  40000000, 0x0b,  634, 243454 },
	{  52000000, 0x0c,  824, 143860 },
	{ 100000000, 0x0d, 1058,  87533 },
	{ 108000000, 0x0e,  856,  86551 },
	{ 125000000, 0x0f,  992, 119497 },
	{ 135000000, 0x10, 1070,  55393 },
	{ 150000000, 0x11,  792, 187091 }
};
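
/*
 * xpsgtr_find_sscs() matches the measured clock rate against this table with
 * a tolerance of 100 ppm (rate / 10000); e.g. the nominal 27 MHz entry
 * accepts rates within +/- 2.7 kHz.
 */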

/*
 * I/O Accessors
 */

static inline u32 xpsgtr_read(struct xpsgtr_dev *gtr_dev, u32 reg)
{
	return readl(gtr_dev->serdes + reg);
}

static inline void xpsgtr_write(struct xpsgtr_dev *gtr_dev, u32 reg, u32 value)
{
	writel(value, gtr_dev->serdes + reg);
}

static inline void xpsgtr_clr_set(struct xpsgtr_dev *gtr_dev, u32 reg,
				  u32 clr, u32 set)
{
	u32 value = xpsgtr_read(gtr_dev, reg);

	value &= ~clr;
	value |= set;
	xpsgtr_write(gtr_dev, reg, value);
}

static inline u32 xpsgtr_read_phy(struct xpsgtr_phy *gtr_phy, u32 reg)
{
	void __iomem *addr = gtr_phy->dev->serdes
			   + gtr_phy->lane * PHY_REG_OFFSET + reg;

	return readl(addr);
}

static inline void xpsgtr_write_phy(struct xpsgtr_phy *gtr_phy,
				    u32 reg, u32 value)
{
	void __iomem *addr = gtr_phy->dev->serdes
			   + gtr_phy->lane * PHY_REG_OFFSET + reg;

	writel(value, addr);
}

static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
				      u32 reg, u32 clr, u32 set)
{
	void __iomem *addr = gtr_phy->dev->serdes
			   + gtr_phy->lane * PHY_REG_OFFSET + reg;

	writel((readl(addr) & ~clr) | set, addr);
}

/**
 * xpsgtr_save_lane_regs - Saves registers on suspend
 * @gtr_dev: pointer to phy controller context structure
 */
static void xpsgtr_save_lane_regs(struct xpsgtr_dev *gtr_dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(save_reg_address); i++)
		gtr_dev->saved_regs[i] = xpsgtr_read(gtr_dev,
						     save_reg_address[i]);
}

/**
 * xpsgtr_restore_lane_regs - Restores registers on resume
 * @gtr_dev: pointer to phy controller context structure
 */
static void xpsgtr_restore_lane_regs(struct xpsgtr_dev *gtr_dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(save_reg_address); i++)
		xpsgtr_write(gtr_dev, save_reg_address[i],
			     gtr_dev->saved_regs[i]);
}

/*
 * Hardware Configuration
 */

/* Wait for the PLL to lock (with a timeout). */
static int xpsgtr_wait_pll_lock(struct phy *phy)
{
	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	unsigned int timeout = TIMEOUT_US;
	u8 protocol = gtr_phy->protocol;
	int ret;

	dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n");

	/*
	 * For DP and PCIe, only the instance 0 PLL is used. Switch to that phy
	 * so we wait on the right PLL.
	 */
	if ((protocol == ICM_PROTOCOL_DP || protocol == ICM_PROTOCOL_PCIE) &&
	    gtr_phy->instance) {
		int i;

		for (i = 0; i < NUM_LANES; i++) {
			gtr_phy = &gtr_dev->phys[i];

			if (gtr_phy->protocol == protocol && !gtr_phy->instance)
				goto got_phy;
		}

		return -EBUSY;
	}

got_phy:
	while (1) {
		u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);

		if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED) {
			ret = 0;
			break;
		}

		if (--timeout == 0) {
			ret = -ETIMEDOUT;
			break;
		}

		udelay(1);
	}

	if (ret == -ETIMEDOUT)
		dev_err(gtr_dev->dev,
			"lane %u (protocol %u, instance %u): PLL lock timeout\n",
			gtr_phy->lane, gtr_phy->protocol, gtr_phy->instance);

	return ret;
}

/* Get the spread spectrum (SSC) settings for the reference clock rate */
static const struct xpsgtr_ssc *xpsgtr_find_sscs(struct xpsgtr_phy *gtr_phy)
{
	unsigned long rate;
	struct clk *clk;
	unsigned int i;

	clk = gtr_phy->dev->clk[gtr_phy->refclk];
	rate = clk_get_rate(clk);

	for (i = 0; i < ARRAY_SIZE(ssc_lookup); i++) {
		/* Allow an error of 100 ppm */
		unsigned long error = ssc_lookup[i].refclk_rate / 10000;

		if (abs(rate - ssc_lookup[i].refclk_rate) < error)
			return &ssc_lookup[i];
	}

	dev_err(gtr_phy->dev->dev, "Invalid rate %lu for reference clock %u\n",
		rate, gtr_phy->refclk);

	return NULL;
}

/* Configure PLL and spread-spectrum clock. */
static int xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
{
	const struct xpsgtr_ssc *ssc;
	u32 step_size;

	ssc = xpsgtr_find_sscs(gtr_phy);
	if (!ssc)
		return -EINVAL;

	step_size = ssc->step_size;

	xpsgtr_clr_set(gtr_phy->dev, PLL_REF_SEL(gtr_phy->lane),
		       PLL_FREQ_MASK, ssc->pll_ref_clk);

	/* Enable lane clock sharing, if required */
	if (gtr_phy->refclk == gtr_phy->lane)
		xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
			       L0_REF_CLK_SEL_MASK, L0_REF_CLK_LCL_SEL);
	else
		xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
			       L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);

	/* SSC step size [7:0] */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
			   STEP_SIZE_0_MASK, step_size & STEP_SIZE_0_MASK);

	/* SSC step size [15:8] */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_1,
			   STEP_SIZE_1_MASK, step_size & STEP_SIZE_1_MASK);

	/* SSC step size [23:16] */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_2,
			   STEP_SIZE_2_MASK, step_size & STEP_SIZE_2_MASK);

	/* SSC steps [7:0] */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_0_LSB,
			   STEPS_0_MASK, ssc->steps & STEPS_0_MASK);

	/* SSC steps [10:8] */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_1_MSB,
			   STEPS_1_MASK,
			   (ssc->steps >> STEP_SIZE_SHIFT) & STEPS_1_MASK);

	/* SSC step size [25:24], with the FORCE overrides */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_3_MSB,
			   STEP_SIZE_3_MASK, (step_size & STEP_SIZE_3_MASK) |
			   FORCE_STEP_SIZE | FORCE_STEPS);

	return 0;
}

/* Configure the lane protocol. */
static void xpsgtr_lane_set_protocol(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	u8 protocol = gtr_phy->protocol;

	switch (gtr_phy->lane) {
	case 0:
		xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L0_MASK, protocol);
		break;
	case 1:
		xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L1_MASK,
			       protocol << ICM_CFG_SHIFT);
		break;
	case 2:
		xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG1_L2_MASK, protocol);
		break;
	case 3:
		xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG1_L3_MASK,
			       protocol << ICM_CFG_SHIFT);
		break;
	default:
		/* We already checked 0 <= lane <= 3 */
		break;
	}
}

/* Bypass (de)scrambler and 8b/10b decoder and encoder. */
static void xpsgtr_bypass_scrambler_8b10b(struct xpsgtr_phy *gtr_phy)
{
	xpsgtr_write_phy(gtr_phy, L0_TM_DIG_6, L0_TM_DIS_DESCRAMBLE_DECODER);
	xpsgtr_write_phy(gtr_phy, L0_TX_DIG_61, L0_TM_DISABLE_SCRAMBLE_ENCODER);
}

/* DP-specific initialization. */
static void xpsgtr_phy_init_dp(struct xpsgtr_phy *gtr_phy)
{
	xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_45,
			 L0_TXPMD_TM_45_OVER_DP_MAIN |
			 L0_TXPMD_TM_45_ENABLE_DP_MAIN |
			 L0_TXPMD_TM_45_OVER_DP_POST1 |
			 L0_TXPMD_TM_45_OVER_DP_POST2 |
			 L0_TXPMD_TM_45_ENABLE_DP_POST2);
	xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_118,
			 L0_TX_ANA_TM_118_FORCE_17_0);
}

/* SATA-specific initialization. */
static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;

	xpsgtr_bypass_scrambler_8b10b(gtr_phy);

	writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
}

/* SGMII-specific initialization. */
static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane);
	u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane);

	/* Set SGMII protocol TX and RX bus width to 10 bits. */
	xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val);
	xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val);

	xpsgtr_bypass_scrambler_8b10b(gtr_phy);
}

/* Configure TX de-emphasis and margining for DP. */
static void xpsgtr_phy_configure_dp(struct xpsgtr_phy *gtr_phy, unsigned int pre,
				    unsigned int voltage)
{
	static const u8 voltage_swing[4][4] = {
		{ 0x2a, 0x27, 0x24, 0x20 },
		{ 0x27, 0x23, 0x20, 0xff },
		{ 0x24, 0x20, 0xff, 0xff },
		{ 0xff, 0xff, 0xff, 0xff }
	};
	static const u8 pre_emphasis[4][4] = {
		{ 0x02, 0x02, 0x02, 0x02 },
		{ 0x01, 0x01, 0x01, 0xff },
		{ 0x00, 0x00, 0xff, 0xff },
		{ 0xff, 0xff, 0xff, 0xff }
	};
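
	/*
	 * Entries of 0xff presumably mark (pre-emphasis, voltage swing)
	 * combinations that are not supported: valid entries only exist
	 * where the two levels sum to 3 or less, matching the DP limit on
	 * combined swing and pre-emphasis, and pre-emphasis level 3 has no
	 * valid entry at all.
	 */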

	xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_48, voltage_swing[pre][voltage]);
	xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_18, pre_emphasis[pre][voltage]);
}

/*
 * PHY Operations
 */

static bool xpsgtr_phy_init_required(struct xpsgtr_phy *gtr_phy)
{
	/*
	 * USB may save a snapshot of its state for hibernation; running
	 * phy_init() would put the USB controller into reset and lose that
	 * snapshot. Avoid phy_init() for USB unless gtr_phy->skip_phy_init
	 * is false (which happens when the FPD was shut down during suspend
	 * or when the GT lane assignment has changed).
	 */
	if (gtr_phy->protocol == ICM_PROTOCOL_USB && gtr_phy->skip_phy_init)
		return false;
	else
		return true;
}

/*
 * There is a functional issue in the GT. The TX termination resistance can be
 * out of spec due to an issue in the calibration logic. This is the workaround
 * to fix it, required for XCZU9EG silicon.
 */
static int xpsgtr_phy_tx_term_fix(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	u32 timeout = TIMEOUT_US;
	u32 nsw;

	/* Enable Test Mode control for CMN Reset */
	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);

	/* Set Test Mode reset */
	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);

	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18, 0x00);
	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, L3_TM_OVERRIDE_NSW_CODE);

	/*
	 * As part of the workaround sequence for the PMOS calibration fix, we
	 * need to configure any lane's ICM_CFG to a valid protocol. This
	 * deasserts the CMN_Resetn signal.
	 */
	xpsgtr_lane_set_protocol(gtr_phy);

	/* Clear Test Mode reset */
	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);

	dev_dbg(gtr_dev->dev, "calibrating...\n");

	do {
		u32 reg = xpsgtr_read(gtr_dev, L3_CALIB_DONE_STATUS);

		if ((reg & L3_CALIB_DONE) == L3_CALIB_DONE)
			break;

		if (!--timeout) {
			dev_err(gtr_dev->dev, "calibration timed out\n");
			return -ETIMEDOUT;
		}

		udelay(1);
	} while (timeout > 0);

	dev_dbg(gtr_dev->dev, "calibration done\n");

	/* Read the NMOS register code */
	nsw = xpsgtr_read(gtr_dev, L0_TXPMA_ST_3) & L0_DN_CALIB_CODE;

	/* Set Test Mode reset */
	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);

	/* Write the NMOS register values back, bits [5:3] */
	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, nsw >> L3_NSW_CALIB_SHIFT);

	/* Write the NMOS register value, bits [2:0] */
	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18,
		     ((nsw & L3_TM_CALIB_DIG19_NSW) << L3_NSW_SHIFT) |
		     (1 << L3_NSW_PIPE_SHIFT));

	/* Clear Test Mode reset */
	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);

	return 0;
}

static int xpsgtr_phy_init(struct phy *phy)
{
	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	int ret;

	mutex_lock(&gtr_dev->gtr_mutex);

	/* Enable the reference clock when the consumer calls phy_init(). */
	ret = clk_prepare_enable(gtr_dev->clk[gtr_phy->refclk]);
	if (ret)
		goto out;

	/* Skip initialization if not required. */
	if (!xpsgtr_phy_init_required(gtr_phy))
		goto out;

	if (gtr_dev->tx_term_fix) {
		ret = xpsgtr_phy_tx_term_fix(gtr_phy);
		if (ret < 0)
			goto out;

		gtr_dev->tx_term_fix = false;
	}

	/* Enable coarse code saturation limiting logic. */
	xpsgtr_write_phy(gtr_phy, L0_TM_PLL_DIG_37, L0_TM_COARSE_CODE_LIMIT);

	/*
	 * Configure the PLL, the lane protocol, and perform protocol-specific
	 * initialization.
	 */
	ret = xpsgtr_configure_pll(gtr_phy);
	if (ret)
		goto out;

	xpsgtr_lane_set_protocol(gtr_phy);

	switch (gtr_phy->protocol) {
	case ICM_PROTOCOL_DP:
		xpsgtr_phy_init_dp(gtr_phy);
		break;

	case ICM_PROTOCOL_SATA:
		xpsgtr_phy_init_sata(gtr_phy);
		break;

	case ICM_PROTOCOL_SGMII:
		xpsgtr_phy_init_sgmii(gtr_phy);
		break;
	}

out:
	mutex_unlock(&gtr_dev->gtr_mutex);
	return ret;
}

static int xpsgtr_phy_exit(struct phy *phy)
{
	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;

	gtr_phy->skip_phy_init = false;

	/* Disable only the reference clock that was enabled for this lane. */
	clk_disable_unprepare(gtr_dev->clk[gtr_phy->refclk]);

	return 0;
}

static int xpsgtr_phy_power_on(struct phy *phy)
{
	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
	int ret = 0;

	/* Skip initialization if not required. */
	if (!xpsgtr_phy_init_required(gtr_phy))
		return ret;
	return xpsgtr_wait_pll_lock(phy);
}

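/*
 * Only index 0 of the DP configuration arrays is consulted below: each GT
 * lane is exposed as its own PHY instance, so the DP controller presumably
 * configures the lanes one PHY at a time.
 */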
static int xpsgtr_phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);

	if (gtr_phy->protocol != ICM_PROTOCOL_DP)
		return 0;

	xpsgtr_phy_configure_dp(gtr_phy, opts->dp.pre[0], opts->dp.voltage[0]);

	return 0;
}

static const struct phy_ops xpsgtr_phyops = {
	.init		= xpsgtr_phy_init,
	.exit		= xpsgtr_phy_exit,
	.power_on	= xpsgtr_phy_power_on,
	.configure	= xpsgtr_phy_configure,
	.owner		= THIS_MODULE,
};

/*
 * OF Xlate Support
 */

/* Set the lane protocol and instance based on the PHY type and instance number. */
static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type,
				unsigned int phy_instance)
{
	unsigned int num_instances;

	switch (phy_type) {
	case PHY_TYPE_SATA:
		num_instances = 2;
		gtr_phy->protocol = ICM_PROTOCOL_SATA;
		break;
	case PHY_TYPE_USB3:
		num_instances = 2;
		gtr_phy->protocol = ICM_PROTOCOL_USB;
		break;
	case PHY_TYPE_DP:
		num_instances = 2;
		gtr_phy->protocol = ICM_PROTOCOL_DP;
		break;
	case PHY_TYPE_PCIE:
		num_instances = 4;
		gtr_phy->protocol = ICM_PROTOCOL_PCIE;
		break;
	case PHY_TYPE_SGMII:
		num_instances = 4;
		gtr_phy->protocol = ICM_PROTOCOL_SGMII;
		break;
	default:
		return -EINVAL;
	}

	if (phy_instance >= num_instances)
		return -EINVAL;

	gtr_phy->instance = phy_instance;
	return 0;
}

/*
 * Valid combinations of controllers and lanes (Interconnect Matrix). Each
 * "instance" represents one controller for a lane. For PCIe and DP, the
 * "instance" is the logical lane in the link. For SATA, USB, and SGMII,
 * the instance is the index of the controller.
 *
 * This information is only used to validate the devicetree reference, and is
 * not used when programming the hardware.
 */
static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = {
	/* PCIe, SATA, USB, DP, SGMII */
	{ 0, 0, 0, 1, 0 }, /* Lane 0 */
	{ 1, 1, 0, 0, 1 }, /* Lane 1 */
	{ 2, 0, 0, 1, 2 }, /* Lane 2 */
	{ 3, 1, 1, 0, 3 }, /* Lane 3 */
};
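
/*
 * For reference, a consumer selects a lane with a 4-cell specifier of
 * <lane type instance refclk>, as parsed by xpsgtr_xlate() below. A
 * hypothetical devicetree snippet for the SATA controller on lane 1,
 * instance 1, using reference clock 1 could look like:
 *
 *	&sata {
 *		phys = <&psgtr 1 PHY_TYPE_SATA 1 1>;
 *	};
 *
 * (node and phandle names are illustrative only).
 */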

/* Translate OF phandle and args to PHY instance. */
static struct phy *xpsgtr_xlate(struct device *dev,
				const struct of_phandle_args *args)
{
	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
	struct xpsgtr_phy *gtr_phy;
	unsigned int phy_instance;
	unsigned int phy_lane;
	unsigned int phy_type;
	unsigned int refclk;
	unsigned int i;
	int ret;

	if (args->args_count != 4) {
		dev_err(dev, "Invalid number of cells in 'phy' property\n");
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Get the PHY parameters from the OF arguments and derive the lane
	 * type.
	 */
	phy_lane = args->args[0];
	if (phy_lane >= ARRAY_SIZE(gtr_dev->phys)) {
		dev_err(dev, "Invalid lane number %u\n", phy_lane);
		return ERR_PTR(-ENODEV);
	}

	gtr_phy = &gtr_dev->phys[phy_lane];
	phy_type = args->args[1];
	phy_instance = args->args[2];

	guard(mutex)(&gtr_phy->phy->mutex);
	ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
	if (ret < 0) {
		dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
		return ERR_PTR(ret);
	}

	refclk = args->args[3];
	if (refclk >= ARRAY_SIZE(gtr_dev->clk)) {
		dev_err(dev, "Invalid reference clock number %u\n", refclk);
		return ERR_PTR(-EINVAL);
	}

	gtr_phy->refclk = refclk;

	/*
	 * Ensure that the Interconnect Matrix is obeyed, i.e. a given lane
	 * type is allowed to operate on the lane.
	 */
	for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
		if (icm_matrix[phy_lane][i] == gtr_phy->instance)
			return gtr_phy->phy;
	}

	return ERR_PTR(-EINVAL);
}

/*
 * DebugFS
 */

static int xpsgtr_status_read(struct seq_file *seq, void *data)
{
	struct device *dev = seq->private;
	struct xpsgtr_phy *gtr_phy = dev_get_drvdata(dev);
	struct clk *clk;
	u32 pll_status;

	mutex_lock(&gtr_phy->phy->mutex);
	pll_status = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);
	clk = gtr_phy->dev->clk[gtr_phy->refclk];

	seq_printf(seq, "Lane:            %u\n", gtr_phy->lane);
	seq_printf(seq, "Protocol:        %s\n",
		   xpsgtr_icm_str[gtr_phy->protocol]);
	seq_printf(seq, "Instance:        %u\n", gtr_phy->instance);
	seq_printf(seq, "Reference clock: %u (%pC)\n", gtr_phy->refclk, clk);
	seq_printf(seq, "Reference rate:  %lu\n", clk_get_rate(clk));
	seq_printf(seq, "PLL locked:      %s\n",
		   pll_status & PLL_STATUS_LOCKED ? "yes" : "no");

	mutex_unlock(&gtr_phy->phy->mutex);
	return 0;
}

/*
 * Power Management
 */

static int xpsgtr_runtime_suspend(struct device *dev)
{
	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);

	/* Save a snapshot of the ICM_CFG registers. */
	gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
	gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);

	xpsgtr_save_lane_regs(gtr_dev);

	return 0;
}

static int xpsgtr_runtime_resume(struct device *dev)
{
	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
	unsigned int icm_cfg0, icm_cfg1;
	unsigned int i;
	bool skip_phy_init;

	xpsgtr_restore_lane_regs(gtr_dev);

	icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
	icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);

	/* Return if no GT lanes got configured before suspend. */
	if (!gtr_dev->saved_icm_cfg0 && !gtr_dev->saved_icm_cfg1)
		return 0;

	/* Check if the ICM configurations changed after suspend. */
	skip_phy_init = icm_cfg0 == gtr_dev->saved_icm_cfg0 &&
			icm_cfg1 == gtr_dev->saved_icm_cfg1;

	/* Update skip_phy_init for all gtr_phy instances. */
	for (i = 0; i < ARRAY_SIZE(gtr_dev->phys); i++)
		gtr_dev->phys[i].skip_phy_init = skip_phy_init;

	return 0;
}

static DEFINE_RUNTIME_DEV_PM_OPS(xpsgtr_pm_ops, xpsgtr_runtime_suspend,
				 xpsgtr_runtime_resume, NULL);

/*
 * Probe & Platform Driver
 */

static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
{
	unsigned int refclk;

	for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->clk); ++refclk) {
		struct clk *clk;
		char name[8];

		snprintf(name, sizeof(name), "ref%u", refclk);
		clk = devm_clk_get_optional(gtr_dev->dev, name);
		if (IS_ERR(clk)) {
			return dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
					     "Failed to get ref clock %u\n",
					     refclk);
		}

		if (!clk)
			continue;

		gtr_dev->clk[refclk] = clk;
	}

	return 0;
}

static int xpsgtr_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct xpsgtr_dev *gtr_dev;
	struct phy_provider *provider;
	unsigned int port;
	int ret;

	gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
	if (!gtr_dev)
		return -ENOMEM;

	gtr_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, gtr_dev);

	mutex_init(&gtr_dev->gtr_mutex);

	if (of_device_is_compatible(np, "xlnx,zynqmp-psgtr"))
		gtr_dev->tx_term_fix =
			of_property_read_bool(np, "xlnx,tx-termination-fix");

	/* Acquire resources. */
	gtr_dev->serdes = devm_platform_ioremap_resource_byname(pdev, "serdes");
	if (IS_ERR(gtr_dev->serdes))
		return PTR_ERR(gtr_dev->serdes);

	gtr_dev->siou = devm_platform_ioremap_resource_byname(pdev, "siou");
	if (IS_ERR(gtr_dev->siou))
		return PTR_ERR(gtr_dev->siou);

	ret = xpsgtr_get_ref_clocks(gtr_dev);
	if (ret)
		return ret;

	/* Create PHYs. */
	for (port = 0; port < ARRAY_SIZE(gtr_dev->phys); ++port) {
		struct xpsgtr_phy *gtr_phy = &gtr_dev->phys[port];
		struct phy *phy;

		gtr_phy->lane = port;
		gtr_phy->dev = gtr_dev;

		phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
		if (IS_ERR(phy)) {
			dev_err(&pdev->dev, "failed to create PHY\n");
			return PTR_ERR(phy);
		}

		gtr_phy->phy = phy;
		phy_set_drvdata(phy, gtr_phy);
		debugfs_create_devm_seqfile(&phy->dev, "status", phy->debugfs,
					    xpsgtr_status_read);
	}

	/* Register the PHY provider. */
	provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
	if (IS_ERR(provider)) {
		dev_err(&pdev->dev, "registering provider failed\n");
		return PTR_ERR(provider);
	}

	pm_runtime_set_active(gtr_dev->dev);
	pm_runtime_enable(gtr_dev->dev);

	ret = pm_runtime_resume_and_get(gtr_dev->dev);
	if (ret < 0) {
		pm_runtime_disable(gtr_dev->dev);
		return ret;
	}

	gtr_dev->saved_regs = devm_kmalloc(gtr_dev->dev,
					   sizeof(save_reg_address),
					   GFP_KERNEL);
	if (!gtr_dev->saved_regs)
		return -ENOMEM;

	return 0;
}

static void xpsgtr_remove(struct platform_device *pdev)
{
	struct xpsgtr_dev *gtr_dev = platform_get_drvdata(pdev);

	pm_runtime_disable(gtr_dev->dev);
	pm_runtime_put_noidle(gtr_dev->dev);
	pm_runtime_set_suspended(gtr_dev->dev);
}

static const struct of_device_id xpsgtr_of_match[] = {
	{ .compatible = "xlnx,zynqmp-psgtr", },
	{ .compatible = "xlnx,zynqmp-psgtr-v1.1", },
	{},
};
MODULE_DEVICE_TABLE(of, xpsgtr_of_match);

static struct platform_driver xpsgtr_driver = {
	.probe = xpsgtr_probe,
	.remove = xpsgtr_remove,
	.driver = {
		.name = "xilinx-psgtr",
		.of_match_table	= xpsgtr_of_match,
		.pm = pm_ptr(&xpsgtr_pm_ops),
	},
};

module_platform_driver(xpsgtr_driver);

MODULE_AUTHOR("Xilinx Inc.");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Xilinx ZynqMP High speed Gigabit Transceiver");