// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018 NXP
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/packing.h>
#include "sja1105.h"

#define SJA1105_SIZE_CGU_CMD	4
#define SJA1110_BASE_MCSS_CLK	SJA1110_CGU_ADDR(0x70)
#define SJA1110_BASE_TIMER_CLK	SJA1110_CGU_ADDR(0x74)

/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
struct sja1105_cfg_pad_mii {
	u64 d32_os;
	u64 d32_ih;
	u64 d32_ipud;
	u64 d10_ih;
	u64 d10_os;
	u64 d10_ipud;
	u64 ctrl_os;
	u64 ctrl_ih;
	u64 ctrl_ipud;
	u64 clk_os;
	u64 clk_ih;
	u64 clk_ipud;
};

struct sja1105_cfg_pad_mii_id {
	u64 rxc_stable_ovr;
	u64 rxc_delay;
	u64 rxc_bypass;
	u64 rxc_pd;
	u64 txc_stable_ovr;
	u64 txc_delay;
	u64 txc_bypass;
	u64 txc_pd;
};

/* UM10944 Table 82.
 * IDIV_0_C to IDIV_4_C control registers
 * (addr. 10000Bh to 10000Fh)
 */
struct sja1105_cgu_idiv {
	u64 clksrc;
	u64 autoblock;
	u64 idiv;
	u64 pd;
};

/* PLL_1_C control register
 *
 * SJA1105 E/T: UM10944 Table 81 (address 10000Ah)
 * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah)
 */
struct sja1105_cgu_pll_ctrl {
	u64 pllclksrc;
	u64 msel;
	u64 autoblock;
	u64 psel;
	u64 direct;
	u64 fbsel;
	u64 bypass;
	u64 pd;
};

struct sja1110_cgu_outclk {
	u64 clksrc;
	u64 autoblock;
	u64 pd;
};

enum {
	CLKSRC_MII0_TX_CLK	= 0x00,
	CLKSRC_MII0_RX_CLK	= 0x01,
	CLKSRC_MII1_TX_CLK	= 0x02,
	CLKSRC_MII1_RX_CLK	= 0x03,
	CLKSRC_MII2_TX_CLK	= 0x04,
	CLKSRC_MII2_RX_CLK	= 0x05,
	CLKSRC_MII3_TX_CLK	= 0x06,
	CLKSRC_MII3_RX_CLK	= 0x07,
	CLKSRC_MII4_TX_CLK	= 0x08,
	CLKSRC_MII4_RX_CLK	= 0x09,
	CLKSRC_PLL0		= 0x0B,
	CLKSRC_PLL1		= 0x0E,
	CLKSRC_IDIV0		= 0x11,
	CLKSRC_IDIV1		= 0x12,
	CLKSRC_IDIV2		= 0x13,
	CLKSRC_IDIV3		= 0x14,
	CLKSRC_IDIV4		= 0x15,
};

/* UM10944 Table 83.
 * MIIx clock control registers 1 to 30
 * (addresses 100013h to 100035h)
 */
struct sja1105_cgu_mii_ctrl {
	u64 clksrc;
	u64 autoblock;
	u64 pd;
};

static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
				     enum packing_op op)
{
	const int size = 4;

	sja1105_packing(buf, &idiv->clksrc,    28, 24, size, op);
	sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
	sja1105_packing(buf, &idiv->idiv,       5,  2, size, op);
	sja1105_packing(buf, &idiv->pd,         0,  0, size, op);
}

static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
				   bool enabled, int factor)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct device *dev = priv->ds->dev;
	struct sja1105_cgu_idiv idiv;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

	if (regs->cgu_idiv[port] == SJA1105_RSV_ADDR)
		return 0;

	if (enabled && factor != 1 && factor != 10) {
		dev_err(dev, "idiv factor must be 1 or 10\n");
		return -ERANGE;
	}

	/* Payload for packed_buf */
	idiv.clksrc    = 0x0A;            /* 25MHz */
	idiv.autoblock = 1;               /* Block clk automatically */
	idiv.idiv      = factor - 1;      /* Divide by 1 or 10 */
	idiv.pd        = enabled ? 0 : 1; /* Power down? */
	sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static void
sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
				enum packing_op op)
{
	const int size = 4;

	sja1105_packing(buf, &cmd->clksrc,    28, 24, size, op);
	sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
	sja1105_packing(buf, &cmd->pd,         0,  0, size, op);
}

static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
					 int port, sja1105_mii_role_t role)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl mii_tx_clk;
	static const int mac_clk_sources[] = {
		CLKSRC_MII0_TX_CLK,
		CLKSRC_MII1_TX_CLK,
		CLKSRC_MII2_TX_CLK,
		CLKSRC_MII3_TX_CLK,
		CLKSRC_MII4_TX_CLK,
	};
	static const int phy_clk_sources[] = {
		CLKSRC_IDIV0,
		CLKSRC_IDIV1,
		CLKSRC_IDIV2,
		CLKSRC_IDIV3,
		CLKSRC_IDIV4,
	};
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	int clksrc;

	if (regs->mii_tx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	if (role == XMII_MAC)
		clksrc = mac_clk_sources[port];
	else
		clksrc = phy_clk_sources[port];

	/* Payload for packed_buf */
	mii_tx_clk.clksrc    = clksrc;
	mii_tx_clk.autoblock = 1;  /* Autoblock clk while changing clksrc */
	mii_tx_clk.pd        = 0;  /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl mii_rx_clk;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	static const int clk_sources[] = {
		CLKSRC_MII0_RX_CLK,
		CLKSRC_MII1_RX_CLK,
		CLKSRC_MII2_RX_CLK,
		CLKSRC_MII3_RX_CLK,
		CLKSRC_MII4_RX_CLK,
	};

	if (regs->mii_rx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload for packed_buf */
	mii_rx_clk.clksrc    = clk_sources[port];
	mii_rx_clk.autoblock = 1;  /* Autoblock clk while changing clksrc */
	mii_rx_clk.pd        = 0;  /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	static const int clk_sources[] = {
		CLKSRC_IDIV0,
		CLKSRC_IDIV1,
		CLKSRC_IDIV2,
		CLKSRC_IDIV3,
		CLKSRC_IDIV4,
	};

	if (regs->mii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload for packed_buf */
	mii_ext_tx_clk.clksrc    = clk_sources[port];
	mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
	mii_ext_tx_clk.pd        = 0; /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	static const int clk_sources[] = {
		CLKSRC_IDIV0,
		CLKSRC_IDIV1,
		CLKSRC_IDIV2,
		CLKSRC_IDIV3,
		CLKSRC_IDIV4,
	};

	if (regs->mii_ext_rx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload for packed_buf */
	mii_ext_rx_clk.clksrc    = clk_sources[port];
	mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
	mii_ext_rx_clk.pd        = 0; /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
				      sja1105_mii_role_t role)
{
	struct device *dev = priv->ds->dev;
	int rc;

	dev_dbg(dev, "Configuring MII-%s clocking\n",
		(role == XMII_MAC) ? "MAC" : "PHY");
	/* If role is MAC, disable IDIV
	 * If role is PHY, enable IDIV and configure for 1/1 divider
	 */
	rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
	if (rc < 0)
		return rc;

	/* Configure CLKSRC of MII_TX_CLK_n
	 *   * If role is MAC, select TX_CLK_n
	 *   * If role is PHY, select IDIV_n
	 */
	rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
	if (rc < 0)
		return rc;

	/* Configure CLKSRC of MII_RX_CLK_n
	 * Select RX_CLK_n
	 */
	rc = sja1105_cgu_mii_rx_clk_config(priv, port);
	if (rc < 0)
		return rc;

	if (role == XMII_PHY) {
		/* Per MII spec, the PHY (which is us) drives the TX_CLK pin */

		/* Configure CLKSRC of EXT_TX_CLK_n
		 * Select IDIV_n
		 */
		rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
		if (rc < 0)
			return rc;

		/* Configure CLKSRC of EXT_RX_CLK_n
		 * Select IDIV_n
		 */
		rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
		if (rc < 0)
			return rc;
	}
	return 0;
}

static void
sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
				enum packing_op op)
{
	const int size = 4;

	sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
	sja1105_packing(buf, &cmd->msel,      23, 16, size, op);
	sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
	sja1105_packing(buf, &cmd->psel,       9,  8, size, op);
	sja1105_packing(buf, &cmd->direct,     7,  7, size, op);
	sja1105_packing(buf, &cmd->fbsel,      6,  6, size, op);
	sja1105_packing(buf, &cmd->bypass,     1,  1, size, op);
	sja1105_packing(buf, &cmd->pd,         0,  0, size, op);
}

static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
					   int port, u64 speed)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl txc;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	int clksrc;

	if (regs->rgmii_tx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
		clksrc = CLKSRC_PLL0;
	} else {
		static const int clk_sources[] = {
			CLKSRC_IDIV0,
			CLKSRC_IDIV1,
			CLKSRC_IDIV2,
			CLKSRC_IDIV3,
			CLKSRC_IDIV4,
		};
		clksrc = clk_sources[port];
	}

	/* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */
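	/* PLL0 supplies the fixed 125 MHz clock required at 1000Mbps; at lower
	 * speeds the port's IDIV, programmed by sja1105_rgmii_clocking_setup()
	 * to divide its 25 MHz input by 1 or 10, supplies 25 MHz or 2.5 MHz.
	 */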
	txc.clksrc = clksrc;
	/* Autoblock clk while changing clksrc */
	txc.autoblock = 1;
	/* Power Down off => enabled */
	txc.pd = 0;
	sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

/* AGU */
static void
sja1105_cfg_pad_mii_packing(void *buf, struct sja1105_cfg_pad_mii *cmd,
			    enum packing_op op)
{
	const int size = 4;

	sja1105_packing(buf, &cmd->d32_os,   28, 27, size, op);
	sja1105_packing(buf, &cmd->d32_ih,   26, 26, size, op);
	sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
	sja1105_packing(buf, &cmd->d10_os,   20, 19, size, op);
	sja1105_packing(buf, &cmd->d10_ih,   18, 18, size, op);
	sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
	sja1105_packing(buf, &cmd->ctrl_os,  12, 11, size, op);
	sja1105_packing(buf, &cmd->ctrl_ih,  10, 10, size, op);
	sja1105_packing(buf, &cmd->ctrl_ipud, 9,  8, size, op);
	sja1105_packing(buf, &cmd->clk_os,    4,  3, size, op);
	sja1105_packing(buf, &cmd->clk_ih,    2,  2, size, op);
	sja1105_packing(buf, &cmd->clk_ipud,  1,  0, size, op);
}

static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
					   int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cfg_pad_mii pad_mii_tx = {0};
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

	if (regs->pad_mii_tx[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload */
	pad_mii_tx.d32_os    = 3; /* TXD[3:2] output stage: */
				  /*          high noise/high speed */
	pad_mii_tx.d10_os    = 3; /* TXD[1:0] output stage: */
				  /*          high noise/high speed */
	pad_mii_tx.d32_ipud  = 2; /* TXD[3:2] input stage: */
				  /*          plain input (default) */
	pad_mii_tx.d10_ipud  = 2; /* TXD[1:0] input stage: */
				  /*          plain input (default) */
	pad_mii_tx.ctrl_os   = 3; /* TX_CTL / TX_ER output stage */
	pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */
	pad_mii_tx.clk_os    = 3; /* TX_CLK output stage */
	pad_mii_tx.clk_ih    = 0; /* TX_CLK input hysteresis (default) */
	pad_mii_tx.clk_ipud  = 2; /* TX_CLK input stage (default) */
	sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_tx, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cfg_pad_mii pad_mii_rx = {0};
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

	if (regs->pad_mii_rx[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload */
	pad_mii_rx.d32_ih    = 0; /* RXD[3:2] input stage hysteresis: */
				  /*          non-Schmitt (default) */
	pad_mii_rx.d32_ipud  = 2; /* RXD[3:2] input weak pull-up/down */
				  /*          plain input (default) */
	pad_mii_rx.d10_ih    = 0; /* RXD[1:0] input stage hysteresis: */
				  /*          non-Schmitt (default) */
	pad_mii_rx.d10_ipud  = 2; /* RXD[1:0] input weak pull-up/down */
				  /*          plain input (default) */
	pad_mii_rx.ctrl_ih   = 0; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
				  /* input stage hysteresis: */
				  /* non-Schmitt (default) */
	pad_mii_rx.ctrl_ipud = 3; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
				  /* input stage weak pull-up/down: */
				  /* pull-down */
	pad_mii_rx.clk_os    = 2; /* RX_CLK/RXC output stage: */
				  /* medium noise/fast speed (default) */
	pad_mii_rx.clk_ih    = 0; /* RX_CLK/RXC input hysteresis: */
				  /* non-Schmitt (default) */
	pad_mii_rx.clk_ipud  = 2; /* RX_CLK/RXC input pull-up/down: */
				  /* plain input (default) */
	sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_rx, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_rx[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static void
sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
			       enum packing_op op)
{
	const int size = SJA1105_SIZE_CGU_CMD;

	sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op);
	sja1105_packing(buf, &cmd->rxc_delay,      14, 10, size, op);
	sja1105_packing(buf, &cmd->rxc_bypass,      9,  9, size, op);
	sja1105_packing(buf, &cmd->rxc_pd,          8,  8, size, op);
	sja1105_packing(buf, &cmd->txc_stable_ovr,  7,  7, size, op);
	sja1105_packing(buf, &cmd->txc_delay,       6,  2, size, op);
	sja1105_packing(buf, &cmd->txc_bypass,      1,  1, size, op);
	sja1105_packing(buf, &cmd->txc_pd,          0,  0, size, op);
}

static void
sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
			       enum packing_op op)
{
	const int size = SJA1105_SIZE_CGU_CMD;
	u64 range = 4;

	/* Fields RXC_RANGE and TXC_RANGE select the input frequency range:
	 * 0 = 2.5MHz
	 * 1 = 25MHz
	 * 2 = 50MHz
	 * 3 = 125MHz
	 * 4 = Automatically determined by port speed.
	 * There's no point in defining a structure different than the one for
	 * SJA1105, so just hardcode the frequency range to automatic, just as
	 * before.
	 */
	sja1105_packing(buf, &cmd->rxc_stable_ovr, 26, 26, size, op);
	sja1105_packing(buf, &cmd->rxc_delay,      25, 21, size, op);
	sja1105_packing(buf, &range,               20, 18, size, op);
	sja1105_packing(buf, &cmd->rxc_bypass,     17, 17, size, op);
	sja1105_packing(buf, &cmd->rxc_pd,         16, 16, size, op);
	sja1105_packing(buf, &cmd->txc_stable_ovr, 10, 10, size, op);
	sja1105_packing(buf, &cmd->txc_delay,       9,  5, size, op);
	sja1105_packing(buf, &range,                4,  2, size, op);
	sja1105_packing(buf, &cmd->txc_bypass,      1,  1, size, op);
	sja1105_packing(buf, &cmd->txc_pd,          0,  0, size, op);
}

/* The RGMII delay setup procedure has two stages and is run on every
 * .phylink_mac_config call; both the two-stage split and the repeated
 * invocation are deliberate.
 * The reason is that the RX Tunable Delay Line (TDL) of the SJA1105 MAC has
 * issues with recovering from a frequency change of the link partner's RGMII
 * clock. The easiest way to recover from this is to temporarily power down
 * the TDL, as it will re-lock at the new frequency afterwards.
 */
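/* Concretely, stage 1 below bypasses and powers down both delay lines, then
 * stage 2 re-enables only the ones for which a nonzero delay was requested,
 * so the RX TDL re-locks at the link partner's current clock frequency.
 */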
int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
{
	const struct sja1105_private *priv = ctx;
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
	int rx_delay = priv->rgmii_rx_delay_ps[port];
	int tx_delay = priv->rgmii_tx_delay_ps[port];
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	int rc;

	if (rx_delay)
		pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
	if (tx_delay)
		pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);

	/* Stage 1: Turn the RGMII delay lines off. */
	pad_mii_id.rxc_bypass = 1;
	pad_mii_id.rxc_pd = 1;
	pad_mii_id.txc_bypass = 1;
	pad_mii_id.txc_pd = 1;
	sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

	rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
			      packed_buf, SJA1105_SIZE_CGU_CMD);
	if (rc < 0)
		return rc;

	/* Stage 2: Turn the RGMII delay lines on. */
	if (rx_delay) {
		pad_mii_id.rxc_bypass = 0;
		pad_mii_id.rxc_pd = 0;
	}
	if (tx_delay) {
		pad_mii_id.txc_bypass = 0;
		pad_mii_id.txc_pd = 0;
	}
	sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

int sja1110_setup_rgmii_delay(const void *ctx, int port)
{
	const struct sja1105_private *priv = ctx;
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
	int rx_delay = priv->rgmii_rx_delay_ps[port];
	int tx_delay = priv->rgmii_tx_delay_ps[port];
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

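	/* Leave both delay lines powered down unless a nonzero delay was
	 * requested for that direction below.
	 */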
	pad_mii_id.rxc_pd = 1;
	pad_mii_id.txc_pd = 1;

	if (rx_delay) {
		pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
		/* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
		pad_mii_id.rxc_bypass = 1;
		pad_mii_id.rxc_pd = 0;
	}

	if (tx_delay) {
		pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
		pad_mii_id.txc_bypass = 1;
		pad_mii_id.txc_pd = 0;
	}

	sja1110_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
					sja1105_mii_role_t role)
{
	struct device *dev = priv->ds->dev;
	struct sja1105_mac_config_entry *mac;
	u64 speed;
	int rc;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	speed = mac[port].speed;

	dev_dbg(dev, "Configuring port %d RGMII at speed %lldMbps\n",
		port, speed);

	if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
		/* 1000Mbps, IDIV disabled (125 MHz) */
		rc = sja1105_cgu_idiv_config(priv, port, false, 1);
	} else if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) {
		/* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
		rc = sja1105_cgu_idiv_config(priv, port, true, 1);
	} else if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) {
		/* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
		rc = sja1105_cgu_idiv_config(priv, port, true, 10);
	} else if (speed == priv->info->port_speed[SJA1105_SPEED_AUTO]) {
		/* Skip CGU configuration if there is no speed available
		 * (e.g. link is not established yet)
		 */
		dev_dbg(dev, "Speed not available, skipping CGU config\n");
		return 0;
	} else {
		rc = -EINVAL;
	}

	if (rc < 0) {
		dev_err(dev, "Failed to configure idiv\n");
		return rc;
	}
	rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
	if (rc < 0) {
		dev_err(dev, "Failed to configure RGMII Tx clock\n");
		return rc;
	}
	rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
	if (rc < 0) {
		dev_err(dev, "Failed to configure Tx pad registers\n");
		return rc;
	}

	if (!priv->info->setup_rgmii_delay)
		return 0;

	return priv->info->setup_rgmii_delay(priv, port);
}

static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
					   int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl ref_clk;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	static const int clk_sources[] = {
		CLKSRC_MII0_TX_CLK,
		CLKSRC_MII1_TX_CLK,
		CLKSRC_MII2_TX_CLK,
		CLKSRC_MII3_TX_CLK,
		CLKSRC_MII4_TX_CLK,
	};

	if (regs->rmii_ref_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload for packed_buf */
	ref_clk.clksrc    = clk_sources[port];
	ref_clk.autoblock = 1;      /* Autoblock clk while changing clksrc */
	ref_clk.pd        = 0;      /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl ext_tx_clk;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

	if (regs->rmii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload for packed_buf */
	ext_tx_clk.clksrc    = CLKSRC_PLL1;
	ext_tx_clk.autoblock = 1;   /* Autoblock clk while changing clksrc */
	ext_tx_clk.pd        = 0;   /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	struct sja1105_cgu_pll_ctrl pll = {0};
	struct device *dev = priv->ds->dev;
	int rc;

	if (regs->rmii_pll1 == SJA1105_RSV_ADDR)
		return 0;

	/* PLL1 must be enabled and output 50 MHz.
	 * This is done by first writing 0x0A010941 to
	 * the PLL_1_C register and then deasserting
	 * power down (PD) with 0x0A010940.
	 */
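	/* Going by the field layout in sja1105_cgu_pll_control_packing(),
	 * pllclksrc=0xA (bits 28:24), msel=1 (23:16), autoblock=1 (11),
	 * psel=1 (9:8), fbsel=1 (6) and pd=1 (0) pack to exactly 0x0A010941;
	 * clearing PD in step 2 gives 0x0A010940.
	 */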

	/* Step 1: PLL1 setup for 50 MHz */
	pll.pllclksrc = 0xA;
	pll.msel      = 0x1;
	pll.autoblock = 0x1;
	pll.psel      = 0x1;
	pll.direct    = 0x0;
	pll.fbsel     = 0x1;
	pll.bypass    = 0x0;
	pll.pd        = 0x1;

	sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
	rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
			      SJA1105_SIZE_CGU_CMD);
	if (rc < 0) {
		dev_err(dev, "failed to configure PLL1 for 50MHz\n");
		return rc;
	}

	/* Step 2: Enable PLL1 */
	pll.pd = 0x0;

	sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
	rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
			      SJA1105_SIZE_CGU_CMD);
	if (rc < 0) {
		dev_err(dev, "failed to enable PLL1\n");
		return rc;
	}
	return rc;
}

static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
				       sja1105_mii_role_t role)
{
	struct device *dev = priv->ds->dev;
	int rc;

	dev_dbg(dev, "Configuring RMII-%s clocking\n",
		(role == XMII_MAC) ? "MAC" : "PHY");
	/* AH1601.pdf chapter 2.5.1. Sources */
	if (role == XMII_MAC) {
		/* Configure and enable PLL1 for 50 MHz output */
		rc = sja1105_cgu_rmii_pll_config(priv);
		if (rc < 0)
			return rc;
	}
	/* Disable IDIV for this port */
	rc = sja1105_cgu_idiv_config(priv, port, false, 1);
	if (rc < 0)
		return rc;
	/* Source to sink mappings */
	rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
	if (rc < 0)
		return rc;
	if (role == XMII_MAC) {
		rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
		if (rc < 0)
			return rc;
	}
	return 0;
}

int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
{
	struct sja1105_xmii_params_entry *mii;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_mii_role_t role;
	int rc;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* RGMII etc */
	phy_mode = mii->xmii_mode[port];
	/* MAC or PHY, for applicable types (not RGMII) */
	role = mii->phy_mac[port];

	switch (phy_mode) {
	case XMII_MODE_MII:
		rc = sja1105_mii_clocking_setup(priv, port, role);
		break;
	case XMII_MODE_RMII:
		rc = sja1105_rmii_clocking_setup(priv, port, role);
		break;
	case XMII_MODE_RGMII:
		rc = sja1105_rgmii_clocking_setup(priv, port, role);
		break;
	case XMII_MODE_SGMII:
		/* Nothing to do in the CGU for SGMII */
		rc = 0;
		break;
	default:
		dev_err(dev, "Invalid interface mode specified: %d\n",
			phy_mode);
		return -EINVAL;
	}
	if (rc) {
		dev_err(dev, "Clocking setup for port %d failed: %d\n",
			port, rc);
		return rc;
	}

	/* Internally pull down the RX_DV/CRS_DV/RX_CTL and RX_ER inputs */
	return sja1105_cfg_pad_rx_config(priv, port);
}

int sja1105_clocking_setup(struct sja1105_private *priv)
{
	struct dsa_switch *ds = priv->ds;
	int port, rc;

	for (port = 0; port < ds->num_ports; port++) {
		rc = sja1105_clocking_setup_port(priv, port);
		if (rc < 0)
			return rc;
	}
	return 0;
}

static void
sja1110_cgu_outclk_packing(void *buf, struct sja1110_cgu_outclk *outclk,
			   enum packing_op op)
{
	const int size = 4;

	sja1105_packing(buf, &outclk->clksrc,    27, 24, size, op);
	sja1105_packing(buf, &outclk->autoblock, 11, 11, size, op);
	sja1105_packing(buf, &outclk->pd,         0,  0, size, op);
}

int sja1110_disable_microcontroller(struct sja1105_private *priv)
{
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	struct sja1110_cgu_outclk outclk_6_c = {
		.clksrc = 0x3,
		.pd = true,
	};
	struct sja1110_cgu_outclk outclk_7_c = {
		.clksrc = 0x5,
		.pd = true,
	};
	int rc;

	/* Power down the BASE_TIMER_CLK to disable the watchdog timer */
	sja1110_cgu_outclk_packing(packed_buf, &outclk_7_c, PACK);

	rc = sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK,
			      packed_buf, SJA1105_SIZE_CGU_CMD);
	if (rc)
		return rc;

	/* Power down the BASE_MCSS_CLOCK to gate the microcontroller off */
	sja1110_cgu_outclk_packing(packed_buf, &outclk_6_c, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_MCSS_CLK,
				packed_buf, SJA1105_SIZE_CGU_CMD);
}