/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

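/* Build up the ring's shadow configuration in ring->state: words 2 and 3
 * take the DMA base address (stored shifted right by 8 bits, split across
 * the RINGADDRL/RINGADDRH fields), word 3 additionally carries the size
 * class and the accept-length-error/coherency flags, and word 4 the
 * threshold select.  Nothing touches hardware here;
 * xgene_enet_write_ring_state() flushes the words to the CSRs later.
 */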
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

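/* Flush the shadow state to hardware: writing ring->num to
 * CSR_RING_CONFIG selects which ring the write window targets, then the
 * state words are written back-to-back starting at CSR_RING_WR_BASE.
 */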
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

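/* Bind the ring number to its ring id and enable the prefetch buffer;
 * buffer-pool rings are additionally flagged as such.  The OVERWRITE bit,
 * as its name suggests, lets this write replace any previous binding.
 */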
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

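/* One-time ring bring-up: program the state words, bind the ring id and
 * derive the slot count.  For CPU-owned (completion) rings, every
 * descriptor slot is marked empty and the ring's bit is set in
 * CSR_RING_NE_INT_MODE, which (going by the register name) arms the
 * not-empty interrupt for that ring.
 */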
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}

static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

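/* Doorbell and occupancy helpers: the count written to ring->cmd adjusts
 * the ring's pending-message accounting (callers pass negative values to
 * retire descriptors), and the second word at ring->cmd_base reports the
 * number of messages currently queued.
 */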
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	struct rtnl_link_stats64 *stats = &pdata->stats;

	switch (status) {
	case INGRESS_CRC:
		stats->rx_crc_errors++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		stats->rx_errors++;
		break;
	case INGRESS_TRUNC_FRAME:
		stats->rx_frame_errors++;
		break;
	case INGRESS_PKT_LEN:
		stats->rx_length_errors++;
		break;
	case INGRESS_PKT_UNDER:
		stats->rx_frame_errors++;
		break;
	case INGRESS_FIFO_OVERRUN:
		stats->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

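/* Indirect access into the MCX MAC register space: latch the target
 * address, stage the data, issue the write command and poll the done
 * register (up to roughly 10us) before clearing the command.
 */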
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

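/* Read-side counterpart of xgene_enet_wr_indirect(); the data register
 * is only sampled once the done register reports completion.
 */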
static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

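/* MII management (MDIO) primitives, layered on the indirect MAC
 * registers: program the PHY/register address, start the cycle, then
 * poll the BUSY bit in MII_MGMT_INDICATORS with a bounded number of
 * retries.
 */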
static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
			       u32 reg, u16 data)
{
	u32 addr = 0, wr_data = 0;
	u32 done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);

	PHY_CONTROL_SET(&wr_data, data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT write failed\n");
		return -EBUSY;
	}

	return 0;
}

static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
			      u8 phy_id, u32 reg)
{
	u32 addr = 0;
	u32 data, done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT read failed\n");
		return -EBUSY;
	}

	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);

	return data;
}

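/* The station address is split across two MAC registers: ADDR0 holds the
 * first four bytes of dev_addr (byte 0 in the least significant octet)
 * and ADDR1 the remaining two in its upper half-word.
 */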
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

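/* Take the ENET RAMs out of shutdown and wait for every memory block to
 * report ready (all-ones in ENET_BLOCK_MEM_RDY).
 */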
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

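/* Full GMAC (re)initialization for the current PHY speed: interface
 * mode, ICM wait states and the RGMII clock mux all depend on
 * 10/100/1000 operation, after which the MAC address, MDC divisor and
 * Rx/Tx data gates are reprogrammed.  xgene_enet_adjust_link() calls
 * this again whenever the negotiated speed changes.
 */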
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value, mc2;
	u32 intf_ctl, rgmii;
	u32 icm0, icm2;

	xgene_gmac_reset(pdata);

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl |= ENET_GHD_MODE;
		CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);

	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}

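/* Classifier (CLE) bypass: steer all received traffic to a single
 * destination ring and free-buffer pool.  Buffer-pool buffer numbers
 * start at 0x20, hence the subtraction to form the hardware FPSEL index.
 */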
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb;
	u32 fpsel;

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

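/* The ring manager is only usable once its clock is enabled and it is
 * out of soft reset; ring CSR accesses before that point are meaningless.
 */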
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

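/* Port reset: the ENET block is reset by toggling its clock (enable,
 * disable, enable again), after which the RAMs are brought back up and
 * the ring-interface associations and MDIO management clock are
 * reprogrammed.
 */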
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	u32 val;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (!IS_ERR(pdata->clk)) {
		clk_prepare_enable(pdata->clk);
		clk_disable_unprepare(pdata->clk);
		clk_prepare_enable(pdata->clk);
		xgene_enet_ecc_init(pdata);
	}
	xgene_enet_config_ring_if_assoc(pdata);

	/* Enable auto-incr for scanning */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
	val |= SCAN_AUTO_INCR;
	MGMT_CLOCK_SEL_SET(&val, 1);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);

	return 0;
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	if (!IS_ERR(pdata->clk))
		clk_disable_unprepare(pdata->clk);
}

static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct xgene_enet_pdata *pdata = bus->priv;
	u32 val;

	val = xgene_mii_phy_read(pdata, mii_id, regnum);
	netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);

	return val;
}

static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 val)
{
	struct xgene_enet_pdata *pdata = bus->priv;

	netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);
	return xgene_mii_phy_write(pdata, mii_id, regnum, val);
}

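/* phylib link-change callback: a speed change triggers a full GMAC
 * reinit for the new rate before Rx/Tx are re-enabled; on link down,
 * both directions are stopped.
 */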
static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = pdata->phy_dev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			xgene_gmac_init(pdata);
			xgene_gmac_rx_enable(pdata);
			xgene_gmac_tx_enable(pdata);
			phy_print_status(phydev);
		}
	} else {
		xgene_gmac_rx_disable(pdata);
		xgene_gmac_tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

static int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *phy_np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
		if (!phy_np) {
			netdev_dbg(ndev, "No phy-handle found in DT\n");
			return -ENODEV;
		}

		phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
					 0, pdata->phy_mode);
		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}

		pdata->phy_dev = phy_dev;
	} else {
		phy_dev = pdata->phy_dev;

		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~(SUPPORTED_10baseT_Half |
				SUPPORTED_100baseT_Half |
				SUPPORTED_1000baseT_Half);
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

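/* Register the MDIO bus either from the "apm,xgene-mdio" child node when
 * booting from DT, or, in the absence of an of_node (e.g. ACPI boot), by
 * probing the single PHY address given by the "phy-channel" or
 * "phy-addr" property.
 */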
static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	int ret;
	u32 phy_id;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_id);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_id);
	if (ret)
		return -EINVAL;

	phy = get_phy_device(mdio, phy_id, false);
	if (!phy || IS_ERR(phy))
		return -EIO;

	ret = phy_device_register(phy);
	if (ret)
		phy_device_free(phy);
	else
		pdata->phy_dev = phy;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_enet_mdio_read;
	mdio_bus->write = xgene_enet_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = pdata;
	mdio_bus->parent = &ndev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	if (pdata->phy_dev)
		phy_disconnect(pdata->phy_dev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_mac_addr = xgene_gmac_set_mac_addr,
};

struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
};