xref: /linux/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

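/* Ring state is staged in the ring->state shadow array by the helpers
 * below and only committed to hardware by xgene_enet_write_ring_state(),
 * which flushes every config word through the ring-manager CSR window.
 */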
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	/* The DMA base address is programmed shifted right by 8 bits,
	 * split between a low field in word 2 and a high field in word 3.
	 */
	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

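/* Accessors for the ring-manager CSR window at pdata->ring_csr_addr. */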
static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

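/* Ring bring-up sequence: clear stale hardware state, program the new
 * state (type, recombination buffer, base address, size), then publish
 * the ring by writing its ID. Rings dequeued by the CPU additionally get
 * every slot marked empty and their "not empty" interrupt enabled in
 * CSR_RING_NE_INT_MODE.
 */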
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}

static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

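/* The per-ring command register takes a message count. The count is a
 * plain int, so callers can post new descriptors with a positive count
 * and (as the core driver does) retire consumed ones with a negative
 * count.
 */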
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}

/* Fold a receive error code from the descriptor status into netdev stats. */
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	switch (status) {
	case INGRESS_CRC:
		ring->rx_crc_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		ring->rx_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_TRUNC_FRAME:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_LEN:
		ring->rx_length_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_UNDER:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_FIFO_OVERRUN:
		ring->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

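/* MCX MAC registers are accessed indirectly: write the register offset
 * to the ADDR register (plus the data for writes), issue a command, and
 * poll COMMAND_DONE for up to roughly 10 us before reporting failure.
 */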
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

/* Take the ENET RAM blocks out of shutdown and wait until all of them
 * report ready.
 */
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

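/* Keep the interface clock in step with the link speed: 2.5 MHz for
 * 10 Mbps, 25 MHz for 100 Mbps and 125 MHz otherwise. DT platforms set
 * the parent clock rate directly, while ACPI platforms delegate to the
 * firmware S10/S100/S1G methods.
 */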
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}

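/* Reconfigure the MAC, interface control, ICM and RGMII registers for
 * the current PHY speed. Any speed other than 10/100 Mbps is treated as
 * gigabit.
 */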
static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 icm0, icm2, mc2;
	u32 intf_ctl, rgmii, value;

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);
		if (dev->of_node) {
			CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
			CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
		}
		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}

static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
	xgene_enet_wr_mcx_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}

static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
				       bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);

	if (enable)
		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
	else
		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);

	xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
}

static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);

	if (enable)
		data |= TX_FLOW_EN;
	else
		data &= ~TX_FLOW_EN;

	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);

	pdata->mac_ops->enable_tx_pause(pdata, enable);
}

static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);

	if (enable)
		data |= RX_FLOW_EN;
	else
		data &= ~RX_FLOW_EN;

	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
}

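/* Full MAC bring-up: optional soft reset (skipped when an external MDIO
 * driver manages the MAC), speed and station address programming, MDC
 * clock divider, drop-on-empty-bufpool policy, pause frame thresholds
 * and finally Rx/Tx traffic resume.
 */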
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value;

	if (!pdata->mdio_driver)
		xgene_gmac_reset(pdata);

	xgene_gmac_set_speed(pdata);
	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Configure HW pause frame generation */
	xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
	value = (DEF_QUANTA << 16) | (value & 0xFFFF);
	xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);

	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);

	xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
	xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}

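/* Classifier bypass: steer all received traffic directly to the given
 * destination ring, drawing buffers from the specified free pool(s)
 * instead of consulting the classifier engine.
 */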
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id,
				  u16 nxtbufpool_id)
{
	u32 cb;
	u32 fpsel, nxtfpsel;

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

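/* Port reset. The ring manager must be clocked and out of reset first.
 * DT platforms bounce the port clock to reset the block; ACPI platforms
 * invoke the firmware _RST method, falling back to _INI.
 */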
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (pdata->mdio_driver) {
		xgene_enet_config_ring_if_assoc(pdata);
		return 0;
	}

	if (dev->of_node) {
		clk_prepare_enable(pdata->clk);
		udelay(5);
		clk_disable_unprepare(pdata->clk);
		udelay(5);
		clk_prepare_enable(pdata->clk);
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_RST", NULL, NULL);
		} else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
					 "_INI")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_INI", NULL, NULL);
		}
#endif
	}

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	return 0;
}

static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(xgene_enet_get_fpsel(ring->id));
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(xgene_enet_ring_bufnum(ring->id));
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	u32 pb;
	int i;

	pb = 0;
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i]->buf_pool;
		pb |= BIT(xgene_enet_get_fpsel(ring->id));
		ring = pdata->rx_ring[i]->page_pool;
		if (ring)
			pb |= BIT(xgene_enet_get_fpsel(ring->id));
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);

	pb = 0;
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		pb |= BIT(xgene_enet_ring_bufnum(ring->id));
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}

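/* Resolve flow control after autonegotiation with the standard MII
 * helpers: build the local advertisement, combine it with the link
 * partner's pause capabilities and apply the result to the MAC only
 * where it differs from the current settings.
 */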
static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u16 lcladv, rmtadv = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl = 0;

	if (!phydev->duplex || !pdata->pause_autoneg)
		return 0;

	if (pdata->tx_pause)
		flowctl |= FLOW_CTRL_TX;

	if (pdata->rx_pause)
		flowctl |= FLOW_CTRL_RX;

	lcladv = mii_advertise_flowctrl(flowctl);

	if (phydev->pause)
		rmtadv = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		rmtadv |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	tx_pause = !!(flowctl & FLOW_CTRL_TX);
	rx_pause = !!(flowctl & FLOW_CTRL_RX);

	if (tx_pause != pdata->tx_pause) {
		pdata->tx_pause = tx_pause;
		pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
	}

	if (rx_pause != pdata->rx_pause) {
		pdata->rx_pause = rx_pause;
		pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
	}

	return 0;
}

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			mac_ops->set_speed(pdata);
			mac_ops->rx_enable(pdata);
			mac_ops->tx_enable(pdata);
			phy_print_status(phydev);
		}

		xgene_enet_flowctrl_cfg(ndev);
	} else {
		mac_ops->rx_disable(pdata);
		mac_ops->tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

#ifdef CONFIG_ACPI
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
	struct acpi_reference_args args;
	struct fwnode_handle *fw_node;
	int status;

	fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
	status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
						  &args);
	if (ACPI_FAILURE(status)) {
		dev_dbg(dev, "No matching phy in ACPI table\n");
		return NULL;
	}

	return args.adev;
}
#endif

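/* Connect the PHY. DT platforms try up to two "phy-handle" phandles;
 * ACPI platforms resolve the PHY through the "phy-handle" device
 * property. Half-duplex modes are unsupported and therefore masked out,
 * while symmetric and asymmetric pause are advertised.
 */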
int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;
	int i;

	if (dev->of_node) {
		for (i = 0; i < 2; i++) {
			np = of_parse_phandle(dev->of_node, "phy-handle", i);
			phy_dev = of_phy_connect(ndev, np,
						 &xgene_enet_adjust_link,
						 0, pdata->phy_mode);
			of_node_put(np);
			if (phy_dev)
				break;
		}

		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
	} else {
#ifdef CONFIG_ACPI
		struct acpi_device *adev = acpi_phy_find_device(dev);

		if (adev)
			phy_dev = adev->driver_data;
		else
			phy_dev = NULL;

		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
#else
		return -ENODEV;
#endif
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~(SUPPORTED_10baseT_Half |
				SUPPORTED_100baseT_Half |
				SUPPORTED_1000baseT_Half);
	phy_dev->supported |= SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	u32 phy_addr;
	int ret;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
	if (ret)
		return -EINVAL;

	phy = xgene_enet_phy_register(mdio, phy_addr);
	if (!phy)
		return -EIO;

	return ret;
}

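/* Allocate and register the RGMII MDIO bus, then connect the PHY. On
 * failure the bus is unregistered and freed again so the caller always
 * sees a consistent state.
 */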
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_mdio_rgmii_read;
	mdio_bus->write = xgene_mdio_rgmii_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
	mdio_bus->parent = &pdata->pdev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

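/* Operation tables exported to the core driver for ports driven by this
 * generation of MAC, port and ring hardware.
 */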
const struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_speed = xgene_gmac_set_speed,
	.set_mac_addr = xgene_gmac_set_mac_addr,
	.set_framesize = xgene_enet_set_frame_size,
	.enable_tx_pause = xgene_gmac_enable_tx_pause,
	.flowctl_tx     = xgene_gmac_flowctl_tx,
	.flowctl_rx     = xgene_gmac_flowctl_rx,
};

const struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
};