/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
	iowrite32(val, p->eth_csr_addr + offset);
}

static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset,
				     u32 val)
{
	iowrite32(val, p->base_addr + offset);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
				  u32 offset, u32 val)
{
	iowrite32(val, p->eth_ring_if_addr + offset);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
				   u32 offset, u32 val)
{
	iowrite32(val, p->eth_diag_csr_addr + offset);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_csr_addr + offset);
}

static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_diag_csr_addr + offset);
}

static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->mcx_mac_csr_addr + offset);
}

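/* Take the ethernet block RAMs out of shutdown and poll the block
 * memory ready indication until every bank reports ready.  If the
 * RAMs are already up, the init is skipped.
 */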
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
	struct net_device *ndev = p->ndev;
	u32 data, shutdown;
	int i = 0;

	shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR);
	data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);

	if (!shutdown && data == ~0U) {
		netdev_dbg(ndev, "+ ecc_init done, skipping\n");
		return 0;
	}

	xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
	do {
		usleep_range(100, 110);
		data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
		if (data == ~0U)
			return 0;
	} while (++i < 10);

	netdev_err(ndev, "Failed to release memory from shutdown\n");
	return -ENODEV;
}

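/* Read the per-port frame drop counters: Rx drops from the ICM count
 * and Tx drops from the ECM count, both packed into one CSR.  The
 * trailing read of the ECM config register is the errata 10GE_4
 * workaround for the counter not being clear-on-read.
 */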
static void xgene_sgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
				     u32 *rx, u32 *tx)
{
	u32 addr, count;

	addr = (pdata->enet_id != XGENE_ENET1) ?
		XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR :
		ICM_ECM_DROP_COUNT_REG0_ADDR + pdata->port_id * OFFSET_4;
	count = xgene_enet_rd_mcx_csr(pdata, addr);
	*rx = ICM_DROP_COUNT(count);
	*tx = ECM_DROP_COUNT(count);
	/* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
	addr = (pdata->enet_id != XGENE_ENET1) ?
		XG_MCX_ECM_CONFIG0_REG_0_ADDR :
		ECM_CONFIG0_REG_0_ADDR + pdata->port_id * OFFSET_4;
	xgene_enet_rd_mcx_csr(pdata, addr);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
	u32 val;

	val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}

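/* Write a register of the internal PHY through the MII management
 * interface, polling the BUSY indication for up to ~200us for the
 * write cycle to complete.
 */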
static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
				u32 reg, u16 data)
{
	u32 addr, wr_data, done;
	int i;

	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);

	wr_data = PHY_CONTROL(data);
	xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);

	for (i = 0; i < 10; i++) {
		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
		if (!(done & BUSY_MASK))
			return;
		usleep_range(10, 20);
	}

	netdev_err(p->ndev, "MII_MGMT write failed\n");
}

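/* Read a register of the internal PHY through the MII management
 * interface.  Returns the register value, or 0 if the read cycle
 * does not complete in time.
 */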
static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
{
	u32 addr, data, done;
	int i;

	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);

	for (i = 0; i < 10; i++) {
		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
		if (!(done & BUSY_MASK)) {
			data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
			xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);

			return data;
		}
		usleep_range(10, 20);
	}

	netdev_err(p->ndev, "MII_MGMT read failed\n");

	return 0;
}

static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
{
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
}

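/* Program the station MAC address: bytes 0-3 go into STATION_ADDR0,
 * bytes 4-5 into the upper half of STATION_ADDR1.
 */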
static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
{
	u32 addr0, addr1;
	u8 *dev_addr = p->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);

	addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
	addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
	xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
}

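/* Sample the SGMII base page ability register, cache the resolved
 * speed in pdata and return the link up indication.
 */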
static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
{
	u32 data;

	data = xgene_mii_phy_read(p, INT_PHY_ADDR,
				  SGMII_BASE_PAGE_ABILITY_ADDR >> 2);

	if (LINK_SPEED(data) == PHY_SPEED_1000)
		p->phy_speed = SPEED_1000;
	else if (LINK_SPEED(data) == PHY_SPEED_100)
		p->phy_speed = SPEED_100;
	else
		p->phy_speed = SPEED_10;

	return data & LINK_UP;
}

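/* Pulse the TBI control reset and restart auto-negotiation on the
 * internal SGMII PHY.
 */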
static void xgene_sgmii_configure(struct xgene_enet_pdata *p)
{
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
			    0x8000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
}

static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p)
{
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
			    0x8000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
}

static void xgene_sgmii_reset(struct xgene_enet_pdata *p)
{
	u32 value;

	if (p->phy_speed == SPEED_UNKNOWN)
		return;

	value = xgene_mii_phy_read(p, INT_PHY_ADDR,
				   SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
	if (!(value & LINK_UP))
		xgene_sgmii_tbi_control_reset(p);
}

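/* Adapt the MAC to the current PHY speed: select the interface mode,
 * the ICM MAC mode and async-read wait states, and at gigabit speed
 * additionally bypass the unisec Tx/Rx blocks.
 */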
static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
{
	u32 icm0_addr, icm2_addr, debug_addr;
	u32 icm0, icm2, intf_ctl;
	u32 mc2, value;

	xgene_sgmii_reset(p);

	if (p->enet_id == XGENE_ENET1) {
		icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8;
		icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4;
		debug_addr = DEBUG_REG_ADDR;
	} else {
		icm0_addr = XG_MCX_ICM_CONFIG0_REG_0_ADDR;
		icm2_addr = XG_MCX_ICM_CONFIG2_REG_0_ADDR;
		debug_addr = XG_DEBUG_REG_ADDR;
	}

	icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr);
	icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr);
	mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
	intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR);

	switch (p->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 16);
		value = xgene_enet_rd_csr(p, debug_addr);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(p, debug_addr, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC;
	xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_mcx_csr(p, icm0_addr, icm0);
	xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
}

static void xgene_sgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
	xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}

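/* Kick off auto-negotiation and poll for up to ~10-20ms for both
 * negotiation complete and link up before declaring failure.
 */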
static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
{
	u32 data, loop = 10;

	xgene_sgmii_configure(p);

	while (loop--) {
		data = xgene_mii_phy_read(p, INT_PHY_ADDR,
					  SGMII_STATUS_ADDR >> 2);
		if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
			break;
		usleep_range(1000, 2000);
	}
	if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
		netdev_err(p->ndev, "Auto-negotiation failed\n");
}

static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
{
	u32 data;

	data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);

	if (set)
		data |= bits;
	else
		data &= ~bits;

	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
}

static void xgene_sgmac_flowctl_tx(struct xgene_enet_pdata *p, bool enable)
{
	xgene_sgmac_rxtx(p, TX_FLOW_EN, enable);

	p->mac_ops->enable_tx_pause(p, enable);
}

static void xgene_sgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
	xgene_sgmac_rxtx(pdata, RX_FLOW_EN, enable);
}

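/* Full SGMII MAC bring-up: reset the MAC (unless an external MDIO
 * driver owns an ENET2 port), run auto-negotiation, program speed
 * and station address, then configure the RSIF drop policy, HW pause
 * frame generation thresholds and traffic gating.
 */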
static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
	u32 pause_thres_reg, pause_off_thres_reg;
	u32 enet_spare_cfg_reg, rsif_config_reg;
	u32 cfg_bypass_reg, rx_dv_gate_reg;
	u32 data, data1, data2, offset;
	u32 multi_dpf_reg;

	if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
		xgene_sgmac_reset(p);

	xgene_sgmii_enable_autoneg(p);
	xgene_sgmac_set_speed(p);
	xgene_sgmac_set_mac_addr(p);

	if (p->enet_id == XGENE_ENET1) {
		enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
		rsif_config_reg = RSIF_CONFIG_REG_ADDR;
		cfg_bypass_reg = CFG_BYPASS_ADDR;
		offset = p->port_id * OFFSET_4;
		rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR + offset;
	} else {
		enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
		rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
		cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
		rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
	}

	data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
	data |= MPA_IDLE_WITH_QMI_EMPTY;
	xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);

	/* Adjust MDC clock frequency */
	data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
	MGMT_CLOCK_SEL_SET(&data, 7);
	xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);

	/* Enable drop if bufpool not available */
	data = xgene_enet_rd_csr(p, rsif_config_reg);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(p, rsif_config_reg, data);

	/* Configure HW pause frame generation */
	multi_dpf_reg = (p->enet_id == XGENE_ENET1) ? CSR_MULTI_DPF0_ADDR :
			 XG_MCX_MULTI_DPF0_ADDR;
	data = xgene_enet_rd_mcx_csr(p, multi_dpf_reg);
	data = (DEF_QUANTA << 16) | (data & 0xffff);
	xgene_enet_wr_mcx_csr(p, multi_dpf_reg, data);

	if (p->enet_id != XGENE_ENET1) {
		data = xgene_enet_rd_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR);
		data = (NORM_PAUSE_OPCODE << 16) | (data & 0xffff);
		xgene_enet_wr_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR, data);
	}

	pause_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_THRESH :
			   XG_RXBUF_PAUSE_THRESH;
	pause_off_thres_reg = (p->enet_id == XGENE_ENET1) ?
			       RXBUF_PAUSE_OFF_THRESH : 0;

	if (p->enet_id == XGENE_ENET1) {
		data1 = xgene_enet_rd_csr(p, pause_thres_reg);
		data2 = xgene_enet_rd_csr(p, pause_off_thres_reg);

		if (!(p->port_id % 2)) {
			data1 = (data1 & 0xffff0000) | DEF_PAUSE_THRES;
			data2 = (data2 & 0xffff0000) | DEF_PAUSE_OFF_THRES;
		} else {
			data1 = (data1 & 0xffff) | (DEF_PAUSE_THRES << 16);
			data2 = (data2 & 0xffff) | (DEF_PAUSE_OFF_THRES << 16);
		}

		xgene_enet_wr_csr(p, pause_thres_reg, data1);
		xgene_enet_wr_csr(p, pause_off_thres_reg, data2);
	} else {
		data = (DEF_PAUSE_OFF_THRES << 16) | DEF_PAUSE_THRES;
		xgene_enet_wr_csr(p, pause_thres_reg, data);
	}

	xgene_sgmac_flowctl_tx(p, p->tx_pause);
	xgene_sgmac_flowctl_rx(p, p->rx_pause);

	/* Bypass traffic gating */
	xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
	xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
	xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
}

static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, true);
}

static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, true);
}

static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, false);
}

static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, false);
}

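/* Port-level reset: initialize the ring manager, then cycle the port
 * clock on DT systems or invoke the ACPI _RST (or _INI) method on
 * ACPI systems.  ECC init and ring interface association are done
 * only once, by port 0.
 */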
static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
	struct device *dev = &p->pdev->dev;

	if (!xgene_ring_mgr_init(p))
		return -ENODEV;

	if (p->mdio_driver && p->enet_id == XGENE_ENET2) {
		xgene_enet_config_ring_if_assoc(p);
		return 0;
	}

	if (p->enet_id == XGENE_ENET2)
		xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN);

	if (dev->of_node) {
		if (!IS_ERR(p->clk)) {
			clk_prepare_enable(p->clk);
			udelay(5);
			clk_disable_unprepare(p->clk);
			udelay(5);
			clk_prepare_enable(p->clk);
			udelay(5);
		}
	} else {
#ifdef CONFIG_ACPI
		if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
			acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
					     "_RST", NULL, NULL);
		else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
			acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
					     "_INI", NULL, NULL);
#endif
	}

	if (!p->port_id) {
		xgene_enet_ecc_init(p);
		xgene_enet_config_ring_if_assoc(p);
	}

	return 0;
}

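/* Bypass the classifier engine so that all received frames are
 * steered to the given destination ring and buffer pool pair.
 */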
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
				  u32 dst_ring_num, u16 bufpool_id,
				  u16 nxtbufpool_id)
{
	u32 cle_bypass_reg0, cle_bypass_reg1;
	u32 offset = p->port_id * MAC_OFFSET;
	u32 data, fpsel, nxtfpsel;

	if (p->enet_id == XGENE_ENET1) {
		cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
		cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
	} else {
		cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
		cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
	}

	data = CFG_CLE_BYPASS_EN0;
	xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
	data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel) |
	       CFG_CLE_NXTFPSEL0(nxtfpsel);
	xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}

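/* Reset a ring's QMI state by asserting the buffer pool or work
 * queue reset bit that corresponds to the ring id.
 */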
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(xgene_enet_get_fpsel(ring->id));
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(xgene_enet_ring_bufnum(ring->id));
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
	struct device *dev = &p->pdev->dev;

	if (dev->of_node) {
		if (!IS_ERR(p->clk))
			clk_disable_unprepare(p->clk);
	}
}

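/* Delayed-work handler polling the SGMII link: on a change it updates
 * the carrier state and MAC Rx/Tx enables, then re-arms itself at a
 * rate depending on whether the link is up.
 */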
static void xgene_enet_link_state(struct work_struct *work)
{
	struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
				     struct xgene_enet_pdata, link_work);
	struct net_device *ndev = p->ndev;
	u32 link, poll_interval;

	link = xgene_enet_link_status(p);
	if (link) {
		if (!netif_carrier_ok(ndev)) {
			netif_carrier_on(ndev);
			xgene_sgmac_set_speed(p);
			xgene_sgmac_rx_enable(p);
			xgene_sgmac_tx_enable(p);
			netdev_info(ndev, "Link is Up - %dMbps\n",
				    p->phy_speed);
		}
		poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(ndev)) {
			xgene_sgmac_rx_disable(p);
			xgene_sgmac_tx_disable(p);
			netif_carrier_off(ndev);
			netdev_info(ndev, "Link is Down\n");
		}
		poll_interval = PHY_POLL_LINK_OFF;
	}

	schedule_delayed_work(&p->link_work, poll_interval);
}

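/* Enable or disable Tx pause frame generation by toggling the
 * multi-DPF auto-control and pause XON bits in the port's ECM
 * config register.
 */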
static void xgene_sgmac_enable_tx_pause(struct xgene_enet_pdata *p, bool enable)
{
	u32 data, ecm_cfg_addr;

	if (p->enet_id == XGENE_ENET1) {
		ecm_cfg_addr = (!(p->port_id % 2)) ? CSR_ECM_CFG_0_ADDR :
				CSR_ECM_CFG_1_ADDR;
	} else {
		ecm_cfg_addr = XG_MCX_ECM_CFG_0_ADDR;
	}

	data = xgene_enet_rd_mcx_csr(p, ecm_cfg_addr);
	if (enable)
		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
	else
		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
	xgene_enet_wr_mcx_csr(p, ecm_cfg_addr, data);
}

const struct xgene_mac_ops xgene_sgmac_ops = {
	.init		= xgene_sgmac_init,
	.reset		= xgene_sgmac_reset,
	.rx_enable	= xgene_sgmac_rx_enable,
	.tx_enable	= xgene_sgmac_tx_enable,
	.rx_disable	= xgene_sgmac_rx_disable,
	.tx_disable	= xgene_sgmac_tx_disable,
	.get_drop_cnt   = xgene_sgmac_get_drop_cnt,
	.set_speed	= xgene_sgmac_set_speed,
	.set_mac_addr	= xgene_sgmac_set_mac_addr,
	.set_framesize  = xgene_sgmac_set_frame_size,
	.link_state	= xgene_enet_link_state,
	.enable_tx_pause = xgene_sgmac_enable_tx_pause,
	.flowctl_tx     = xgene_sgmac_flowctl_tx,
	.flowctl_rx     = xgene_sgmac_flowctl_rx
};

const struct xgene_port_ops xgene_sgport_ops = {
	.reset		= xgene_enet_reset,
	.clear		= xgene_enet_clear,
	.cle_bypass	= xgene_enet_cle_bypass,
	.shutdown	= xgene_enet_shutdown
};
617