xref: /linux/drivers/net/ethernet/cadence/macb_main.c (revision 8341c989ac77d712c7d6e2bce29e8a4bcb2eeae4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cadence MACB/GEM Ethernet Controller driver
4  *
5  * Copyright (C) 2004-2006 Atmel Corporation
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/circ_buf.h>
10 #include <linux/clk-provider.h>
11 #include <linux/clk.h>
12 #include <linux/crc32.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/etherdevice.h>
15 #include <linux/firmware/xlnx-zynqmp.h>
16 #include <linux/inetdevice.h>
17 #include <linux/init.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/iopoll.h>
21 #include <linux/ip.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <linux/netdevice.h>
26 #include <linux/of.h>
27 #include <linux/of_mdio.h>
28 #include <linux/of_net.h>
29 #include <linux/phy/phy.h>
30 #include <linux/phylink.h>
31 #include <linux/platform_device.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/ptp_classify.h>
34 #include <linux/reset.h>
35 #include <linux/slab.h>
36 #include <linux/tcp.h>
37 #include <linux/types.h>
38 #include <linux/udp.h>
39 #include <net/pkt_sched.h>
40 #include "macb.h"
41 
42 /* This structure is only used for MACB on SiFive FU540 devices */
43 struct sifive_fu540_macb_mgmt {
44 	void __iomem *reg;
45 	unsigned long rate;
46 	struct clk_hw hw;
47 };
48 
49 #define MACB_RX_BUFFER_SIZE	128
50 #define RX_BUFFER_MULTIPLE	64  /* bytes */
51 
52 #define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
53 #define MIN_RX_RING_SIZE	64
54 #define MAX_RX_RING_SIZE	8192
55 
56 #define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
57 #define MIN_TX_RING_SIZE	64
58 #define MAX_TX_RING_SIZE	4096
59 
60 /* level of occupied TX descriptors under which we wake up TX process */
61 #define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
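/* Worked example (illustrative, not part of the driver): with the default
 * tx_ring_size of 512 this threshold is 3 * 512 / 4 = 384, i.e. a stopped
 * subqueue is woken once no more than 384 descriptors remain in flight
 * (see macb_tx_complete()).
 */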
62 
63 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
64 #define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
65 					| MACB_BIT(ISR_RLE)		\
66 					| MACB_BIT(TXERR))
67 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
68 					| MACB_BIT(TXUBR))
69 
70 /* Max length of transmit frame must be a multiple of 8 bytes */
71 #define MACB_TX_LEN_ALIGN	8
72 #define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
73 /* Limit the maximum TX length as per the Cadence TSO errata. This avoids a
74  * false amba_error in the TX path, raised when the DMA assumes there is not
75  * enough space in the SRAM (16 KB) even when there is.
76  */
77 #define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)
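/* Worked arithmetic (illustrative, assuming the 11-bit MACB_TX_FRMLEN_SIZE
 * field from macb.h): MACB_MAX_TX_LEN is ((1 << 11) - 1) & ~7 = 2040 bytes.
 * GEM_MAX_TX_LEN is 0x3FC0 = 16320 bytes, 64 short of the 16 KB SRAM, and
 * likewise a multiple of MACB_TX_LEN_ALIGN.
 */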
78 
79 #define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
80 #define MACB_NETIF_LSO		NETIF_F_TSO
81 
82 #define MACB_WOL_ENABLED		BIT(0)
83 
84 #define HS_SPEED_10000M			4
85 #define MACB_SERDES_RATE_10G		1
86 
87 /* Graceful stop timeouts in us. We should allow up to
88  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
89  */
90 #define MACB_HALT_TIMEOUT	14000
91 #define MACB_PM_TIMEOUT  100 /* ms */
92 
93 #define MACB_MDIO_TIMEOUT	1000000 /* in usecs */
94 
95 /* DMA buffer descriptors may differ in size
96  * depending on the hardware configuration:
97  *
98  * 1. dma address width 32 bits:
99  *    word 1: 32 bit address of Data Buffer
100  *    word 2: control
101  *
102  * 2. dma address width 64 bits:
103  *    word 1: 32 bit address of Data Buffer
104  *    word 2: control
105  *    word 3: upper 32 bit address of Data Buffer
106  *    word 4: unused
107  *
108  * 3. dma address width 32 bits with hardware timestamping:
109  *    word 1: 32 bit address of Data Buffer
110  *    word 2: control
111  *    word 3: timestamp word 1
112  *    word 4: timestamp word 2
113  *
114  * 4. dma address width 64 bits with hardware timestamping:
115  *    word 1: 32 bit address of Data Buffer
116  *    word 2: control
117  *    word 3: upper 32 bit address of Data Buffer
118  *    word 4: unused
119  *    word 5: timestamp word 1
120  *    word 6: timestamp word 2
121  */
122 static unsigned int macb_dma_desc_get_size(struct macb *bp)
123 {
124 	unsigned int desc_size = sizeof(struct macb_dma_desc);
125 
126 	if (macb_dma64(bp))
127 		desc_size += sizeof(struct macb_dma_desc_64);
128 	if (macb_dma_ptp(bp))
129 		desc_size += sizeof(struct macb_dma_desc_ptp);
130 
131 	return desc_size;
132 }
133 
134 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
135 {
136 	return desc_idx * (1 + macb_dma64(bp) + macb_dma_ptp(bp));
137 }
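/* Example (illustrative): the base descriptor is two 32-bit words (addr and
 * ctrl), and each enabled extension (64-bit addressing, PTP timestamps)
 * appends two more, so a ring slot spans 1x, 2x or 3x the base size. With
 * both extensions, ring index 5 maps to 5 * (1 + 1 + 1) = 15 base-descriptor
 * units into tx_ring/rx_ring.
 */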
138 
139 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
140 {
141 	return (struct macb_dma_desc_64 *)((void *)desc
142 		+ sizeof(struct macb_dma_desc));
143 }
144 
145 /* Ring buffer accessors */
146 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
147 {
148 	return index & (bp->tx_ring_size - 1);
149 }
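/* Example (illustrative): ring sizes are powers of two, so wrapping is a
 * mask rather than a modulo:
 *
 *	macb_tx_ring_wrap(bp, 515);	// == 515 & (512 - 1) == 3 for a 512-entry ring
 */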
150 
151 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
152 					  unsigned int index)
153 {
154 	index = macb_tx_ring_wrap(queue->bp, index);
155 	index = macb_adj_dma_desc_idx(queue->bp, index);
156 	return &queue->tx_ring[index];
157 }
158 
159 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
160 				       unsigned int index)
161 {
162 	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
163 }
164 
165 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
166 {
167 	dma_addr_t offset;
168 
169 	offset = macb_tx_ring_wrap(queue->bp, index) *
170 			macb_dma_desc_get_size(queue->bp);
171 
172 	return queue->tx_ring_dma + offset;
173 }
174 
175 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
176 {
177 	return index & (bp->rx_ring_size - 1);
178 }
179 
180 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
181 {
182 	index = macb_rx_ring_wrap(queue->bp, index);
183 	index = macb_adj_dma_desc_idx(queue->bp, index);
184 	return &queue->rx_ring[index];
185 }
186 
187 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
188 {
189 	return queue->rx_buffers + queue->bp->rx_buffer_size *
190 	       macb_rx_ring_wrap(queue->bp, index);
191 }
192 
193 /* I/O accessors */
194 static u32 hw_readl_native(struct macb *bp, int offset)
195 {
196 	return __raw_readl(bp->regs + offset);
197 }
198 
199 static void hw_writel_native(struct macb *bp, int offset, u32 value)
200 {
201 	__raw_writel(value, bp->regs + offset);
202 }
203 
204 static u32 hw_readl(struct macb *bp, int offset)
205 {
206 	return readl_relaxed(bp->regs + offset);
207 }
208 
209 static void hw_writel(struct macb *bp, int offset, u32 value)
210 {
211 	writel_relaxed(value, bp->regs + offset);
212 }
213 
214 /* Find the CPU endianness by using the loopback bit of the NCR register. When
215  * the CPU is big-endian, we need to program swapped mode for management
216  * descriptor access.
217  */
218 static bool hw_is_native_io(void __iomem *addr)
219 {
220 	u32 value = MACB_BIT(LLB);
221 
222 	__raw_writel(value, addr + MACB_NCR);
223 	value = __raw_readl(addr + MACB_NCR);
224 
225 	/* Write 0 back to disable everything */
226 	__raw_writel(0, addr + MACB_NCR);
227 
228 	return value == MACB_BIT(LLB);
229 }
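/* Example (illustrative, assuming LLB is NCR bit 1): the value written is
 * 0x00000002; on a byte-swapped path the read-back is 0x02000000 instead,
 * so the comparison fails and hw_is_native_io() reports false.
 */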
230 
231 static bool hw_is_gem(void __iomem *addr, bool native_io)
232 {
233 	u32 id;
234 
235 	if (native_io)
236 		id = __raw_readl(addr + MACB_MID);
237 	else
238 		id = readl_relaxed(addr + MACB_MID);
239 
240 	return MACB_BFEXT(IDNUM, id) >= 0x2;
241 }
242 
243 static void macb_set_hwaddr(struct macb *bp)
244 {
245 	u32 bottom;
246 	u16 top;
247 
248 	bottom = get_unaligned_le32(bp->dev->dev_addr);
249 	macb_or_gem_writel(bp, SA1B, bottom);
250 	top = get_unaligned_le16(bp->dev->dev_addr + 4);
251 	macb_or_gem_writel(bp, SA1T, top);
252 
253 	if (gem_has_ptp(bp)) {
254 		gem_writel(bp, RXPTPUNI, bottom);
255 		gem_writel(bp, TXPTPUNI, bottom);
256 	}
257 
258 	/* Clear unused address register sets */
259 	macb_or_gem_writel(bp, SA2B, 0);
260 	macb_or_gem_writel(bp, SA2T, 0);
261 	macb_or_gem_writel(bp, SA3B, 0);
262 	macb_or_gem_writel(bp, SA3T, 0);
263 	macb_or_gem_writel(bp, SA4B, 0);
264 	macb_or_gem_writel(bp, SA4T, 0);
265 }
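/* Worked example (illustrative): for MAC address 00:11:22:33:44:55 the
 * little-endian packing above yields SA1B = 0x33221100 and SA1T = 0x5544,
 * matching the hardware's bottom-32/top-16 register split.
 */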
266 
267 static void macb_get_hwaddr(struct macb *bp)
268 {
269 	u32 bottom;
270 	u16 top;
271 	u8 addr[6];
272 	int i;
273 
274 	/* Check all 4 address registers for a valid address */
275 	for (i = 0; i < 4; i++) {
276 		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
277 		top = macb_or_gem_readl(bp, SA1T + i * 8);
278 
279 		addr[0] = bottom & 0xff;
280 		addr[1] = (bottom >> 8) & 0xff;
281 		addr[2] = (bottom >> 16) & 0xff;
282 		addr[3] = (bottom >> 24) & 0xff;
283 		addr[4] = top & 0xff;
284 		addr[5] = (top >> 8) & 0xff;
285 
286 		if (is_valid_ether_addr(addr)) {
287 			eth_hw_addr_set(bp->dev, addr);
288 			return;
289 		}
290 	}
291 
292 	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
293 	eth_hw_addr_random(bp->dev);
294 }
295 
296 static int macb_mdio_wait_for_idle(struct macb *bp)
297 {
298 	u32 val;
299 
300 	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
301 				  1, MACB_MDIO_TIMEOUT);
302 }
303 
304 static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
305 {
306 	struct macb *bp = bus->priv;
307 	int status;
308 
309 	status = pm_runtime_resume_and_get(&bp->pdev->dev);
310 	if (status < 0)
311 		goto mdio_pm_exit;
312 
313 	status = macb_mdio_wait_for_idle(bp);
314 	if (status < 0)
315 		goto mdio_read_exit;
316 
317 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
318 			      | MACB_BF(RW, MACB_MAN_C22_READ)
319 			      | MACB_BF(PHYA, mii_id)
320 			      | MACB_BF(REGA, regnum)
321 			      | MACB_BF(CODE, MACB_MAN_C22_CODE)));
322 
323 	status = macb_mdio_wait_for_idle(bp);
324 	if (status < 0)
325 		goto mdio_read_exit;
326 
327 	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
328 
329 mdio_read_exit:
330 	pm_runtime_put_autosuspend(&bp->pdev->dev);
331 mdio_pm_exit:
332 	return status;
333 }
334 
335 static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad,
336 			      int regnum)
337 {
338 	struct macb *bp = bus->priv;
339 	int status;
340 
341 	status = pm_runtime_get_sync(&bp->pdev->dev);
342 	if (status < 0) {
343 		pm_runtime_put_noidle(&bp->pdev->dev);
344 		goto mdio_pm_exit;
345 	}
346 
347 	status = macb_mdio_wait_for_idle(bp);
348 	if (status < 0)
349 		goto mdio_read_exit;
350 
351 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
352 			      | MACB_BF(RW, MACB_MAN_C45_ADDR)
353 			      | MACB_BF(PHYA, mii_id)
354 			      | MACB_BF(REGA, devad & 0x1F)
355 			      | MACB_BF(DATA, regnum & 0xFFFF)
356 			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));
357 
358 	status = macb_mdio_wait_for_idle(bp);
359 	if (status < 0)
360 		goto mdio_read_exit;
361 
362 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
363 			      | MACB_BF(RW, MACB_MAN_C45_READ)
364 			      | MACB_BF(PHYA, mii_id)
365 			      | MACB_BF(REGA, devad & 0x1F)
366 			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));
367 
368 	status = macb_mdio_wait_for_idle(bp);
369 	if (status < 0)
370 		goto mdio_read_exit;
371 
372 	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
373 
374 mdio_read_exit:
375 	pm_runtime_put_autosuspend(&bp->pdev->dev);
376 mdio_pm_exit:
377 	return status;
378 }
379 
380 static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
381 			       u16 value)
382 {
383 	struct macb *bp = bus->priv;
384 	int status;
385 
386 	status = pm_runtime_resume_and_get(&bp->pdev->dev);
387 	if (status < 0)
388 		goto mdio_pm_exit;
389 
390 	status = macb_mdio_wait_for_idle(bp);
391 	if (status < 0)
392 		goto mdio_write_exit;
393 
394 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
395 			      | MACB_BF(RW, MACB_MAN_C22_WRITE)
396 			      | MACB_BF(PHYA, mii_id)
397 			      | MACB_BF(REGA, regnum)
398 			      | MACB_BF(CODE, MACB_MAN_C22_CODE)
399 			      | MACB_BF(DATA, value)));
400 
401 	status = macb_mdio_wait_for_idle(bp);
402 	if (status < 0)
403 		goto mdio_write_exit;
404 
405 mdio_write_exit:
406 	pm_runtime_put_autosuspend(&bp->pdev->dev);
407 mdio_pm_exit:
408 	return status;
409 }
410 
411 static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id,
412 			       int devad, int regnum,
413 			       u16 value)
414 {
415 	struct macb *bp = bus->priv;
416 	int status;
417 
418 	status = pm_runtime_get_sync(&bp->pdev->dev);
419 	if (status < 0) {
420 		pm_runtime_put_noidle(&bp->pdev->dev);
421 		goto mdio_pm_exit;
422 	}
423 
424 	status = macb_mdio_wait_for_idle(bp);
425 	if (status < 0)
426 		goto mdio_write_exit;
427 
428 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
429 			      | MACB_BF(RW, MACB_MAN_C45_ADDR)
430 			      | MACB_BF(PHYA, mii_id)
431 			      | MACB_BF(REGA, devad & 0x1F)
432 			      | MACB_BF(DATA, regnum & 0xFFFF)
433 			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));
434 
435 	status = macb_mdio_wait_for_idle(bp);
436 	if (status < 0)
437 		goto mdio_write_exit;
438 
439 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
440 			      | MACB_BF(RW, MACB_MAN_C45_WRITE)
441 			      | MACB_BF(PHYA, mii_id)
442 			      | MACB_BF(REGA, devad & 0x1F)
443 			      | MACB_BF(CODE, MACB_MAN_C45_CODE)
444 			      | MACB_BF(DATA, value)));
445 
446 	status = macb_mdio_wait_for_idle(bp);
447 	if (status < 0)
448 		goto mdio_write_exit;
449 
450 mdio_write_exit:
451 	pm_runtime_put_autosuspend(&bp->pdev->dev);
452 mdio_pm_exit:
453 	return status;
454 }
455 
456 static void macb_init_buffers(struct macb *bp)
457 {
458 	struct macb_queue *queue;
459 	unsigned int q;
460 
461 	/* Single register for all queues' high 32 bits. */
462 	if (macb_dma64(bp)) {
463 		macb_writel(bp, RBQPH,
464 			    upper_32_bits(bp->queues[0].rx_ring_dma));
465 		macb_writel(bp, TBQPH,
466 			    upper_32_bits(bp->queues[0].tx_ring_dma));
467 	}
468 
469 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
470 		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
471 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
472 	}
473 }
474 
475 /**
476  * macb_set_tx_clk() - Set the TX clock rate to match a new link speed
477  * @bp:		pointer to struct macb
478  * @speed:	link speed in Mbps (e.g. SPEED_10, SPEED_100, SPEED_1000)
479  */
480 static void macb_set_tx_clk(struct macb *bp, int speed)
481 {
482 	long ferr, rate, rate_rounded;
483 
484 	if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
485 		return;
486 
487 	/* In case of MII the PHY is the clock master */
488 	if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
489 		return;
490 
491 	rate = rgmii_clock(speed);
492 	if (rate < 0)
493 		return;
494 
495 	rate_rounded = clk_round_rate(bp->tx_clk, rate);
496 	if (rate_rounded < 0)
497 		return;
498 
499 	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
500 	 * is not satisfied.
501 	 */
502 	ferr = abs(rate_rounded - rate);
503 	ferr = DIV_ROUND_UP(ferr, rate / 100000);
504 	if (ferr > 5)
505 		netdev_warn(bp->dev,
506 			    "unable to generate target frequency: %ld Hz\n",
507 			    rate);
508 
509 	if (clk_set_rate(bp->tx_clk, rate_rounded))
510 		netdev_err(bp->dev, "adjusting tx_clk failed.\n");
511 }
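/* Worked example (illustrative): at 1 Gb/s rgmii_clock() requests 125 MHz.
 * One unit of the scaled error is rate / 100000 = 1250 Hz, i.e. 10 ppm, so
 * the check warns above 50 ppm: a rounded rate of 125,006,300 Hz gives
 * ferr = 6300 and DIV_ROUND_UP(6300, 1250) = 6 > 5, triggering the warning.
 */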
512 
513 static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
514 				 phy_interface_t interface, int speed,
515 				 int duplex)
516 {
517 	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
518 	u32 config;
519 
520 	config = gem_readl(bp, USX_CONTROL);
521 	config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
522 	config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
523 	config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
524 	config |= GEM_BIT(TX_EN);
525 	gem_writel(bp, USX_CONTROL, config);
526 }
527 
528 static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
529 				   unsigned int neg_mode,
530 				   struct phylink_link_state *state)
531 {
532 	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
533 	u32 val;
534 
535 	state->speed = SPEED_10000;
536 	state->duplex = 1;
537 	state->an_complete = 1;
538 
539 	val = gem_readl(bp, USX_STATUS);
540 	state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
541 	val = gem_readl(bp, NCFGR);
542 	if (val & GEM_BIT(PAE))
543 		state->pause = MLO_PAUSE_RX;
544 }
545 
546 static int macb_usx_pcs_config(struct phylink_pcs *pcs,
547 			       unsigned int neg_mode,
548 			       phy_interface_t interface,
549 			       const unsigned long *advertising,
550 			       bool permit_pause_to_mac)
551 {
552 	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
553 
554 	gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
555 		   GEM_BIT(SIGNAL_OK));
556 
557 	return 0;
558 }
559 
560 static unsigned int macb_pcs_inband_caps(struct phylink_pcs *pcs,
561 					 phy_interface_t interface)
562 {
563 	return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
564 }
565 
566 static void macb_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
567 			       struct phylink_link_state *state)
568 {
569 	struct macb *bp = container_of(pcs, struct macb, phylink_sgmii_pcs);
570 	u16 bmsr, lpa;
571 
572 	bmsr = gem_readl(bp, PCSSTS);
573 	lpa = gem_readl(bp, PCSANLPBASE);
574 	phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
575 }
576 
577 static void macb_pcs_an_restart(struct phylink_pcs *pcs)
578 {
579 	/* Not supported */
580 }
581 
582 static int macb_pcs_config(struct phylink_pcs *pcs,
583 			   unsigned int neg_mode,
584 			   phy_interface_t interface,
585 			   const unsigned long *advertising,
586 			   bool permit_pause_to_mac)
587 {
588 	struct macb *bp = container_of(pcs, struct macb, phylink_sgmii_pcs);
589 	u32 old, new;
590 
591 	old = gem_readl(bp, PCSANADV);
592 	new = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
593 	if (new != -EINVAL && old != new)
594 		gem_writel(bp, PCSANADV, new);
595 
596 	/* Disable AN if it's not to be used, enable otherwise.
597 	 * Must be written after PCSSEL is set in NCFGR which is done in
598 	 * macb_mac_config(), otherwise writes will not take effect.
599 	 */
600 	old = gem_readl(bp, PCSCNTRL);
601 	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
602 		new = old | BMCR_ANENABLE;
603 	else
604 		new = old & ~BMCR_ANENABLE;
605 	if (old != new)
606 		gem_writel(bp, PCSCNTRL, new);
607 
608 	return 0;
609 }
610 
611 static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
612 	.pcs_get_state = macb_usx_pcs_get_state,
613 	.pcs_config = macb_usx_pcs_config,
614 	.pcs_link_up = macb_usx_pcs_link_up,
615 };
616 
617 static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
618 	.pcs_inband_caps = macb_pcs_inband_caps,
619 	.pcs_get_state = macb_pcs_get_state,
620 	.pcs_an_restart = macb_pcs_an_restart,
621 	.pcs_config = macb_pcs_config,
622 };
623 
624 static void macb_mac_config(struct phylink_config *config, unsigned int mode,
625 			    const struct phylink_link_state *state)
626 {
627 	struct net_device *ndev = to_net_dev(config->dev);
628 	struct macb *bp = netdev_priv(ndev);
629 	unsigned long flags;
630 	u32 old_ctrl, ctrl;
631 	u32 old_ncr, ncr;
632 
633 	spin_lock_irqsave(&bp->lock, flags);
634 
635 	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
636 	old_ncr = ncr = macb_or_gem_readl(bp, NCR);
637 
638 	if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
639 		if (state->interface == PHY_INTERFACE_MODE_RMII)
640 			ctrl |= MACB_BIT(RM9200_RMII);
641 	} else if (macb_is_gem(bp)) {
642 		ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
643 		ncr &= ~GEM_BIT(ENABLE_HS_MAC);
644 
645 		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
646 			ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
647 		} else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
648 			ctrl |= GEM_BIT(PCSSEL);
649 			ncr |= GEM_BIT(ENABLE_HS_MAC);
650 		} else if (bp->caps & MACB_CAPS_MIIONRGMII &&
651 			   bp->phy_interface == PHY_INTERFACE_MODE_MII) {
652 			ncr |= MACB_BIT(MIIONRGMII);
653 		}
654 	}
655 
656 	/* Apply the new configuration, if any */
657 	if (old_ctrl ^ ctrl)
658 		macb_or_gem_writel(bp, NCFGR, ctrl);
659 
660 	if (old_ncr ^ ncr)
661 		macb_or_gem_writel(bp, NCR, ncr);
662 
663 	spin_unlock_irqrestore(&bp->lock, flags);
664 }
665 
666 static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
667 			       phy_interface_t interface)
668 {
669 	struct net_device *ndev = to_net_dev(config->dev);
670 	struct macb *bp = netdev_priv(ndev);
671 	struct macb_queue *queue;
672 	unsigned int q;
673 	u32 ctrl;
674 
675 	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
676 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
677 			queue_writel(queue, IDR,
678 				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
679 
680 	/* Disable Rx and Tx */
681 	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
682 	macb_writel(bp, NCR, ctrl);
683 
684 	netif_tx_stop_all_queues(ndev);
685 }
686 
687 static void macb_mac_link_up(struct phylink_config *config,
688 			     struct phy_device *phy,
689 			     unsigned int mode, phy_interface_t interface,
690 			     int speed, int duplex,
691 			     bool tx_pause, bool rx_pause)
692 {
693 	struct net_device *ndev = to_net_dev(config->dev);
694 	struct macb *bp = netdev_priv(ndev);
695 	struct macb_queue *queue;
696 	unsigned long flags;
697 	unsigned int q;
698 	u32 ctrl;
699 
700 	spin_lock_irqsave(&bp->lock, flags);
701 
702 	ctrl = macb_or_gem_readl(bp, NCFGR);
703 
704 	ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
705 
706 	if (speed == SPEED_100)
707 		ctrl |= MACB_BIT(SPD);
708 
709 	if (duplex)
710 		ctrl |= MACB_BIT(FD);
711 
712 	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
713 		ctrl &= ~MACB_BIT(PAE);
714 		if (macb_is_gem(bp)) {
715 			ctrl &= ~GEM_BIT(GBE);
716 
717 			if (speed == SPEED_1000)
718 				ctrl |= GEM_BIT(GBE);
719 		}
720 
721 		if (rx_pause)
722 			ctrl |= MACB_BIT(PAE);
723 
724 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
725 			queue->tx_head = 0;
726 			queue->tx_tail = 0;
727 			queue_writel(queue, IER,
728 				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
729 		}
730 	}
731 
732 	macb_or_gem_writel(bp, NCFGR, ctrl);
733 
734 	if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
735 		gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
736 							gem_readl(bp, HS_MAC_CONFIG)));
737 
738 	spin_unlock_irqrestore(&bp->lock, flags);
739 
740 	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
741 		macb_set_tx_clk(bp, speed);
742 
743 	/* Enable Rx and Tx; Enable PTP unicast */
744 	ctrl = macb_readl(bp, NCR);
745 	if (gem_has_ptp(bp))
746 		ctrl |= MACB_BIT(PTPUNI);
747 
748 	macb_writel(bp, NCR, ctrl | MACB_BIT(RE) | MACB_BIT(TE));
749 
750 	netif_tx_wake_all_queues(ndev);
751 }
752 
753 static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
754 					       phy_interface_t interface)
755 {
756 	struct net_device *ndev = to_net_dev(config->dev);
757 	struct macb *bp = netdev_priv(ndev);
758 
759 	if (interface == PHY_INTERFACE_MODE_10GBASER)
760 		return &bp->phylink_usx_pcs;
761 	else if (interface == PHY_INTERFACE_MODE_SGMII)
762 		return &bp->phylink_sgmii_pcs;
763 	else
764 		return NULL;
765 }
766 
767 static const struct phylink_mac_ops macb_phylink_ops = {
768 	.mac_select_pcs = macb_mac_select_pcs,
769 	.mac_config = macb_mac_config,
770 	.mac_link_down = macb_mac_link_down,
771 	.mac_link_up = macb_mac_link_up,
772 };
773 
774 static bool macb_phy_handle_exists(struct device_node *dn)
775 {
776 	dn = of_parse_phandle(dn, "phy-handle", 0);
777 	of_node_put(dn);
778 	return dn != NULL;
779 }
780 
781 static int macb_phylink_connect(struct macb *bp)
782 {
783 	struct device_node *dn = bp->pdev->dev.of_node;
784 	struct net_device *dev = bp->dev;
785 	struct phy_device *phydev;
786 	int ret;
787 
788 	if (dn)
789 		ret = phylink_of_phy_connect(bp->phylink, dn, 0);
790 
791 	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
792 		phydev = phy_find_first(bp->mii_bus);
793 		if (!phydev) {
794 			netdev_err(dev, "no PHY found\n");
795 			return -ENXIO;
796 		}
797 
798 		/* attach the mac to the phy */
799 		ret = phylink_connect_phy(bp->phylink, phydev);
800 	}
801 
802 	if (ret) {
803 		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
804 		return ret;
805 	}
806 
807 	phylink_start(bp->phylink);
808 
809 	return 0;
810 }
811 
812 static void macb_get_pcs_fixed_state(struct phylink_config *config,
813 				     struct phylink_link_state *state)
814 {
815 	struct net_device *ndev = to_net_dev(config->dev);
816 	struct macb *bp = netdev_priv(ndev);
817 
818 	state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
819 }
820 
821 /* based on au1000_eth.c */
822 static int macb_mii_probe(struct net_device *dev)
823 {
824 	struct macb *bp = netdev_priv(dev);
825 
826 	bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
827 	bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
828 
829 	bp->phylink_config.dev = &dev->dev;
830 	bp->phylink_config.type = PHYLINK_NETDEV;
831 	bp->phylink_config.mac_managed_pm = true;
832 
833 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
834 		bp->phylink_config.poll_fixed_state = true;
835 		bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
836 	}
837 
838 	bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
839 		MAC_10 | MAC_100;
840 
841 	__set_bit(PHY_INTERFACE_MODE_MII,
842 		  bp->phylink_config.supported_interfaces);
843 	__set_bit(PHY_INTERFACE_MODE_RMII,
844 		  bp->phylink_config.supported_interfaces);
845 
846 	/* Determine what modes are supported */
847 	if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
848 		bp->phylink_config.mac_capabilities |= MAC_1000FD;
849 		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
850 			bp->phylink_config.mac_capabilities |= MAC_1000HD;
851 
852 		__set_bit(PHY_INTERFACE_MODE_GMII,
853 			  bp->phylink_config.supported_interfaces);
854 		phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
855 
856 		if (bp->caps & MACB_CAPS_PCS)
857 			__set_bit(PHY_INTERFACE_MODE_SGMII,
858 				  bp->phylink_config.supported_interfaces);
859 
860 		if (bp->caps & MACB_CAPS_HIGH_SPEED) {
861 			__set_bit(PHY_INTERFACE_MODE_10GBASER,
862 				  bp->phylink_config.supported_interfaces);
863 			bp->phylink_config.mac_capabilities |= MAC_10000FD;
864 		}
865 	}
866 
867 	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
868 				     bp->phy_interface, &macb_phylink_ops);
869 	if (IS_ERR(bp->phylink)) {
870 		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
871 			   PTR_ERR(bp->phylink));
872 		return PTR_ERR(bp->phylink);
873 	}
874 
875 	return 0;
876 }
877 
878 static int macb_mdiobus_register(struct macb *bp, struct device_node *mdio_np)
879 {
880 	struct device_node *child, *np = bp->pdev->dev.of_node;
881 
882 	/* If we have a child named mdio, probe it instead of looking for PHYs
883 	 * directly under the MAC node.
884 	 */
885 	if (mdio_np)
886 		return of_mdiobus_register(bp->mii_bus, mdio_np);
887 
888 	/* Only create the PHY from the device tree if at least one PHY is
889 	 * described. Otherwise scan the entire MDIO bus. We do this to support
890 	 * old device trees that did not follow best practices and did not
891 	 * describe their network PHYs.
892 	 */
893 	for_each_available_child_of_node(np, child)
894 		if (of_mdiobus_child_is_phy(child)) {
895 			/* The loop increments the child refcount,
896 			 * decrement it before returning.
897 			 */
898 			of_node_put(child);
899 
900 			return of_mdiobus_register(bp->mii_bus, np);
901 		}
902 
903 	return mdiobus_register(bp->mii_bus);
904 }
905 
906 static int macb_mii_init(struct macb *bp)
907 {
908 	struct device_node *mdio_np, *np = bp->pdev->dev.of_node;
909 	int err = -ENXIO;
910 
911 	/* With fixed-link, we don't need to register the MDIO bus,
912 	 * unless we have a child named "mdio" in the device tree.
913 	 * In that case, some devices may be attached to the MACB's MDIO bus.
914 	 */
915 	mdio_np = of_get_child_by_name(np, "mdio");
916 	if (!mdio_np && of_phy_is_fixed_link(np))
917 		return macb_mii_probe(bp->dev);
918 
919 	/* Enable management port */
920 	macb_writel(bp, NCR, MACB_BIT(MPE));
921 
922 	bp->mii_bus = mdiobus_alloc();
923 	if (!bp->mii_bus) {
924 		err = -ENOMEM;
925 		goto err_out;
926 	}
927 
928 	bp->mii_bus->name = "MACB_mii_bus";
929 	bp->mii_bus->read = &macb_mdio_read_c22;
930 	bp->mii_bus->write = &macb_mdio_write_c22;
931 	bp->mii_bus->read_c45 = &macb_mdio_read_c45;
932 	bp->mii_bus->write_c45 = &macb_mdio_write_c45;
933 	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
934 		 bp->pdev->name, bp->pdev->id);
935 	bp->mii_bus->priv = bp;
936 	bp->mii_bus->parent = &bp->pdev->dev;
937 
938 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
939 
940 	err = macb_mdiobus_register(bp, mdio_np);
941 	if (err)
942 		goto err_out_free_mdiobus;
943 
944 	err = macb_mii_probe(bp->dev);
945 	if (err)
946 		goto err_out_unregister_bus;
947 
948 	return 0;
949 
950 err_out_unregister_bus:
951 	mdiobus_unregister(bp->mii_bus);
952 err_out_free_mdiobus:
953 	mdiobus_free(bp->mii_bus);
954 err_out:
955 	of_node_put(mdio_np);
956 
957 	return err;
958 }
959 
960 static void macb_update_stats(struct macb *bp)
961 {
962 	u64 *p = &bp->hw_stats.macb.rx_pause_frames;
963 	u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
964 	int offset = MACB_PFR;
965 
966 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
967 
968 	for (; p < end; p++, offset += 4)
969 		*p += bp->macb_reg_readl(bp, offset);
970 }
971 
972 static int macb_halt_tx(struct macb *bp)
973 {
974 	u32 status;
975 
976 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
977 
978 	/* Poll TSR until TGO is cleared or timeout. */
979 	return read_poll_timeout_atomic(macb_readl, status,
980 					!(status & MACB_BIT(TGO)),
981 					250, MACB_HALT_TIMEOUT, false,
982 					bp, TSR);
983 }
984 
985 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
986 {
987 	if (tx_skb->mapping) {
988 		if (tx_skb->mapped_as_page)
989 			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
990 				       tx_skb->size, DMA_TO_DEVICE);
991 		else
992 			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
993 					 tx_skb->size, DMA_TO_DEVICE);
994 		tx_skb->mapping = 0;
995 	}
996 
997 	if (tx_skb->skb) {
998 		napi_consume_skb(tx_skb->skb, budget);
999 		tx_skb->skb = NULL;
1000 	}
1001 }
1002 
1003 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
1004 {
1005 	if (macb_dma64(bp)) {
1006 		struct macb_dma_desc_64 *desc_64;
1007 
1008 		desc_64 = macb_64b_desc(bp, desc);
1009 		desc_64->addrh = upper_32_bits(addr);
1010 		/* The low bits of RX address contain the RX_USED bit, clearing
1011 		 * of which allows packet RX. Make sure the high bits are also
1012 		 * visible to HW at that point.
1013 		 */
1014 		dma_wmb();
1015 	}
1016 
1017 	desc->addr = lower_32_bits(addr);
1018 }
1019 
1020 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
1021 {
1022 	dma_addr_t addr = 0;
1023 
1024 	if (macb_dma64(bp)) {
1025 		struct macb_dma_desc_64 *desc_64;
1026 
1027 		desc_64 = macb_64b_desc(bp, desc);
1028 		addr = ((u64)(desc_64->addrh) << 32);
1029 	}
1030 	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1031 	if (macb_dma_ptp(bp))
1032 		addr &= ~GEM_BIT(DMA_RXVALID);
1033 	return addr;
1034 }
1035 
1036 static void macb_tx_error_task(struct work_struct *work)
1037 {
1038 	struct macb_queue	*queue = container_of(work, struct macb_queue,
1039 						      tx_error_task);
1040 	bool			halt_timeout = false;
1041 	struct macb		*bp = queue->bp;
1042 	u32			queue_index;
1043 	u32			packets = 0;
1044 	u32			bytes = 0;
1045 	struct macb_tx_skb	*tx_skb;
1046 	struct macb_dma_desc	*desc;
1047 	struct sk_buff		*skb;
1048 	unsigned int		tail;
1049 	unsigned long		flags;
1050 
1051 	queue_index = queue - bp->queues;
1052 	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
1053 		    queue_index, queue->tx_tail, queue->tx_head);
1054 
1055 	/* Prevent the queue NAPI TX poll from running, as it calls
1056 	 * macb_tx_complete(), which in turn may call netif_wake_subqueue().
1057 	 * As explained below, we have to halt the transmission before updating
1058 	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
1059 	 * network engine about the macb/gem being halted.
1060 	 */
1061 	napi_disable(&queue->napi_tx);
1062 	spin_lock_irqsave(&bp->lock, flags);
1063 
1064 	/* Make sure nobody is trying to queue up new packets */
1065 	netif_tx_stop_all_queues(bp->dev);
1066 
1067 	/* Stop transmission now
1068 	 * (in case we have just queued new packets)
1069 	 * macb/gem must be halted to write TBQP register
1070 	 */
1071 	if (macb_halt_tx(bp)) {
1072 		netdev_err(bp->dev, "BUG: halt tx timed out\n");
1073 		macb_writel(bp, NCR, macb_readl(bp, NCR) & (~MACB_BIT(TE)));
1074 		halt_timeout = true;
1075 	}
1076 
1077 	/* Process the frames in the TX queue, including the ones that caused
1078 	 * the error, and free their transmit buffers back to the upper layer.
1079 	 */
1080 	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
1081 		u32	ctrl;
1082 
1083 		desc = macb_tx_desc(queue, tail);
1084 		ctrl = desc->ctrl;
1085 		tx_skb = macb_tx_skb(queue, tail);
1086 		skb = tx_skb->skb;
1087 
1088 		if (ctrl & MACB_BIT(TX_USED)) {
1089 			/* skb is set for the last buffer of the frame */
1090 			while (!skb) {
1091 				macb_tx_unmap(bp, tx_skb, 0);
1092 				tail++;
1093 				tx_skb = macb_tx_skb(queue, tail);
1094 				skb = tx_skb->skb;
1095 			}
1096 
1097 			/* ctrl still refers to the first buffer descriptor
1098 			 * since it's the only one written back by the hardware
1099 			 */
1100 			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
1101 				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
1102 					    macb_tx_ring_wrap(bp, tail),
1103 					    skb->data);
1104 				bp->dev->stats.tx_packets++;
1105 				queue->stats.tx_packets++;
1106 				packets++;
1107 				bp->dev->stats.tx_bytes += skb->len;
1108 				queue->stats.tx_bytes += skb->len;
1109 				bytes += skb->len;
1110 			}
1111 		} else {
1112 			/* "Buffers exhausted mid-frame" errors may only happen
1113 			 * if the driver is buggy, so complain loudly about
1114 			 * those. Statistics are updated by hardware.
1115 			 */
1116 			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
1117 				netdev_err(bp->dev,
1118 					   "BUG: TX buffers exhausted mid-frame\n");
1119 
1120 			desc->ctrl = ctrl | MACB_BIT(TX_USED);
1121 		}
1122 
1123 		macb_tx_unmap(bp, tx_skb, 0);
1124 	}
1125 
1126 	netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
1127 				  packets, bytes);
1128 
1129 	/* Set end of TX queue */
1130 	desc = macb_tx_desc(queue, 0);
1131 	macb_set_addr(bp, desc, 0);
1132 	desc->ctrl = MACB_BIT(TX_USED);
1133 
1134 	/* Make descriptor updates visible to hardware */
1135 	wmb();
1136 
1137 	/* Reinitialize the TX desc queue */
1138 	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1139 	/* Make TX ring reflect state of hardware */
1140 	queue->tx_head = 0;
1141 	queue->tx_tail = 0;
1142 
1143 	/* Housework before enabling TX IRQ */
1144 	macb_writel(bp, TSR, macb_readl(bp, TSR));
1145 	queue_writel(queue, IER, MACB_TX_INT_FLAGS);
1146 
1147 	if (halt_timeout)
1148 		macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
1149 
1150 	/* Now we are ready to start transmission again */
1151 	netif_tx_start_all_queues(bp->dev);
1152 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1153 
1154 	spin_unlock_irqrestore(&bp->lock, flags);
1155 	napi_enable(&queue->napi_tx);
1156 }
1157 
1158 static bool ptp_one_step_sync(struct sk_buff *skb)
1159 {
1160 	struct ptp_header *hdr;
1161 	unsigned int ptp_class;
1162 	u8 msgtype;
1163 
1164 	/* No need to parse packet if PTP TS is not involved */
1165 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
1166 		goto not_oss;
1167 
1168 	/* Identify and return whether PTP one step sync is being processed */
1169 	ptp_class = ptp_classify_raw(skb);
1170 	if (ptp_class == PTP_CLASS_NONE)
1171 		goto not_oss;
1172 
1173 	hdr = ptp_parse_header(skb, ptp_class);
1174 	if (!hdr)
1175 		goto not_oss;
1176 
1177 	if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
1178 		goto not_oss;
1179 
1180 	msgtype = ptp_get_msgtype(hdr, ptp_class);
1181 	if (msgtype == PTP_MSGTYPE_SYNC)
1182 		return true;
1183 
1184 not_oss:
1185 	return false;
1186 }
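/* Example (illustrative): per IEEE 1588, twoStepFlag is bit 1 of the first
 * flag-field octet (PTP_FLAG_TWOSTEP). A Sync message (PTP_MSGTYPE_SYNC)
 * with that flag clear is a one-step sync, so the function above returns
 * true; any other message type, or a set twoStepFlag, returns false.
 */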
1187 
1188 static int macb_tx_complete(struct macb_queue *queue, int budget)
1189 {
1190 	struct macb *bp = queue->bp;
1191 	u16 queue_index = queue - bp->queues;
1192 	unsigned long flags;
1193 	unsigned int tail;
1194 	unsigned int head;
1195 	int packets = 0;
1196 	u32 bytes = 0;
1197 
1198 	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
1199 	head = queue->tx_head;
1200 	for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
1201 		struct macb_tx_skb	*tx_skb;
1202 		struct sk_buff		*skb;
1203 		struct macb_dma_desc	*desc;
1204 		u32			ctrl;
1205 
1206 		desc = macb_tx_desc(queue, tail);
1207 
1208 		/* Make hw descriptor updates visible to CPU */
1209 		rmb();
1210 
1211 		ctrl = desc->ctrl;
1212 
1213 		/* TX_USED bit is only set by hardware on the very first buffer
1214 		 * descriptor of the transmitted frame.
1215 		 */
1216 		if (!(ctrl & MACB_BIT(TX_USED)))
1217 			break;
1218 
1219 		/* Process all buffers of the current transmitted frame */
1220 		for (;; tail++) {
1221 			tx_skb = macb_tx_skb(queue, tail);
1222 			skb = tx_skb->skb;
1223 
1224 			/* First, update TX stats if needed */
1225 			if (skb) {
1226 				if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1227 				    !ptp_one_step_sync(skb))
1228 					gem_ptp_do_txstamp(bp, skb, desc);
1229 
1230 				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
1231 					    macb_tx_ring_wrap(bp, tail),
1232 					    skb->data);
1233 				bp->dev->stats.tx_packets++;
1234 				queue->stats.tx_packets++;
1235 				bp->dev->stats.tx_bytes += skb->len;
1236 				queue->stats.tx_bytes += skb->len;
1237 				packets++;
1238 				bytes += skb->len;
1239 			}
1240 
1241 			/* Now we can safely release resources */
1242 			macb_tx_unmap(bp, tx_skb, budget);
1243 
1244 			/* skb is set only for the last buffer of the frame.
1245 			 * WARNING: at this point skb has been freed by
1246 			 * macb_tx_unmap().
1247 			 */
1248 			if (skb)
1249 				break;
1250 		}
1251 	}
1252 
1253 	netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
1254 				  packets, bytes);
1255 
1256 	queue->tx_tail = tail;
1257 	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
1258 	    CIRC_CNT(queue->tx_head, queue->tx_tail,
1259 		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
1260 		netif_wake_subqueue(bp->dev, queue_index);
1261 	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
1262 
1263 	return packets;
1264 }
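/* Worked example (illustrative): CIRC_CNT(head, tail, size) from
 * <linux/circ_buf.h> is (head - tail) & (size - 1), the count of occupied
 * descriptors. In a 512-entry ring, head = 100 and tail = 90 leave 10 in
 * flight, below MACB_TX_WAKEUP_THRESH (384), so a stopped subqueue is woken.
 */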
1265 
1266 static void gem_rx_refill(struct macb_queue *queue)
1267 {
1268 	unsigned int		entry;
1269 	struct sk_buff		*skb;
1270 	dma_addr_t		paddr;
1271 	struct macb *bp = queue->bp;
1272 	struct macb_dma_desc *desc;
1273 
1274 	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
1275 			bp->rx_ring_size) > 0) {
1276 		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
1277 
1278 		/* Make hw descriptor updates visible to CPU */
1279 		rmb();
1280 
1281 		desc = macb_rx_desc(queue, entry);
1282 
1283 		if (!queue->rx_skbuff[entry]) {
1284 			/* allocate sk_buff for this free entry in ring */
1285 			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
1286 			if (unlikely(!skb)) {
1287 				netdev_err(bp->dev,
1288 					   "Unable to allocate sk_buff\n");
1289 				break;
1290 			}
1291 
1292 			/* now fill corresponding descriptor entry */
1293 			paddr = dma_map_single(&bp->pdev->dev, skb->data,
1294 					       bp->rx_buffer_size,
1295 					       DMA_FROM_DEVICE);
1296 			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
1297 				dev_kfree_skb(skb);
1298 				break;
1299 			}
1300 
1301 			queue->rx_skbuff[entry] = skb;
1302 
1303 			if (entry == bp->rx_ring_size - 1)
1304 				paddr |= MACB_BIT(RX_WRAP);
1305 			desc->ctrl = 0;
1306 			/* Setting addr clears RX_USED and allows reception,
1307 			 * make sure ctrl is cleared first to avoid a race.
1308 			 */
1309 			dma_wmb();
1310 			macb_set_addr(bp, desc, paddr);
1311 
1312 			/* Properly align Ethernet header.
1313 			 *
1314 			 * Hardware can add dummy bytes when asked, using the RBOF
1315 			 * field inside the NCFGR register. That feature isn't
1316 			 * available if the hardware is RSC capable.
1317 			 *
1318 			 * We cannot fall back to doing the 2-byte shift before
1319 			 * DMA mapping because the address field does not allow
1320 			 * setting the low 2/3 bits.
1321 			 * It is 3 bits if HW_DMA_CAP_PTP, else 2 bits.
1322 			 */
1323 			if (!(bp->caps & MACB_CAPS_RSC))
1324 				skb_reserve(skb, NET_IP_ALIGN);
1325 		} else {
1326 			desc->ctrl = 0;
1327 			dma_wmb();
1328 			desc->addr &= ~MACB_BIT(RX_USED);
1329 		}
1330 		queue->rx_prepared_head++;
1331 	}
1332 
1333 	/* Make descriptor updates visible to hardware */
1334 	wmb();
1335 
1336 	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
1337 			queue, queue->rx_prepared_head, queue->rx_tail);
1338 }
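/* Worked example (illustrative): CIRC_SPACE(head, tail, size) is defined as
 * CIRC_CNT(tail, head + 1, size), so with rx_prepared_head == rx_tail the
 * loop above prepares at most rx_ring_size - 1 descriptors, always leaving
 * one slot free.
 */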
1339 
1340 /* Mark DMA descriptors from begin up to and not including end as unused */
1341 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
1342 				  unsigned int end)
1343 {
1344 	unsigned int frag;
1345 
1346 	for (frag = begin; frag != end; frag++) {
1347 		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
1348 
1349 		desc->addr &= ~MACB_BIT(RX_USED);
1350 	}
1351 
1352 	/* Make descriptor updates visible to hardware */
1353 	wmb();
1354 
1355 	/* When this happens, the hardware stats registers for
1356 	 * whatever caused this are updated, so we don't have to record
1357 	 * anything.
1358 	 */
1359 }
1360 
1361 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
1362 		  int budget)
1363 {
1364 	struct macb *bp = queue->bp;
1365 	unsigned int		len;
1366 	unsigned int		entry;
1367 	struct sk_buff		*skb;
1368 	struct macb_dma_desc	*desc;
1369 	int			count = 0;
1370 
1371 	while (count < budget) {
1372 		u32 ctrl;
1373 		dma_addr_t addr;
1374 		bool rxused;
1375 
1376 		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1377 		desc = macb_rx_desc(queue, entry);
1378 
1379 		/* Make hw descriptor updates visible to CPU */
1380 		rmb();
1381 
1382 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
1383 		addr = macb_get_addr(bp, desc);
1384 
1385 		if (!rxused)
1386 			break;
1387 
1388 		/* Ensure ctrl is at least as up-to-date as rxused */
1389 		dma_rmb();
1390 
1391 		ctrl = desc->ctrl;
1392 
1393 		queue->rx_tail++;
1394 		count++;
1395 
1396 		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
1397 			netdev_err(bp->dev,
1398 				   "descriptor does not point to a whole frame\n");
1399 			bp->dev->stats.rx_dropped++;
1400 			queue->stats.rx_dropped++;
1401 			break;
1402 		}
1403 		skb = queue->rx_skbuff[entry];
1404 		if (unlikely(!skb)) {
1405 			netdev_err(bp->dev,
1406 				   "inconsistent Rx descriptor chain\n");
1407 			bp->dev->stats.rx_dropped++;
1408 			queue->stats.rx_dropped++;
1409 			break;
1410 		}
1411 		/* now everything is ready for receiving the packet */
1412 		queue->rx_skbuff[entry] = NULL;
1413 		len = ctrl & bp->rx_frm_len_mask;
1414 
1415 		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1416 
1417 		skb_put(skb, len);
1418 		dma_unmap_single(&bp->pdev->dev, addr,
1419 				 bp->rx_buffer_size, DMA_FROM_DEVICE);
1420 
1421 		skb->protocol = eth_type_trans(skb, bp->dev);
1422 		skb_checksum_none_assert(skb);
1423 		if (bp->dev->features & NETIF_F_RXCSUM &&
1424 		    !(bp->dev->flags & IFF_PROMISC) &&
1425 		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
1426 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1427 
1428 		bp->dev->stats.rx_packets++;
1429 		queue->stats.rx_packets++;
1430 		bp->dev->stats.rx_bytes += skb->len;
1431 		queue->stats.rx_bytes += skb->len;
1432 
1433 		gem_ptp_do_rxstamp(bp, skb, desc);
1434 
1435 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1436 		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1437 			    skb->len, skb->csum);
1438 		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
1439 			       skb_mac_header(skb), 16, true);
1440 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
1441 			       skb->data, 32, true);
1442 #endif
1443 
1444 		napi_gro_receive(napi, skb);
1445 	}
1446 
1447 	gem_rx_refill(queue);
1448 
1449 	return count;
1450 }
1451 
1452 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
1453 			 unsigned int first_frag, unsigned int last_frag)
1454 {
1455 	unsigned int len;
1456 	unsigned int frag;
1457 	unsigned int offset;
1458 	struct sk_buff *skb;
1459 	struct macb_dma_desc *desc;
1460 	struct macb *bp = queue->bp;
1461 
1462 	desc = macb_rx_desc(queue, last_frag);
1463 	len = desc->ctrl & bp->rx_frm_len_mask;
1464 
1465 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
1466 		macb_rx_ring_wrap(bp, first_frag),
1467 		macb_rx_ring_wrap(bp, last_frag), len);
1468 
1469 	/* The ethernet header starts NET_IP_ALIGN bytes into the
1470 	 * first buffer. Since the header is 14 bytes, this makes the
1471 	 * payload word-aligned.
1472 	 *
1473 	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
1474 	 * the two padding bytes into the skb so that we avoid hitting
1475 	 * the slowpath in memcpy(), and pull them off afterwards.
1476 	 */
1477 	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
1478 	if (!skb) {
1479 		bp->dev->stats.rx_dropped++;
1480 		for (frag = first_frag; ; frag++) {
1481 			desc = macb_rx_desc(queue, frag);
1482 			desc->addr &= ~MACB_BIT(RX_USED);
1483 			if (frag == last_frag)
1484 				break;
1485 		}
1486 
1487 		/* Make descriptor updates visible to hardware */
1488 		wmb();
1489 
1490 		return 1;
1491 	}
1492 
1493 	offset = 0;
1494 	len += NET_IP_ALIGN;
1495 	skb_checksum_none_assert(skb);
1496 	skb_put(skb, len);
1497 
1498 	for (frag = first_frag; ; frag++) {
1499 		unsigned int frag_len = bp->rx_buffer_size;
1500 
1501 		if (offset + frag_len > len) {
1502 			if (unlikely(frag != last_frag)) {
1503 				dev_kfree_skb_any(skb);
1504 				return -1;
1505 			}
1506 			frag_len = len - offset;
1507 		}
1508 		skb_copy_to_linear_data_offset(skb, offset,
1509 					       macb_rx_buffer(queue, frag),
1510 					       frag_len);
1511 		offset += bp->rx_buffer_size;
1512 		desc = macb_rx_desc(queue, frag);
1513 		desc->addr &= ~MACB_BIT(RX_USED);
1514 
1515 		if (frag == last_frag)
1516 			break;
1517 	}
1518 
1519 	/* Make descriptor updates visible to hardware */
1520 	wmb();
1521 
1522 	__skb_pull(skb, NET_IP_ALIGN);
1523 	skb->protocol = eth_type_trans(skb, bp->dev);
1524 
1525 	bp->dev->stats.rx_packets++;
1526 	bp->dev->stats.rx_bytes += skb->len;
1527 	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1528 		    skb->len, skb->csum);
1529 	napi_gro_receive(napi, skb);
1530 
1531 	return 0;
1532 }
1533 
1534 static inline void macb_init_rx_ring(struct macb_queue *queue)
1535 {
1536 	struct macb *bp = queue->bp;
1537 	dma_addr_t addr;
1538 	struct macb_dma_desc *desc = NULL;
1539 	int i;
1540 
1541 	addr = queue->rx_buffers_dma;
1542 	for (i = 0; i < bp->rx_ring_size; i++) {
1543 		desc = macb_rx_desc(queue, i);
1544 		macb_set_addr(bp, desc, addr);
1545 		desc->ctrl = 0;
1546 		addr += bp->rx_buffer_size;
1547 	}
1548 	desc->addr |= MACB_BIT(RX_WRAP);
1549 	queue->rx_tail = 0;
1550 }
1551 
1552 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
1553 		   int budget)
1554 {
1555 	struct macb *bp = queue->bp;
1556 	bool reset_rx_queue = false;
1557 	int received = 0;
1558 	unsigned int tail;
1559 	int first_frag = -1;
1560 
1561 	for (tail = queue->rx_tail; budget > 0; tail++) {
1562 		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
1563 		u32 ctrl;
1564 
1565 		/* Make hw descriptor updates visible to CPU */
1566 		rmb();
1567 
1568 		if (!(desc->addr & MACB_BIT(RX_USED)))
1569 			break;
1570 
1571 		/* Ensure ctrl is at least as up-to-date as addr */
1572 		dma_rmb();
1573 
1574 		ctrl = desc->ctrl;
1575 
1576 		if (ctrl & MACB_BIT(RX_SOF)) {
1577 			if (first_frag != -1)
1578 				discard_partial_frame(queue, first_frag, tail);
1579 			first_frag = tail;
1580 		}
1581 
1582 		if (ctrl & MACB_BIT(RX_EOF)) {
1583 			int dropped;
1584 
1585 			if (unlikely(first_frag == -1)) {
1586 				reset_rx_queue = true;
1587 				continue;
1588 			}
1589 
1590 			dropped = macb_rx_frame(queue, napi, first_frag, tail);
1591 			first_frag = -1;
1592 			if (unlikely(dropped < 0)) {
1593 				reset_rx_queue = true;
1594 				continue;
1595 			}
1596 			if (!dropped) {
1597 				received++;
1598 				budget--;
1599 			}
1600 		}
1601 	}
1602 
1603 	if (unlikely(reset_rx_queue)) {
1604 		unsigned long flags;
1605 		u32 ctrl;
1606 
1607 		netdev_err(bp->dev, "RX queue corruption: reset it\n");
1608 
1609 		spin_lock_irqsave(&bp->lock, flags);
1610 
1611 		ctrl = macb_readl(bp, NCR);
1612 		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1613 
1614 		macb_init_rx_ring(queue);
1615 		queue_writel(queue, RBQP, queue->rx_ring_dma);
1616 
1617 		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1618 
1619 		spin_unlock_irqrestore(&bp->lock, flags);
1620 		return received;
1621 	}
1622 
1623 	if (first_frag != -1)
1624 		queue->rx_tail = first_frag;
1625 	else
1626 		queue->rx_tail = tail;
1627 
1628 	return received;
1629 }
1630 
1631 static bool macb_rx_pending(struct macb_queue *queue)
1632 {
1633 	struct macb *bp = queue->bp;
1634 	unsigned int		entry;
1635 	struct macb_dma_desc	*desc;
1636 
1637 	entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1638 	desc = macb_rx_desc(queue, entry);
1639 
1640 	/* Make hw descriptor updates visible to CPU */
1641 	rmb();
1642 
1643 	return (desc->addr & MACB_BIT(RX_USED)) != 0;
1644 }
1645 
1646 static int macb_rx_poll(struct napi_struct *napi, int budget)
1647 {
1648 	struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx);
1649 	struct macb *bp = queue->bp;
1650 	int work_done;
1651 
1652 	work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
1653 
1654 	netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
1655 		    (unsigned int)(queue - bp->queues), work_done, budget);
1656 
1657 	if (work_done < budget && napi_complete_done(napi, work_done)) {
1658 		queue_writel(queue, IER, bp->rx_intr_mask);
1659 
1660 		/* Packet completions only seem to propagate to raise
1661 		 * interrupts when interrupts are enabled at the time, so if
1662 		 * packets were received while interrupts were disabled,
1663 		 * they will not cause another interrupt to be generated when
1664 		 * interrupts are re-enabled.
1665 		 * Check for this case here to avoid losing a wakeup. This can
1666 		 * potentially race with the interrupt handler doing the same
1667 		 * actions if an interrupt is raised just after enabling them,
1668 		 * but this should be harmless.
1669 		 */
1670 		if (macb_rx_pending(queue)) {
1671 			queue_writel(queue, IDR, bp->rx_intr_mask);
1672 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1673 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
1674 			netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
1675 			napi_schedule(napi);
1676 		}
1677 	}
1678 
1679 	/* TODO: Handle errors */
1680 
1681 	return work_done;
1682 }
1683 
1684 static void macb_tx_restart(struct macb_queue *queue)
1685 {
1686 	struct macb *bp = queue->bp;
1687 	unsigned int head_idx, tbqp;
1688 	unsigned long flags;
1689 
1690 	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
1691 
1692 	if (queue->tx_head == queue->tx_tail)
1693 		goto out_tx_ptr_unlock;
1694 
1695 	tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
1696 	tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
1697 	head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
1698 
1699 	if (tbqp == head_idx)
1700 		goto out_tx_ptr_unlock;
1701 
1702 	spin_lock(&bp->lock);
1703 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1704 	spin_unlock(&bp->lock);
1705 
1706 out_tx_ptr_unlock:
1707 	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
1708 }
1709 
1710 static bool macb_tx_complete_pending(struct macb_queue *queue)
1711 {
1712 	bool retval = false;
1713 	unsigned long flags;
1714 
1715 	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
1716 	if (queue->tx_head != queue->tx_tail) {
1717 		/* Make hw descriptor updates visible to CPU */
1718 		rmb();
1719 
1720 		if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
1721 			retval = true;
1722 	}
1723 	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
1724 	return retval;
1725 }
1726 
1727 static int macb_tx_poll(struct napi_struct *napi, int budget)
1728 {
1729 	struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx);
1730 	struct macb *bp = queue->bp;
1731 	int work_done;
1732 
1733 	work_done = macb_tx_complete(queue, budget);
1734 
1735 	rmb(); // ensure txubr_pending is up to date
1736 	if (queue->txubr_pending) {
1737 		queue->txubr_pending = false;
1738 		netdev_vdbg(bp->dev, "poll: tx restart\n");
1739 		macb_tx_restart(queue);
1740 	}
1741 
1742 	netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
1743 		    (unsigned int)(queue - bp->queues), work_done, budget);
1744 
1745 	if (work_done < budget && napi_complete_done(napi, work_done)) {
1746 		queue_writel(queue, IER, MACB_BIT(TCOMP));
1747 
1748 		/* Packet completions only seem to propagate to raise
1749 		 * interrupts when interrupts are enabled at the time, so if
1750 		 * packets were sent while interrupts were disabled,
1751 		 * they will not cause another interrupt to be generated when
1752 		 * interrupts are re-enabled.
1753 		 * Check for this case here to avoid losing a wakeup. This can
1754 		 * potentially race with the interrupt handler doing the same
1755 		 * actions if an interrupt is raised just after enabling them,
1756 		 * but this should be harmless.
1757 		 */
1758 		if (macb_tx_complete_pending(queue)) {
1759 			queue_writel(queue, IDR, MACB_BIT(TCOMP));
1760 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1761 				queue_writel(queue, ISR, MACB_BIT(TCOMP));
1762 			netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
1763 			napi_schedule(napi);
1764 		}
1765 	}
1766 
1767 	return work_done;
1768 }
1769 
1770 static void macb_hresp_error_task(struct work_struct *work)
1771 {
1772 	struct macb *bp = from_work(bp, work, hresp_err_bh_work);
1773 	struct net_device *dev = bp->dev;
1774 	struct macb_queue *queue;
1775 	unsigned int q;
1776 	u32 ctrl;
1777 
1778 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1779 		queue_writel(queue, IDR, bp->rx_intr_mask |
1780 					 MACB_TX_INT_FLAGS |
1781 					 MACB_BIT(HRESP));
1782 	}
1783 	ctrl = macb_readl(bp, NCR);
1784 	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1785 	macb_writel(bp, NCR, ctrl);
1786 
1787 	netif_tx_stop_all_queues(dev);
1788 	netif_carrier_off(dev);
1789 
1790 	bp->macbgem_ops.mog_init_rings(bp);
1791 
1792 	/* Initialize TX and RX buffers */
1793 	macb_init_buffers(bp);
1794 
1795 	/* Enable interrupts */
1796 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1797 		queue_writel(queue, IER,
1798 			     bp->rx_intr_mask |
1799 			     MACB_TX_INT_FLAGS |
1800 			     MACB_BIT(HRESP));
1801 
1802 	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
1803 	macb_writel(bp, NCR, ctrl);
1804 
1805 	netif_carrier_on(dev);
1806 	netif_tx_start_all_queues(dev);
1807 }
1808 
1809 static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
1810 {
1811 	struct macb_queue *queue = dev_id;
1812 	struct macb *bp = queue->bp;
1813 	u32 status;
1814 
1815 	status = queue_readl(queue, ISR);
1816 
1817 	if (unlikely(!status))
1818 		return IRQ_NONE;
1819 
1820 	spin_lock(&bp->lock);
1821 
1822 	if (status & MACB_BIT(WOL)) {
1823 		queue_writel(queue, IDR, MACB_BIT(WOL));
1824 		macb_writel(bp, WOL, 0);
1825 		netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
1826 			    (unsigned int)(queue - bp->queues),
1827 			    (unsigned long)status);
1828 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1829 			queue_writel(queue, ISR, MACB_BIT(WOL));
1830 		pm_wakeup_event(&bp->pdev->dev, 0);
1831 	}
1832 
1833 	spin_unlock(&bp->lock);
1834 
1835 	return IRQ_HANDLED;
1836 }
1837 
1838 static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
1839 {
1840 	struct macb_queue *queue = dev_id;
1841 	struct macb *bp = queue->bp;
1842 	u32 status;
1843 
1844 	status = queue_readl(queue, ISR);
1845 
1846 	if (unlikely(!status))
1847 		return IRQ_NONE;
1848 
1849 	spin_lock(&bp->lock);
1850 
1851 	if (status & GEM_BIT(WOL)) {
1852 		queue_writel(queue, IDR, GEM_BIT(WOL));
1853 		gem_writel(bp, WOL, 0);
1854 		netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
1855 			    (unsigned int)(queue - bp->queues),
1856 			    (unsigned long)status);
1857 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1858 			queue_writel(queue, ISR, GEM_BIT(WOL));
1859 		pm_wakeup_event(&bp->pdev->dev, 0);
1860 	}
1861 
1862 	spin_unlock(&bp->lock);
1863 
1864 	return IRQ_HANDLED;
1865 }
1866 
1867 static irqreturn_t macb_interrupt(int irq, void *dev_id)
1868 {
1869 	struct macb_queue *queue = dev_id;
1870 	struct macb *bp = queue->bp;
1871 	struct net_device *dev = bp->dev;
1872 	u32 status, ctrl;
1873 
1874 	status = queue_readl(queue, ISR);
1875 
1876 	if (unlikely(!status))
1877 		return IRQ_NONE;
1878 
1879 	spin_lock(&bp->lock);
1880 
1881 	while (status) {
1882 		/* close possible race with dev_close */
1883 		if (unlikely(!netif_running(dev))) {
1884 			queue_writel(queue, IDR, -1);
1885 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1886 				queue_writel(queue, ISR, -1);
1887 			break;
1888 		}
1889 
1890 		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1891 			    (unsigned int)(queue - bp->queues),
1892 			    (unsigned long)status);
1893 
1894 		if (status & bp->rx_intr_mask) {
1895 			/* There's no point taking any more interrupts
1896 			 * until we have processed the buffers. The
1897 			 * scheduling call may fail if the poll routine
1898 			 * is already scheduled, so disable interrupts
1899 			 * now.
1900 			 */
1901 			queue_writel(queue, IDR, bp->rx_intr_mask);
1902 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1903 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
1904 
1905 			if (napi_schedule_prep(&queue->napi_rx)) {
1906 				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1907 				__napi_schedule(&queue->napi_rx);
1908 			}
1909 		}
1910 
1911 		if (status & (MACB_BIT(TCOMP) |
1912 			      MACB_BIT(TXUBR))) {
1913 			queue_writel(queue, IDR, MACB_BIT(TCOMP));
1914 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1915 				queue_writel(queue, ISR, MACB_BIT(TCOMP) |
1916 							 MACB_BIT(TXUBR));
1917 
1918 			if (status & MACB_BIT(TXUBR)) {
1919 				queue->txubr_pending = true;
1920 				wmb(); // ensure softirq can see update
1921 			}
1922 
1923 			if (napi_schedule_prep(&queue->napi_tx)) {
1924 				netdev_vdbg(bp->dev, "scheduling TX softirq\n");
1925 				__napi_schedule(&queue->napi_tx);
1926 			}
1927 		}
1928 
1929 		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1930 			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1931 			schedule_work(&queue->tx_error_task);
1932 
1933 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1934 				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1935 
1936 			break;
1937 		}
1938 
1939 		/* Link change detection isn't possible with RMII, so we'll
1940 		 * add that if/when we get our hands on a full-blown MII PHY.
1941 		 */
1942 
1943 		/* There is a hardware issue under heavy load where DMA can
1944 		 * stop; this causes endless "used buffer descriptor read"
1945 		 * interrupts, but it can be cleared by re-enabling RX. See
1946 		 * the at91rm9200 manual, section 41.3.1, or the Zynq manual,
1947 		 * section 16.7.4, for details. RXUBR is only enabled for
1948 		 * these two versions.
1949 		 */
1950 		if (status & MACB_BIT(RXUBR)) {
1951 			ctrl = macb_readl(bp, NCR);
1952 			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1953 			wmb();
1954 			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1955 
1956 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1957 				queue_writel(queue, ISR, MACB_BIT(RXUBR));
1958 		}
1959 
1960 		if (status & MACB_BIT(ISR_ROVR)) {
1961 			/* We missed at least one packet */
1962 			spin_lock(&bp->stats_lock);
1963 			if (macb_is_gem(bp))
1964 				bp->hw_stats.gem.rx_overruns++;
1965 			else
1966 				bp->hw_stats.macb.rx_overruns++;
1967 			spin_unlock(&bp->stats_lock);
1968 
1969 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1970 				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1971 		}
1972 
1973 		if (status & MACB_BIT(HRESP)) {
1974 			queue_work(system_bh_wq, &bp->hresp_err_bh_work);
1975 			netdev_err(dev, "DMA bus error: HRESP not OK\n");
1976 
1977 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1978 				queue_writel(queue, ISR, MACB_BIT(HRESP));
1979 		}
1980 		status = queue_readl(queue, ISR);
1981 	}
1982 
1983 	spin_unlock(&bp->lock);
1984 
1985 	return IRQ_HANDLED;
1986 }
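
/* Note on the MACB_CAPS_ISR_CLEAR_ON_WRITE pattern above: on those
 * controller revisions the ISR is not clear-on-read, so each handled
 * interrupt source must be acknowledged explicitly by writing its bit
 * back to ISR; on other revisions the queue_readl(queue, ISR) itself
 * already cleared the register.
 */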
1987 
1988 #ifdef CONFIG_NET_POLL_CONTROLLER
1989 /* Polling receive - used by netconsole and other diagnostic tools
1990  * to allow network i/o with interrupts disabled.
1991  */
1992 static void macb_poll_controller(struct net_device *dev)
1993 {
1994 	struct macb *bp = netdev_priv(dev);
1995 	struct macb_queue *queue;
1996 	unsigned long flags;
1997 	unsigned int q;
1998 
1999 	local_irq_save(flags);
2000 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2001 		macb_interrupt(dev->irq, queue);
2002 	local_irq_restore(flags);
2003 }
2004 #endif
2005 
2006 static unsigned int macb_tx_map(struct macb *bp,
2007 				struct macb_queue *queue,
2008 				struct sk_buff *skb,
2009 				unsigned int hdrlen)
2010 {
2011 	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
2012 	unsigned int len, i, tx_head = queue->tx_head;
2013 	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
2014 	unsigned int eof = 1, mss_mfs = 0;
2015 	struct macb_tx_skb *tx_skb = NULL;
2016 	struct macb_dma_desc *desc;
2017 	unsigned int offset, size;
2018 	dma_addr_t mapping;
2019 
2020 	/* LSO */
2021 	if (skb_shinfo(skb)->gso_size != 0) {
2022 		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2023 			/* UDP - UFO */
2024 			lso_ctrl = MACB_LSO_UFO_ENABLE;
2025 		else
2026 			/* TCP - TSO */
2027 			lso_ctrl = MACB_LSO_TSO_ENABLE;
2028 	}
2029 
2030 	/* First, map non-paged data */
2031 	len = skb_headlen(skb);
2032 
2033 	/* first buffer length */
2034 	size = hdrlen;
2035 
2036 	offset = 0;
2037 	while (len) {
2038 		tx_skb = macb_tx_skb(queue, tx_head);
2039 
2040 		mapping = dma_map_single(&bp->pdev->dev,
2041 					 skb->data + offset,
2042 					 size, DMA_TO_DEVICE);
2043 		if (dma_mapping_error(&bp->pdev->dev, mapping))
2044 			goto dma_error;
2045 
2046 		/* Save info to properly release resources */
2047 		tx_skb->skb = NULL;
2048 		tx_skb->mapping = mapping;
2049 		tx_skb->size = size;
2050 		tx_skb->mapped_as_page = false;
2051 
2052 		len -= size;
2053 		offset += size;
2054 		tx_head++;
2055 
2056 		size = umin(len, bp->max_tx_length);
2057 	}
2058 
2059 	/* Then, map paged data from fragments */
2060 	for (f = 0; f < nr_frags; f++) {
2061 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2062 
2063 		len = skb_frag_size(frag);
2064 		offset = 0;
2065 		while (len) {
2066 			size = umin(len, bp->max_tx_length);
2067 			tx_skb = macb_tx_skb(queue, tx_head);
2068 
2069 			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
2070 						   offset, size, DMA_TO_DEVICE);
2071 			if (dma_mapping_error(&bp->pdev->dev, mapping))
2072 				goto dma_error;
2073 
2074 			/* Save info to properly release resources */
2075 			tx_skb->skb = NULL;
2076 			tx_skb->mapping = mapping;
2077 			tx_skb->size = size;
2078 			tx_skb->mapped_as_page = true;
2079 
2080 			len -= size;
2081 			offset += size;
2082 			tx_head++;
2083 		}
2084 	}
2085 
2086 	/* Should never happen */
2087 	if (unlikely(!tx_skb)) {
2088 		netdev_err(bp->dev, "BUG! empty skb!\n");
2089 		return 0;
2090 	}
2091 
2092 	/* This is the last buffer of the frame: save socket buffer */
2093 	tx_skb->skb = skb;
2094 
2095 	/* Update TX ring: write buffer descriptors in reverse order so that
2096 	 * the first descriptor's 'TX_USED' bit is cleared last, avoiding a race
2097 	 */
2098 
2099 	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
2100 	 * to set the end of TX queue
2101 	 */
2102 	i = tx_head;
2103 	ctrl = MACB_BIT(TX_USED);
2104 	desc = macb_tx_desc(queue, i);
2105 	desc->ctrl = ctrl;
2106 
2107 	if (lso_ctrl) {
2108 		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
2109 			/* include header and FCS in value given to h/w */
2110 			mss_mfs = skb_shinfo(skb)->gso_size +
2111 					skb_transport_offset(skb) +
2112 					ETH_FCS_LEN;
2113 		else /* TSO */ {
2114 			mss_mfs = skb_shinfo(skb)->gso_size;
2115 			/* TCP Sequence Number Source Select
2116 			 * can be set only for TSO
2117 			 */
2118 			seq_ctrl = 0;
2119 		}
2120 	}
2121 
2122 	do {
2123 		i--;
2124 		tx_skb = macb_tx_skb(queue, i);
2125 		desc = macb_tx_desc(queue, i);
2126 
2127 		ctrl = (u32)tx_skb->size;
2128 		if (eof) {
2129 			ctrl |= MACB_BIT(TX_LAST);
2130 			eof = 0;
2131 		}
2132 		if (unlikely(macb_tx_ring_wrap(bp, i) == bp->tx_ring_size - 1))
2133 			ctrl |= MACB_BIT(TX_WRAP);
2134 
2135 		/* First descriptor is header descriptor */
2136 		if (i == queue->tx_head) {
2137 			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
2138 			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
2139 			if ((bp->dev->features & NETIF_F_HW_CSUM) &&
2140 			    skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
2141 			    !ptp_one_step_sync(skb))
2142 				ctrl |= MACB_BIT(TX_NOCRC);
2143 		} else
2144 			/* Only set MSS/MFS on payload descriptors
2145 			 * (second or later descriptor)
2146 			 */
2147 			ctrl |= MACB_BF(MSS_MFS, mss_mfs);
2148 
2149 		/* Set TX buffer descriptor */
2150 		macb_set_addr(bp, desc, tx_skb->mapping);
2151 		/* desc->addr must be visible to hardware before clearing
2152 		 * 'TX_USED' bit in desc->ctrl.
2153 		 */
2154 		wmb();
2155 		desc->ctrl = ctrl;
2156 	} while (i != queue->tx_head);
2157 
2158 	queue->tx_head = tx_head;
2159 
2160 	return 0;
2161 
2162 dma_error:
2163 	netdev_err(bp->dev, "TX DMA map failed\n");
2164 
2165 	for (i = queue->tx_head; i != tx_head; i++) {
2166 		tx_skb = macb_tx_skb(queue, i);
2167 
2168 		macb_tx_unmap(bp, tx_skb, 0);
2169 	}
2170 
2171 	return -ENOMEM;
2172 }
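
/* Illustrative sketch of the reverse-order update (not driver code),
 * assuming a frame mapped into ring slots 5 (header) and 6 (payload):
 *
 *	desc[7].ctrl = TX_USED;			// new end-of-queue marker
 *	desc[6].addr = buf1; desc[6].ctrl = len1 | TX_LAST;
 *	desc[5].addr = buf0; desc[5].ctrl = len0;	// written last
 *
 * Slot 5 was the old end-of-queue marker, so its 'TX_USED' bit keeps the
 * controller out of the new frame until the final write hands the whole
 * chain over at once.
 */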
2173 
2174 static netdev_features_t macb_features_check(struct sk_buff *skb,
2175 					     struct net_device *dev,
2176 					     netdev_features_t features)
2177 {
2178 	unsigned int nr_frags, f;
2179 	unsigned int hdrlen;
2180 
2181 	/* Validate LSO compatibility */
2182 
2183 	/* there is only one buffer or protocol is not UDP */
2184 	if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
2185 		return features;
2186 
2187 	/* length of header */
2188 	hdrlen = skb_transport_offset(skb);
2189 
2190 	/* For UFO only:
2191 	 * When software supplies two or more payload buffers, all payload
2192 	 * buffers apart from the last must be a multiple of 8 bytes in size.
2193 	 */
2194 	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
2195 		return features & ~MACB_NETIF_LSO;
2196 
2197 	nr_frags = skb_shinfo(skb)->nr_frags;
2198 	/* No need to check last fragment */
2199 	nr_frags--;
2200 	for (f = 0; f < nr_frags; f++) {
2201 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2202 
2203 		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
2204 			return features & ~MACB_NETIF_LSO;
2205 	}
2206 	return features;
2207 }
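
/* Example (illustrative): for a UDP GSO skb over IPv4 with
 * skb_transport_offset() == 34, a linear area of 34 + 1000 bytes passes
 * the alignment check above (1000 % 8 == 0), whereas 34 + 1001 bytes
 * would strip MACB_NETIF_LSO from the advertised features and force
 * software segmentation for this skb.
 */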
2208 
2209 static inline int macb_clear_csum(struct sk_buff *skb)
2210 {
2211 	/* no change for packets without checksum offloading */
2212 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2213 		return 0;
2214 
2215 	/* make sure we can modify the header */
2216 	if (unlikely(skb_cow_head(skb, 0)))
2217 		return -1;
2218 
2219 	/* Initialize the checksum field.
2220 	 * This is required - at least for Zynq, which otherwise calculates
2221 	 * wrong UDP header checksums for UDP packets with UDP data len <= 2.
2222 	 */
2223 	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
2224 	return 0;
2225 }
2226 
2227 static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
2228 {
2229 	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
2230 		      skb_is_nonlinear(*skb);
2231 	int padlen = ETH_ZLEN - (*skb)->len;
2232 	int tailroom = skb_tailroom(*skb);
2233 	struct sk_buff *nskb;
2234 	u32 fcs;
2235 
2236 	if (!(ndev->features & NETIF_F_HW_CSUM) ||
2237 	    (*skb)->ip_summed == CHECKSUM_PARTIAL ||
2238 	    skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
2239 		return 0;
2240 
2241 	if (padlen <= 0) {
2242 		/* FCS could be appended to the tailroom. */
2243 		if (tailroom >= ETH_FCS_LEN)
2244 			goto add_fcs;
2245 		/* No room for FCS, need to reallocate skb. */
2246 		else
2247 			padlen = ETH_FCS_LEN;
2248 	} else {
2249 		/* Add room for FCS. */
2250 		padlen += ETH_FCS_LEN;
2251 	}
2252 
2253 	if (cloned || tailroom < padlen) {
2254 		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
2255 		if (!nskb)
2256 			return -ENOMEM;
2257 
2258 		dev_consume_skb_any(*skb);
2259 		*skb = nskb;
2260 	}
2261 
2262 	if (padlen > ETH_FCS_LEN)
2263 		skb_put_zero(*skb, padlen - ETH_FCS_LEN);
2264 
2265 add_fcs:
2266 	/* set FCS to packet */
2267 	fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
2268 	fcs = ~fcs;
2269 
2270 	skb_put_u8(*skb, fcs		& 0xff);
2271 	skb_put_u8(*skb, (fcs >> 8)	& 0xff);
2272 	skb_put_u8(*skb, (fcs >> 16)	& 0xff);
2273 	skb_put_u8(*skb, (fcs >> 24)	& 0xff);
2274 
2275 	return 0;
2276 }
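
/* Note: ~crc32_le(~0, data, len) appended least-significant byte first is
 * the standard IEEE 802.3 FCS construction, so frames padded here carry
 * the same FCS the MAC would normally compute (which is why 'TX_NOCRC' is
 * set for such frames in macb_tx_map()).
 */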
2277 
2278 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
2279 {
2280 	u16 queue_index = skb_get_queue_mapping(skb);
2281 	struct macb *bp = netdev_priv(dev);
2282 	struct macb_queue *queue = &bp->queues[queue_index];
2283 	unsigned int desc_cnt, nr_frags, frag_size, f;
2284 	unsigned int hdrlen;
2285 	unsigned long flags;
2286 	bool is_lso;
2287 	netdev_tx_t ret = NETDEV_TX_OK;
2288 
2289 	if (macb_clear_csum(skb)) {
2290 		dev_kfree_skb_any(skb);
2291 		return ret;
2292 	}
2293 
2294 	if (macb_pad_and_fcs(&skb, dev)) {
2295 		dev_kfree_skb_any(skb);
2296 		return ret;
2297 	}
2298 
2299 	if (macb_dma_ptp(bp) &&
2300 	    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
2301 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2302 
2303 	is_lso = (skb_shinfo(skb)->gso_size != 0);
2304 
2305 	if (is_lso) {
2306 		/* length of headers */
2307 		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2308 			/* only queue eth + ip headers separately for UDP */
2309 			hdrlen = skb_transport_offset(skb);
2310 		else
2311 			hdrlen = skb_tcp_all_headers(skb);
2312 		if (skb_headlen(skb) < hdrlen) {
2313 			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
2314 			/* if this is required, would need to copy to single buffer */
2315 			return NETDEV_TX_BUSY;
2316 		}
2317 	} else
2318 		hdrlen = umin(skb_headlen(skb), bp->max_tx_length);
2319 
2320 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
2321 	netdev_vdbg(bp->dev,
2322 		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
2323 		    queue_index, skb->len, skb->head, skb->data,
2324 		    skb_tail_pointer(skb), skb_end_pointer(skb));
2325 	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
2326 		       skb->data, 16, true);
2327 #endif
2328 
2329 	/* Count how many TX buffer descriptors are needed to send this
2330 	 * socket buffer: skb fragments of jumbo frames may need to be
2331 	 * split into many buffer descriptors.
2332 	 */
2333 	if (is_lso && (skb_headlen(skb) > hdrlen))
2334 		/* extra header descriptor if also payload in first buffer */
2335 		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
2336 	else
2337 		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
2338 	nr_frags = skb_shinfo(skb)->nr_frags;
2339 	for (f = 0; f < nr_frags; f++) {
2340 		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
2341 		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
2342 	}
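
	/* Example (illustrative): a TSO skb with skb_headlen() == 200,
	 * hdrlen == 66 and no fragments needs
	 * DIV_ROUND_UP(200 - 66, bp->max_tx_length) + 1 == 2 descriptors:
	 * one for the headers, one for the linear payload.
	 */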
2343 
2344 	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
2345 
2346 	/* This is a hard error, log it. */
2347 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
2348 		       bp->tx_ring_size) < desc_cnt) {
2349 		netif_stop_subqueue(dev, queue_index);
2350 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
2351 			   queue->tx_head, queue->tx_tail);
2352 		ret = NETDEV_TX_BUSY;
2353 		goto unlock;
2354 	}
2355 
2356 	/* Map socket buffer for DMA transfer */
2357 	if (macb_tx_map(bp, queue, skb, hdrlen)) {
2358 		dev_kfree_skb_any(skb);
2359 		goto unlock;
2360 	}
2361 
2362 	/* Make newly initialized descriptor visible to hardware */
2363 	wmb();
2364 	skb_tx_timestamp(skb);
2365 	netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
2366 			     skb->len);
2367 
2368 	spin_lock(&bp->lock);
2369 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
2370 	spin_unlock(&bp->lock);
2371 
2372 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
2373 		netif_stop_subqueue(dev, queue_index);
2374 
2375 unlock:
2376 	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
2377 
2378 	return ret;
2379 }
2380 
2381 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
2382 {
2383 	if (!macb_is_gem(bp)) {
2384 		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
2385 	} else {
2386 		bp->rx_buffer_size = size;
2387 
2388 		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
2389 			netdev_dbg(bp->dev,
2390 				   "RX buffer must be multiple of %d bytes, expanding\n",
2391 				   RX_BUFFER_MULTIPLE);
2392 			bp->rx_buffer_size =
2393 				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
2394 		}
2395 	}
2396 
2397 	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
2398 		   bp->dev->mtu, bp->rx_buffer_size);
2399 }
2400 
2401 static void gem_free_rx_buffers(struct macb *bp)
2402 {
2403 	struct sk_buff		*skb;
2404 	struct macb_dma_desc	*desc;
2405 	struct macb_queue *queue;
2406 	dma_addr_t		addr;
2407 	unsigned int q;
2408 	int i;
2409 
2410 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2411 		if (!queue->rx_skbuff)
2412 			continue;
2413 
2414 		for (i = 0; i < bp->rx_ring_size; i++) {
2415 			skb = queue->rx_skbuff[i];
2416 
2417 			if (!skb)
2418 				continue;
2419 
2420 			desc = macb_rx_desc(queue, i);
2421 			addr = macb_get_addr(bp, desc);
2422 
2423 			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
2424 					DMA_FROM_DEVICE);
2425 			dev_kfree_skb_any(skb);
2426 			skb = NULL;
2427 		}
2428 
2429 		kfree(queue->rx_skbuff);
2430 		queue->rx_skbuff = NULL;
2431 	}
2432 }
2433 
2434 static void macb_free_rx_buffers(struct macb *bp)
2435 {
2436 	struct macb_queue *queue = &bp->queues[0];
2437 
2438 	if (queue->rx_buffers) {
2439 		dma_free_coherent(&bp->pdev->dev,
2440 				  bp->rx_ring_size * bp->rx_buffer_size,
2441 				  queue->rx_buffers, queue->rx_buffers_dma);
2442 		queue->rx_buffers = NULL;
2443 	}
2444 }
2445 
2446 static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
2447 {
2448 	return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
2449 }
2450 
2451 static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
2452 {
2453 	return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
2454 }
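
/* The *_bd_rd_prefetch terms above pad each ring allocation: some
 * controller revisions prefetch descriptors past the last ring entry, and
 * the extra space keeps those speculative reads inside the allocation.
 * (The prefetch depths are assumed to have been determined at probe time.)
 */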
2455 
2456 static void macb_free_consistent(struct macb *bp)
2457 {
2458 	struct device *dev = &bp->pdev->dev;
2459 	struct macb_queue *queue;
2460 	unsigned int q;
2461 	size_t size;
2462 
2463 	if (bp->rx_ring_tieoff) {
2464 		dma_free_coherent(dev, macb_dma_desc_get_size(bp),
2465 				  bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
2466 		bp->rx_ring_tieoff = NULL;
2467 	}
2468 
2469 	bp->macbgem_ops.mog_free_rx_buffers(bp);
2470 
2471 	size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
2472 	dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
2473 
2474 	size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
2475 	dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
2476 
2477 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2478 		kfree(queue->tx_skb);
2479 		queue->tx_skb = NULL;
2480 		queue->tx_ring = NULL;
2481 		queue->rx_ring = NULL;
2482 	}
2483 }
2484 
2485 static int gem_alloc_rx_buffers(struct macb *bp)
2486 {
2487 	struct macb_queue *queue;
2488 	unsigned int q;
2489 	int size;
2490 
2491 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2492 		size = bp->rx_ring_size * sizeof(struct sk_buff *);
2493 		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
2494 		if (!queue->rx_skbuff)
2495 			return -ENOMEM;
2496 		else
2497 			netdev_dbg(bp->dev,
2498 				   "Allocated %d RX struct sk_buff entries at %p\n",
2499 				   bp->rx_ring_size, queue->rx_skbuff);
2500 	}
2501 	return 0;
2502 }
2503 
2504 static int macb_alloc_rx_buffers(struct macb *bp)
2505 {
2506 	struct macb_queue *queue = &bp->queues[0];
2507 	int size;
2508 
2509 	size = bp->rx_ring_size * bp->rx_buffer_size;
2510 	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2511 					    &queue->rx_buffers_dma, GFP_KERNEL);
2512 	if (!queue->rx_buffers)
2513 		return -ENOMEM;
2514 
2515 	netdev_dbg(bp->dev,
2516 		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
2517 		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
2518 	return 0;
2519 }
2520 
2521 static int macb_alloc_consistent(struct macb *bp)
2522 {
2523 	struct device *dev = &bp->pdev->dev;
2524 	dma_addr_t tx_dma, rx_dma;
2525 	struct macb_queue *queue;
2526 	unsigned int q;
2527 	void *tx, *rx;
2528 	size_t size;
2529 
2530 	/*
2531 	 * The upper 32 bits of the Tx/Rx DMA descriptor base must match for
2532 	 * every queue! We cannot enforce this guarantee; the best we can do
2533 	 * is make a single allocation and hope it lands in alloc_pages(),
2534 	 * which guarantees natural alignment of physical addresses.
2535 	 */
2536 
2537 	size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
2538 	tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
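	/* Reject the allocation if it straddles a 4 GiB boundary: per the
	 * comment above, all queues must share the same upper 32 address
	 * bits, so the whole ring area must sit inside one 4 GiB window
	 * (likewise for the RX rings below).
	 */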
2539 	if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
2540 		goto out_err;
2541 	netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
2542 		   size, bp->num_queues, (unsigned long)tx_dma, tx);
2543 
2544 	size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
2545 	rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
2546 	if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
2547 		goto out_err;
2548 	netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
2549 		   size, bp->num_queues, (unsigned long)rx_dma, rx);
2550 
2551 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2552 		queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
2553 		queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
2554 
2555 		queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
2556 		queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
2557 
2558 		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
2559 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
2560 		if (!queue->tx_skb)
2561 			goto out_err;
2562 	}
2563 	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2564 		goto out_err;
2565 
2566 	/* Required for tie off descriptor for PM cases */
2567 	if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
2568 		bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
2569 							macb_dma_desc_get_size(bp),
2570 							&bp->rx_ring_tieoff_dma,
2571 							GFP_KERNEL);
2572 		if (!bp->rx_ring_tieoff)
2573 			goto out_err;
2574 	}
2575 
2576 	return 0;
2577 
2578 out_err:
2579 	macb_free_consistent(bp);
2580 	return -ENOMEM;
2581 }
2582 
2583 static void macb_init_tieoff(struct macb *bp)
2584 {
2585 	struct macb_dma_desc *desc = bp->rx_ring_tieoff;
2586 
2587 	if (bp->caps & MACB_CAPS_QUEUE_DISABLE)
2588 		return;
2589 	/* Set up a wrapping descriptor with no free slots
2590 	 * (WRAP and USED) to tie off/disable unused RX queues.
2591 	 */
2592 	macb_set_addr(bp, desc, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
2593 	desc->ctrl = 0;
2594 }
2595 
2596 static void gem_init_rings(struct macb *bp)
2597 {
2598 	struct macb_queue *queue;
2599 	struct macb_dma_desc *desc = NULL;
2600 	unsigned int q;
2601 	int i;
2602 
2603 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2604 		for (i = 0; i < bp->tx_ring_size; i++) {
2605 			desc = macb_tx_desc(queue, i);
2606 			macb_set_addr(bp, desc, 0);
2607 			desc->ctrl = MACB_BIT(TX_USED);
2608 		}
2609 		desc->ctrl |= MACB_BIT(TX_WRAP);
2610 		queue->tx_head = 0;
2611 		queue->tx_tail = 0;
2612 
2613 		queue->rx_tail = 0;
2614 		queue->rx_prepared_head = 0;
2615 
2616 		gem_rx_refill(queue);
2617 	}
2618 
2619 	macb_init_tieoff(bp);
2620 }
2621 
2622 static void macb_init_rings(struct macb *bp)
2623 {
2624 	int i;
2625 	struct macb_dma_desc *desc = NULL;
2626 
2627 	macb_init_rx_ring(&bp->queues[0]);
2628 
2629 	for (i = 0; i < bp->tx_ring_size; i++) {
2630 		desc = macb_tx_desc(&bp->queues[0], i);
2631 		macb_set_addr(bp, desc, 0);
2632 		desc->ctrl = MACB_BIT(TX_USED);
2633 	}
2634 	bp->queues[0].tx_head = 0;
2635 	bp->queues[0].tx_tail = 0;
2636 	desc->ctrl |= MACB_BIT(TX_WRAP);
2637 
2638 	macb_init_tieoff(bp);
2639 }
2640 
2641 static void macb_reset_hw(struct macb *bp)
2642 {
2643 	struct macb_queue *queue;
2644 	unsigned int q;
2645 	u32 ctrl = macb_readl(bp, NCR);
2646 
2647 	/* Disable RX and TX (XXX: Should we halt the transmission
2648 	 * more gracefully?)
2649 	 */
2650 	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
2651 
2652 	/* Clear the stats registers (XXX: Update stats first?) */
2653 	ctrl |= MACB_BIT(CLRSTAT);
2654 
2655 	macb_writel(bp, NCR, ctrl);
2656 
2657 	/* Clear all status flags */
2658 	macb_writel(bp, TSR, -1);
2659 	macb_writel(bp, RSR, -1);
2660 
2661 	/* Disable RX partial store and forward and reset watermark value */
2662 	gem_writel(bp, PBUFRXCUT, 0);
2663 
2664 	/* Disable all interrupts */
2665 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2666 		queue_writel(queue, IDR, -1);
2667 		queue_readl(queue, ISR);
2668 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2669 			queue_writel(queue, ISR, -1);
2670 	}
2671 }
2672 
2673 static u32 gem_mdc_clk_div(struct macb *bp)
2674 {
2675 	u32 config;
2676 	unsigned long pclk_hz = clk_get_rate(bp->pclk);
2677 
2678 	if (pclk_hz <= 20000000)
2679 		config = GEM_BF(CLK, GEM_CLK_DIV8);
2680 	else if (pclk_hz <= 40000000)
2681 		config = GEM_BF(CLK, GEM_CLK_DIV16);
2682 	else if (pclk_hz <= 80000000)
2683 		config = GEM_BF(CLK, GEM_CLK_DIV32);
2684 	else if (pclk_hz <= 120000000)
2685 		config = GEM_BF(CLK, GEM_CLK_DIV48);
2686 	else if (pclk_hz <= 160000000)
2687 		config = GEM_BF(CLK, GEM_CLK_DIV64);
2688 	else if (pclk_hz <= 240000000)
2689 		config = GEM_BF(CLK, GEM_CLK_DIV96);
2690 	else if (pclk_hz <= 320000000)
2691 		config = GEM_BF(CLK, GEM_CLK_DIV128);
2692 	else
2693 		config = GEM_BF(CLK, GEM_CLK_DIV224);
2694 
2695 	return config;
2696 }
2697 
2698 static u32 macb_mdc_clk_div(struct macb *bp)
2699 {
2700 	u32 config;
2701 	unsigned long pclk_hz;
2702 
2703 	if (macb_is_gem(bp))
2704 		return gem_mdc_clk_div(bp);
2705 
2706 	pclk_hz = clk_get_rate(bp->pclk);
2707 	if (pclk_hz <= 20000000)
2708 		config = MACB_BF(CLK, MACB_CLK_DIV8);
2709 	else if (pclk_hz <= 40000000)
2710 		config = MACB_BF(CLK, MACB_CLK_DIV16);
2711 	else if (pclk_hz <= 80000000)
2712 		config = MACB_BF(CLK, MACB_CLK_DIV32);
2713 	else
2714 		config = MACB_BF(CLK, MACB_CLK_DIV64);
2715 
2716 	return config;
2717 }
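
/* Example (illustrative): with a 125 MHz pclk, gem_mdc_clk_div() selects
 * GEM_CLK_DIV64 (the <= 160 MHz bucket), giving MDC = 125 MHz / 64, or
 * roughly 1.95 MHz, below the 2.5 MHz ceiling of IEEE 802.3 clause 22.
 */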
2718 
2719 /* Get the DMA bus width field of the network configuration register that we
2720  * should program.  We find the width from decoding the design configuration
2721  * register to find the maximum supported data bus width.
2722  */
2723 static u32 macb_dbw(struct macb *bp)
2724 {
2725 	if (!macb_is_gem(bp))
2726 		return 0;
2727 
2728 	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2729 	case 4:
2730 		return GEM_BF(DBW, GEM_DBW128);
2731 	case 2:
2732 		return GEM_BF(DBW, GEM_DBW64);
2733 	case 1:
2734 	default:
2735 		return GEM_BF(DBW, GEM_DBW32);
2736 	}
2737 }
2738 
2739 /* Configure the receive DMA engine
2740  * - use the correct receive buffer size
2741  * - set best burst length for DMA operations
2742  *   (if not supported by FIFO, it will fallback to default)
2743  *   (if not supported by the FIFO, it will fall back to the default)
2744  * These are configurable parameters for GEM.
2745  */
2746 static void macb_configure_dma(struct macb *bp)
2747 {
2748 	struct macb_queue *queue;
2749 	u32 buffer_size;
2750 	unsigned int q;
2751 	u32 dmacfg;
2752 
2753 	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
2754 	if (macb_is_gem(bp)) {
2755 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
2756 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2757 			if (q)
2758 				queue_writel(queue, RBQS, buffer_size);
2759 			else
2760 				dmacfg |= GEM_BF(RXBS, buffer_size);
2761 		}
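		/* Queue 0 takes its RX buffer size from the RXBS field of
		 * DMACFG, while queues 1 and up each have a dedicated RBQS
		 * register, hence the split above.
		 */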
2762 		if (bp->dma_burst_length)
2763 			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
2764 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
2765 		dmacfg &= ~GEM_BIT(ENDIA_PKT);
2766 
2767 		if (bp->native_io)
2768 			dmacfg &= ~GEM_BIT(ENDIA_DESC);
2769 		else
2770 			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
2771 
2772 		if (bp->dev->features & NETIF_F_HW_CSUM)
2773 			dmacfg |= GEM_BIT(TXCOEN);
2774 		else
2775 			dmacfg &= ~GEM_BIT(TXCOEN);
2776 
2777 		dmacfg &= ~GEM_BIT(ADDR64);
2778 		if (macb_dma64(bp))
2779 			dmacfg |= GEM_BIT(ADDR64);
2780 		if (macb_dma_ptp(bp))
2781 			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
2782 		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2783 			   dmacfg);
2784 		gem_writel(bp, DMACFG, dmacfg);
2785 	}
2786 }
2787 
2788 static void macb_init_hw(struct macb *bp)
2789 {
2790 	u32 config;
2791 
2792 	macb_reset_hw(bp);
2793 	macb_set_hwaddr(bp);
2794 
2795 	config = macb_mdc_clk_div(bp);
2796 	/* Align the Ethernet payload data.
2797 	 * If the controller is RSC capable, this offset is ignored by HW.
2798 	 */
2799 	if (!(bp->caps & MACB_CAPS_RSC))
2800 		config |= MACB_BF(RBOF, NET_IP_ALIGN);
2801 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
2802 	if (bp->caps & MACB_CAPS_JUMBO)
2803 		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
2804 	else
2805 		config |= MACB_BIT(BIG);	/* Receive oversized frames */
2806 	if (bp->dev->flags & IFF_PROMISC)
2807 		config |= MACB_BIT(CAF);	/* Copy All Frames */
2808 	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2809 		config |= GEM_BIT(RXCOEN);
2810 	if (!(bp->dev->flags & IFF_BROADCAST))
2811 		config |= MACB_BIT(NBC);	/* No BroadCast */
2812 	config |= macb_dbw(bp);
2813 	macb_writel(bp, NCFGR, config);
2814 	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
2815 		gem_writel(bp, JML, bp->jumbo_max_len);
2816 	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
2817 	if (bp->caps & MACB_CAPS_JUMBO)
2818 		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
2819 
2820 	macb_configure_dma(bp);
2821 
2822 	/* Enable RX partial store and forward and set watermark */
2823 	if (bp->rx_watermark)
2824 		gem_writel(bp, PBUFRXCUT, (bp->rx_watermark | GEM_BIT(ENCUTTHRU)));
2825 }
2826 
2827 /* The hash address register is 64 bits long and takes up two
2828  * locations in the memory map.  The least significant bits are stored
2829  * in EMAC_HSL and the most significant bits in EMAC_HSH.
2830  *
2831  * The unicast hash enable and the multicast hash enable bits in the
2832  * network configuration register enable the reception of hash matched
2833  * frames. The destination address is reduced to a 6 bit index into
2834  * the 64 bit hash register using the following hash function.  The
2835  * hash function is an exclusive or of every sixth bit of the
2836  * destination address.
2837  *
2838  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
2839  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
2840  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
2841  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
2842  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
2843  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
2844  *
2845  * da[0] represents the least significant bit of the first byte
2846  * received, that is, the multicast/unicast indicator, and da[47]
2847  * represents the most significant bit of the last byte received.  If
2848  * the hash index, hi[n], points to a bit that is set in the hash
2849  * register then the frame will be matched according to whether the
2850  * frame is multicast or unicast.  A multicast match will be signalled
2851  * if the multicast hash enable bit is set, da[0] is 1 and the hash
2852  * index points to a bit set in the hash register.  A unicast match
2853  * will be signalled if the unicast hash enable bit is set, da[0] is 0
2854  * and the hash index points to a bit set in the hash register.  To
2855  * receive all multicast frames, the hash register should be set with
2856  * all ones and the multicast hash enable bit should be set in the
2857  * network configuration register.
2858  */
2859 
2860 static inline int hash_bit_value(int bitnr, __u8 *addr)
2861 {
2862 	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2863 		return 1;
2864 	return 0;
2865 }
2866 
2867 /* Return the hash index value for the specified address. */
2868 static int hash_get_index(__u8 *addr)
2869 {
2870 	int i, j, bitval;
2871 	int hash_index = 0;
2872 
2873 	for (j = 0; j < 6; j++) {
2874 		for (i = 0, bitval = 0; i < 8; i++)
2875 			bitval ^= hash_bit_value(i * 6 + j, addr);
2876 
2877 		hash_index |= (bitval << j);
2878 	}
2879 
2880 	return hash_index;
2881 }
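
/* Worked example: the IPv6 all-nodes multicast MAC 33:33:00:00:00:01 has
 * da[0], da[1], da[4], da[5], da[8], da[9], da[12], da[13] and da[40]
 * set. The XORs above give hi[2] = hi[3] = hi[5] = 1, so
 * hash_get_index() returns 44; macb_sethashtable() below then sets bit
 * 12 of mc_filter[1], i.e. bit 44 of the 64-bit hash register (HRT).
 */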
2882 
2883 /* Add multicast addresses to the internal multicast-hash table. */
2884 static void macb_sethashtable(struct net_device *dev)
2885 {
2886 	struct netdev_hw_addr *ha;
2887 	unsigned long mc_filter[2];
2888 	unsigned int bitnr;
2889 	struct macb *bp = netdev_priv(dev);
2890 
2891 	mc_filter[0] = 0;
2892 	mc_filter[1] = 0;
2893 
2894 	netdev_for_each_mc_addr(ha, dev) {
2895 		bitnr = hash_get_index(ha->addr);
2896 		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2897 	}
2898 
2899 	macb_or_gem_writel(bp, HRB, mc_filter[0]);
2900 	macb_or_gem_writel(bp, HRT, mc_filter[1]);
2901 }
2902 
2903 /* Enable/Disable promiscuous and multicast modes. */
2904 static void macb_set_rx_mode(struct net_device *dev)
2905 {
2906 	unsigned long cfg;
2907 	struct macb *bp = netdev_priv(dev);
2908 
2909 	cfg = macb_readl(bp, NCFGR);
2910 
2911 	if (dev->flags & IFF_PROMISC) {
2912 		/* Enable promiscuous mode */
2913 		cfg |= MACB_BIT(CAF);
2914 
2915 		/* Disable RX checksum offload */
2916 		if (macb_is_gem(bp))
2917 			cfg &= ~GEM_BIT(RXCOEN);
2918 	} else {
2919 		/* Disable promiscuous mode */
2920 		cfg &= ~MACB_BIT(CAF);
2921 
2922 		/* Enable RX checksum offload only if requested */
2923 		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2924 			cfg |= GEM_BIT(RXCOEN);
2925 	}
2926 
2927 	if (dev->flags & IFF_ALLMULTI) {
2928 		/* Enable all multicast mode */
2929 		macb_or_gem_writel(bp, HRB, -1);
2930 		macb_or_gem_writel(bp, HRT, -1);
2931 		cfg |= MACB_BIT(NCFGR_MTI);
2932 	} else if (!netdev_mc_empty(dev)) {
2933 		/* Enable specific multicasts */
2934 		macb_sethashtable(dev);
2935 		cfg |= MACB_BIT(NCFGR_MTI);
2936 	} else if (dev->flags & (~IFF_ALLMULTI)) {
2937 		/* Disable all multicast mode */
2938 		macb_or_gem_writel(bp, HRB, 0);
2939 		macb_or_gem_writel(bp, HRT, 0);
2940 		cfg &= ~MACB_BIT(NCFGR_MTI);
2941 	}
2942 
2943 	macb_writel(bp, NCFGR, cfg);
2944 }
2945 
2946 static int macb_open(struct net_device *dev)
2947 {
2948 	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2949 	struct macb *bp = netdev_priv(dev);
2950 	struct macb_queue *queue;
2951 	unsigned int q;
2952 	int err;
2953 
2954 	netdev_dbg(bp->dev, "open\n");
2955 
2956 	err = pm_runtime_resume_and_get(&bp->pdev->dev);
2957 	if (err < 0)
2958 		return err;
2959 
2960 	/* RX buffers initialization */
2961 	macb_init_rx_buffer_size(bp, bufsz);
2962 
2963 	err = macb_alloc_consistent(bp);
2964 	if (err) {
2965 		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2966 			   err);
2967 		goto pm_exit;
2968 	}
2969 
2970 	bp->macbgem_ops.mog_init_rings(bp);
2971 	macb_init_buffers(bp);
2972 
2973 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2974 		napi_enable(&queue->napi_rx);
2975 		napi_enable(&queue->napi_tx);
2976 	}
2977 
2978 	macb_init_hw(bp);
2979 
2980 	err = phy_set_mode_ext(bp->phy, PHY_MODE_ETHERNET, bp->phy_interface);
2981 	if (err)
2982 		goto reset_hw;
2983 
2984 	err = phy_power_on(bp->phy);
2985 	if (err)
2986 		goto reset_hw;
2987 
2988 	err = macb_phylink_connect(bp);
2989 	if (err)
2990 		goto phy_off;
2991 
2992 	netif_tx_start_all_queues(dev);
2993 
2994 	if (bp->ptp_info)
2995 		bp->ptp_info->ptp_init(dev);
2996 
2997 	return 0;
2998 
2999 phy_off:
3000 	phy_power_off(bp->phy);
3001 
3002 reset_hw:
3003 	macb_reset_hw(bp);
3004 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3005 		napi_disable(&queue->napi_rx);
3006 		napi_disable(&queue->napi_tx);
3007 	}
3008 	macb_free_consistent(bp);
3009 pm_exit:
3010 	pm_runtime_put_sync(&bp->pdev->dev);
3011 	return err;
3012 }
3013 
3014 static int macb_close(struct net_device *dev)
3015 {
3016 	struct macb *bp = netdev_priv(dev);
3017 	struct macb_queue *queue;
3018 	unsigned long flags;
3019 	unsigned int q;
3020 
3021 	netif_tx_stop_all_queues(dev);
3022 
3023 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3024 		napi_disable(&queue->napi_rx);
3025 		napi_disable(&queue->napi_tx);
3026 		netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
3027 	}
3028 
3029 	phylink_stop(bp->phylink);
3030 	phylink_disconnect_phy(bp->phylink);
3031 
3032 	phy_power_off(bp->phy);
3033 
3034 	spin_lock_irqsave(&bp->lock, flags);
3035 	macb_reset_hw(bp);
3036 	netif_carrier_off(dev);
3037 	spin_unlock_irqrestore(&bp->lock, flags);
3038 
3039 	macb_free_consistent(bp);
3040 
3041 	if (bp->ptp_info)
3042 		bp->ptp_info->ptp_remove(dev);
3043 
3044 	pm_runtime_put(&bp->pdev->dev);
3045 
3046 	return 0;
3047 }
3048 
3049 static int macb_change_mtu(struct net_device *dev, int new_mtu)
3050 {
3051 	if (netif_running(dev))
3052 		return -EBUSY;
3053 
3054 	WRITE_ONCE(dev->mtu, new_mtu);
3055 
3056 	return 0;
3057 }
3058 
3059 static int macb_set_mac_addr(struct net_device *dev, void *addr)
3060 {
3061 	int err;
3062 
3063 	err = eth_mac_addr(dev, addr);
3064 	if (err < 0)
3065 		return err;
3066 
3067 	macb_set_hwaddr(netdev_priv(dev));
3068 	return 0;
3069 }
3070 
3071 static void gem_update_stats(struct macb *bp)
3072 {
3073 	struct macb_queue *queue;
3074 	unsigned int i, q, idx;
3075 	unsigned long *stat;
3076 
3077 	u64 *p = &bp->hw_stats.gem.tx_octets;
3078 
3079 	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
3080 		u32 offset = gem_statistics[i].offset;
3081 		u64 val = bp->macb_reg_readl(bp, offset);
3082 
3083 		bp->ethtool_stats[i] += val;
3084 		*p += val;
3085 
3086 		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
3087 			/* Add GEM_OCTTXH, GEM_OCTRXH */
3088 			val = bp->macb_reg_readl(bp, offset + 4);
3089 			bp->ethtool_stats[i] += ((u64)val) << 32;
3090 			*p += ((u64)val) << 32;
3091 		}
3092 	}
3093 
3094 	idx = GEM_STATS_LEN;
3095 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
3096 		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
3097 			bp->ethtool_stats[idx++] = *stat;
3098 }
3099 
3100 static void gem_get_stats(struct macb *bp, struct rtnl_link_stats64 *nstat)
3101 {
3102 	struct gem_stats *hwstat = &bp->hw_stats.gem;
3103 
3104 	spin_lock_irq(&bp->stats_lock);
3105 	if (netif_running(bp->dev))
3106 		gem_update_stats(bp);
3107 
3108 	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
3109 			    hwstat->rx_alignment_errors +
3110 			    hwstat->rx_resource_errors +
3111 			    hwstat->rx_overruns +
3112 			    hwstat->rx_oversize_frames +
3113 			    hwstat->rx_jabbers +
3114 			    hwstat->rx_undersized_frames +
3115 			    hwstat->rx_length_field_frame_errors);
3116 	nstat->tx_errors = (hwstat->tx_late_collisions +
3117 			    hwstat->tx_excessive_collisions +
3118 			    hwstat->tx_underrun +
3119 			    hwstat->tx_carrier_sense_errors);
3120 	nstat->multicast = hwstat->rx_multicast_frames;
3121 	nstat->collisions = (hwstat->tx_single_collision_frames +
3122 			     hwstat->tx_multiple_collision_frames +
3123 			     hwstat->tx_excessive_collisions);
3124 	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
3125 				   hwstat->rx_jabbers +
3126 				   hwstat->rx_undersized_frames +
3127 				   hwstat->rx_length_field_frame_errors);
3128 	nstat->rx_over_errors = hwstat->rx_resource_errors;
3129 	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
3130 	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
3131 	nstat->rx_fifo_errors = hwstat->rx_overruns;
3132 	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
3133 	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
3134 	nstat->tx_fifo_errors = hwstat->tx_underrun;
3135 	spin_unlock_irq(&bp->stats_lock);
3136 }
3137 
3138 static void gem_get_ethtool_stats(struct net_device *dev,
3139 				  struct ethtool_stats *stats, u64 *data)
3140 {
3141 	struct macb *bp = netdev_priv(dev);
3142 
3143 	spin_lock_irq(&bp->stats_lock);
3144 	gem_update_stats(bp);
3145 	memcpy(data, &bp->ethtool_stats, sizeof(u64)
3146 			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
3147 	spin_unlock_irq(&bp->stats_lock);
3148 }
3149 
3150 static int gem_get_sset_count(struct net_device *dev, int sset)
3151 {
3152 	struct macb *bp = netdev_priv(dev);
3153 
3154 	switch (sset) {
3155 	case ETH_SS_STATS:
3156 		return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
3157 	default:
3158 		return -EOPNOTSUPP;
3159 	}
3160 }
3161 
3162 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
3163 {
3164 	char stat_string[ETH_GSTRING_LEN];
3165 	struct macb *bp = netdev_priv(dev);
3166 	struct macb_queue *queue;
3167 	unsigned int i;
3168 	unsigned int q;
3169 
3170 	switch (sset) {
3171 	case ETH_SS_STATS:
3172 		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
3173 			memcpy(p, gem_statistics[i].stat_string,
3174 			       ETH_GSTRING_LEN);
3175 
3176 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3177 			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
3178 				snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
3179 						q, queue_statistics[i].stat_string);
3180 				memcpy(p, stat_string, ETH_GSTRING_LEN);
3181 			}
3182 		}
3183 		break;
3184 	}
3185 }
3186 
3187 static void macb_get_stats(struct net_device *dev,
3188 			   struct rtnl_link_stats64 *nstat)
3189 {
3190 	struct macb *bp = netdev_priv(dev);
3191 	struct macb_stats *hwstat = &bp->hw_stats.macb;
3192 
3193 	netdev_stats_to_stats64(nstat, &bp->dev->stats);
3194 	if (macb_is_gem(bp)) {
3195 		gem_get_stats(bp, nstat);
3196 		return;
3197 	}
3198 
3199 	/* read stats from hardware */
3200 	spin_lock_irq(&bp->stats_lock);
3201 	macb_update_stats(bp);
3202 
3203 	/* Convert HW stats into netdevice stats */
3204 	nstat->rx_errors = (hwstat->rx_fcs_errors +
3205 			    hwstat->rx_align_errors +
3206 			    hwstat->rx_resource_errors +
3207 			    hwstat->rx_overruns +
3208 			    hwstat->rx_oversize_pkts +
3209 			    hwstat->rx_jabbers +
3210 			    hwstat->rx_undersize_pkts +
3211 			    hwstat->rx_length_mismatch);
3212 	nstat->tx_errors = (hwstat->tx_late_cols +
3213 			    hwstat->tx_excessive_cols +
3214 			    hwstat->tx_underruns +
3215 			    hwstat->tx_carrier_errors +
3216 			    hwstat->sqe_test_errors);
3217 	nstat->collisions = (hwstat->tx_single_cols +
3218 			     hwstat->tx_multiple_cols +
3219 			     hwstat->tx_excessive_cols);
3220 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
3221 				   hwstat->rx_jabbers +
3222 				   hwstat->rx_undersize_pkts +
3223 				   hwstat->rx_length_mismatch);
3224 	nstat->rx_over_errors = hwstat->rx_resource_errors +
3225 				   hwstat->rx_overruns;
3226 	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
3227 	nstat->rx_frame_errors = hwstat->rx_align_errors;
3228 	nstat->rx_fifo_errors = hwstat->rx_overruns;
3229 	/* XXX: What does "missed" mean? */
3230 	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
3231 	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
3232 	nstat->tx_fifo_errors = hwstat->tx_underruns;
3233 	/* Don't know about heartbeat or window errors... */
3234 	spin_unlock_irq(&bp->stats_lock);
3235 }
3236 
3237 static void macb_get_pause_stats(struct net_device *dev,
3238 				 struct ethtool_pause_stats *pause_stats)
3239 {
3240 	struct macb *bp = netdev_priv(dev);
3241 	struct macb_stats *hwstat = &bp->hw_stats.macb;
3242 
3243 	spin_lock_irq(&bp->stats_lock);
3244 	macb_update_stats(bp);
3245 	pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
3246 	pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
3247 	spin_unlock_irq(&bp->stats_lock);
3248 }
3249 
3250 static void gem_get_pause_stats(struct net_device *dev,
3251 				struct ethtool_pause_stats *pause_stats)
3252 {
3253 	struct macb *bp = netdev_priv(dev);
3254 	struct gem_stats *hwstat = &bp->hw_stats.gem;
3255 
3256 	spin_lock_irq(&bp->stats_lock);
3257 	gem_update_stats(bp);
3258 	pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
3259 	pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
3260 	spin_unlock_irq(&bp->stats_lock);
3261 }
3262 
3263 static void macb_get_eth_mac_stats(struct net_device *dev,
3264 				   struct ethtool_eth_mac_stats *mac_stats)
3265 {
3266 	struct macb *bp = netdev_priv(dev);
3267 	struct macb_stats *hwstat = &bp->hw_stats.macb;
3268 
3269 	spin_lock_irq(&bp->stats_lock);
3270 	macb_update_stats(bp);
3271 	mac_stats->FramesTransmittedOK = hwstat->tx_ok;
3272 	mac_stats->SingleCollisionFrames = hwstat->tx_single_cols;
3273 	mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols;
3274 	mac_stats->FramesReceivedOK = hwstat->rx_ok;
3275 	mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors;
3276 	mac_stats->AlignmentErrors = hwstat->rx_align_errors;
3277 	mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred;
3278 	mac_stats->LateCollisions = hwstat->tx_late_cols;
3279 	mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols;
3280 	mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns;
3281 	mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors;
3282 	mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns;
3283 	mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch;
3284 	mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts;
3285 	spin_unlock_irq(&bp->stats_lock);
3286 }
3287 
3288 static void gem_get_eth_mac_stats(struct net_device *dev,
3289 				  struct ethtool_eth_mac_stats *mac_stats)
3290 {
3291 	struct macb *bp = netdev_priv(dev);
3292 	struct gem_stats *hwstat = &bp->hw_stats.gem;
3293 
3294 	spin_lock_irq(&bp->stats_lock);
3295 	gem_update_stats(bp);
3296 	mac_stats->FramesTransmittedOK = hwstat->tx_frames;
3297 	mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames;
3298 	mac_stats->MultipleCollisionFrames =
3299 		hwstat->tx_multiple_collision_frames;
3300 	mac_stats->FramesReceivedOK = hwstat->rx_frames;
3301 	mac_stats->FrameCheckSequenceErrors =
3302 		hwstat->rx_frame_check_sequence_errors;
3303 	mac_stats->AlignmentErrors = hwstat->rx_alignment_errors;
3304 	mac_stats->OctetsTransmittedOK = hwstat->tx_octets;
3305 	mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames;
3306 	mac_stats->LateCollisions = hwstat->tx_late_collisions;
3307 	mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions;
3308 	mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun;
3309 	mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors;
3310 	mac_stats->OctetsReceivedOK = hwstat->rx_octets;
3311 	mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames;
3312 	mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames;
3313 	mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames;
3314 	mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames;
3315 	mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors;
3316 	mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames;
3317 	spin_unlock_irq(&bp->stats_lock);
3318 }
3319 
3320 /* TODO: Report SQE test errors when added to phy_stats */
3321 static void macb_get_eth_phy_stats(struct net_device *dev,
3322 				   struct ethtool_eth_phy_stats *phy_stats)
3323 {
3324 	struct macb *bp = netdev_priv(dev);
3325 	struct macb_stats *hwstat = &bp->hw_stats.macb;
3326 
3327 	spin_lock_irq(&bp->stats_lock);
3328 	macb_update_stats(bp);
3329 	phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
3330 	spin_unlock_irq(&bp->stats_lock);
3331 }
3332 
3333 static void gem_get_eth_phy_stats(struct net_device *dev,
3334 				  struct ethtool_eth_phy_stats *phy_stats)
3335 {
3336 	struct macb *bp = netdev_priv(dev);
3337 	struct gem_stats *hwstat = &bp->hw_stats.gem;
3338 
3339 	spin_lock_irq(&bp->stats_lock);
3340 	gem_update_stats(bp);
3341 	phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
3342 	spin_unlock_irq(&bp->stats_lock);
3343 }
3344 
3345 static void macb_get_rmon_stats(struct net_device *dev,
3346 				struct ethtool_rmon_stats *rmon_stats,
3347 				const struct ethtool_rmon_hist_range **ranges)
3348 {
3349 	struct macb *bp = netdev_priv(dev);
3350 	struct macb_stats *hwstat = &bp->hw_stats.macb;
3351 
3352 	spin_lock_irq(&bp->stats_lock);
3353 	macb_update_stats(bp);
3354 	rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts;
3355 	rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts;
3356 	rmon_stats->jabbers = hwstat->rx_jabbers;
3357 	spin_unlock_irq(&bp->stats_lock);
3358 }
3359 
3360 static const struct ethtool_rmon_hist_range gem_rmon_ranges[] = {
3361 	{   64,    64 },
3362 	{   65,   127 },
3363 	{  128,   255 },
3364 	{  256,   511 },
3365 	{  512,  1023 },
3366 	{ 1024,  1518 },
3367 	{ 1519, 16384 },
3368 	{ },
3369 };
3370 
3371 static void gem_get_rmon_stats(struct net_device *dev,
3372 			       struct ethtool_rmon_stats *rmon_stats,
3373 			       const struct ethtool_rmon_hist_range **ranges)
3374 {
3375 	struct macb *bp = netdev_priv(dev);
3376 	struct gem_stats *hwstat = &bp->hw_stats.gem;
3377 
3378 	spin_lock_irq(&bp->stats_lock);
3379 	gem_update_stats(bp);
3380 	rmon_stats->undersize_pkts = hwstat->rx_undersized_frames;
3381 	rmon_stats->oversize_pkts = hwstat->rx_oversize_frames;
3382 	rmon_stats->jabbers = hwstat->rx_jabbers;
3383 	rmon_stats->hist[0] = hwstat->rx_64_byte_frames;
3384 	rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames;
3385 	rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames;
3386 	rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames;
3387 	rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames;
3388 	rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames;
3389 	rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames;
3390 	rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames;
3391 	rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames;
3392 	rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames;
3393 	rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames;
3394 	rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames;
3395 	rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames;
3396 	rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames;
3397 	spin_unlock_irq(&bp->stats_lock);
3398 	*ranges = gem_rmon_ranges;
3399 }
3400 
3401 static int macb_get_regs_len(struct net_device *netdev)
3402 {
3403 	return MACB_GREGS_NBR * sizeof(u32);
3404 }
3405 
3406 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3407 			  void *p)
3408 {
3409 	struct macb *bp = netdev_priv(dev);
3410 	unsigned int tail, head;
3411 	u32 *regs_buff = p;
3412 
3413 	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
3414 			| MACB_GREGS_VERSION;
3415 
3416 	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
3417 	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
3418 
3419 	regs_buff[0]  = macb_readl(bp, NCR);
3420 	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
3421 	regs_buff[2]  = macb_readl(bp, NSR);
3422 	regs_buff[3]  = macb_readl(bp, TSR);
3423 	regs_buff[4]  = macb_readl(bp, RBQP);
3424 	regs_buff[5]  = macb_readl(bp, TBQP);
3425 	regs_buff[6]  = macb_readl(bp, RSR);
3426 	regs_buff[7]  = macb_readl(bp, IMR);
3427 
3428 	regs_buff[8]  = tail;
3429 	regs_buff[9]  = head;
3430 	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
3431 	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
3432 
3433 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
3434 		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
3435 	if (macb_is_gem(bp))
3436 		regs_buff[13] = gem_readl(bp, DMACFG);
3437 }
3438 
3439 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3440 {
3441 	struct macb *bp = netdev_priv(netdev);
3442 
3443 	phylink_ethtool_get_wol(bp->phylink, wol);
3444 	wol->supported |= (WAKE_MAGIC | WAKE_ARP);
3445 
3446 	/* Add macb wolopts to phy wolopts */
3447 	wol->wolopts |= bp->wolopts;
3448 }
3449 
3450 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3451 {
3452 	struct macb *bp = netdev_priv(netdev);
3453 	int ret;
3454 
3455 	/* Pass the request on to the phylink layer */
3456 	ret = phylink_ethtool_set_wol(bp->phylink, wol);
3457 	/* Don't manage WoL on the MAC if the PHY's set_wol() fails */
3458 	if (ret && ret != -EOPNOTSUPP)
3459 		return ret;
3460 
3461 	bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0;
3462 	bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0;
3463 	bp->wol = (wol->wolopts) ? MACB_WOL_ENABLED : 0;
3464 
3465 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
3466 
3467 	return 0;
3468 }
3469 
3470 static int macb_get_link_ksettings(struct net_device *netdev,
3471 				   struct ethtool_link_ksettings *kset)
3472 {
3473 	struct macb *bp = netdev_priv(netdev);
3474 
3475 	return phylink_ethtool_ksettings_get(bp->phylink, kset);
3476 }
3477 
3478 static int macb_set_link_ksettings(struct net_device *netdev,
3479 				   const struct ethtool_link_ksettings *kset)
3480 {
3481 	struct macb *bp = netdev_priv(netdev);
3482 
3483 	return phylink_ethtool_ksettings_set(bp->phylink, kset);
3484 }
3485 
3486 static void macb_get_ringparam(struct net_device *netdev,
3487 			       struct ethtool_ringparam *ring,
3488 			       struct kernel_ethtool_ringparam *kernel_ring,
3489 			       struct netlink_ext_ack *extack)
3490 {
3491 	struct macb *bp = netdev_priv(netdev);
3492 
3493 	ring->rx_max_pending = MAX_RX_RING_SIZE;
3494 	ring->tx_max_pending = MAX_TX_RING_SIZE;
3495 
3496 	ring->rx_pending = bp->rx_ring_size;
3497 	ring->tx_pending = bp->tx_ring_size;
3498 }
3499 
3500 static int macb_set_ringparam(struct net_device *netdev,
3501 			      struct ethtool_ringparam *ring,
3502 			      struct kernel_ethtool_ringparam *kernel_ring,
3503 			      struct netlink_ext_ack *extack)
3504 {
3505 	struct macb *bp = netdev_priv(netdev);
3506 	u32 new_rx_size, new_tx_size;
3507 	unsigned int reset = 0;
3508 
3509 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
3510 		return -EINVAL;
3511 
3512 	new_rx_size = clamp_t(u32, ring->rx_pending,
3513 			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
3514 	new_rx_size = roundup_pow_of_two(new_rx_size);
3515 
3516 	new_tx_size = clamp_t(u32, ring->tx_pending,
3517 			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
3518 	new_tx_size = roundup_pow_of_two(new_tx_size);
3519 
3520 	if ((new_tx_size == bp->tx_ring_size) &&
3521 	    (new_rx_size == bp->rx_ring_size)) {
3522 		/* nothing to do */
3523 		return 0;
3524 	}
3525 
3526 	if (netif_running(bp->dev)) {
3527 		reset = 1;
3528 		macb_close(bp->dev);
3529 	}
3530 
3531 	bp->rx_ring_size = new_rx_size;
3532 	bp->tx_ring_size = new_tx_size;
3533 
3534 	if (reset)
3535 		macb_open(bp->dev);
3536 
3537 	return 0;
3538 }
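
/* Example (illustrative): "ethtool -G <iface> rx 600" is first clamped to
 * [MIN_RX_RING_SIZE, MAX_RX_RING_SIZE] and then rounded up to a power of
 * two, so a 1024-entry RX ring is installed; a request for 10000 would be
 * clamped to 8192 (already a power of two) instead.
 */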
3539 
3540 #ifdef CONFIG_MACB_USE_HWSTAMP
3541 static unsigned int gem_get_tsu_rate(struct macb *bp)
3542 {
3543 	struct clk *tsu_clk;
3544 	unsigned int tsu_rate;
3545 
3546 	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
3547 	if (!IS_ERR(tsu_clk))
3548 		tsu_rate = clk_get_rate(tsu_clk);
3549 	/* try pclk instead */
3550 	else if (!IS_ERR(bp->pclk)) {
3551 		tsu_clk = bp->pclk;
3552 		tsu_rate = clk_get_rate(tsu_clk);
3553 	} else
3554 		return -ENOTSUPP;
3555 	return tsu_rate;
3556 }
3557 
3558 static s32 gem_get_ptp_max_adj(void)
3559 {
3560 	return 64000000;
3561 }
3562 
3563 static int gem_get_ts_info(struct net_device *dev,
3564 			   struct kernel_ethtool_ts_info *info)
3565 {
3566 	struct macb *bp = netdev_priv(dev);
3567 
3568 	if (!macb_dma_ptp(bp)) {
3569 		ethtool_op_get_ts_info(dev, info);
3570 		return 0;
3571 	}
3572 
3573 	info->so_timestamping =
3574 		SOF_TIMESTAMPING_TX_SOFTWARE |
3575 		SOF_TIMESTAMPING_TX_HARDWARE |
3576 		SOF_TIMESTAMPING_RX_HARDWARE |
3577 		SOF_TIMESTAMPING_RAW_HARDWARE;
3578 	info->tx_types =
3579 		(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
3580 		(1 << HWTSTAMP_TX_OFF) |
3581 		(1 << HWTSTAMP_TX_ON);
3582 	info->rx_filters =
3583 		(1 << HWTSTAMP_FILTER_NONE) |
3584 		(1 << HWTSTAMP_FILTER_ALL);
3585 
3586 	if (bp->ptp_clock)
3587 		info->phc_index = ptp_clock_index(bp->ptp_clock);
3588 
3589 	return 0;
3590 }
3591 
3592 static struct macb_ptp_info gem_ptp_info = {
3593 	.ptp_init	 = gem_ptp_init,
3594 	.ptp_remove	 = gem_ptp_remove,
3595 	.get_ptp_max_adj = gem_get_ptp_max_adj,
3596 	.get_tsu_rate	 = gem_get_tsu_rate,
3597 	.get_ts_info	 = gem_get_ts_info,
3598 	.get_hwtst	 = gem_get_hwtst,
3599 	.set_hwtst	 = gem_set_hwtst,
3600 };
3601 #endif
3602 
3603 static int macb_get_ts_info(struct net_device *netdev,
3604 			    struct kernel_ethtool_ts_info *info)
3605 {
3606 	struct macb *bp = netdev_priv(netdev);
3607 
3608 	if (bp->ptp_info)
3609 		return bp->ptp_info->get_ts_info(netdev, info);
3610 
3611 	return ethtool_op_get_ts_info(netdev, info);
3612 }
3613 
3614 static void gem_enable_flow_filters(struct macb *bp, bool enable)
3615 {
3616 	struct net_device *netdev = bp->dev;
3617 	struct ethtool_rx_fs_item *item;
3618 	u32 t2_scr;
3619 	int num_t2_scr;
3620 
3621 	if (!(netdev->features & NETIF_F_NTUPLE))
3622 		return;
3623 
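	/* DCFG8 advertises how many type-2 screener registers this GEM
	 * implements; entries beyond that cannot be programmed.
	 */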
3624 	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
3625 
3626 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3627 		struct ethtool_rx_flow_spec *fs = &item->fs;
3628 		struct ethtool_tcpip4_spec *tp4sp_m;
3629 
3630 		if (fs->location >= num_t2_scr)
3631 			continue;
3632 
3633 		t2_scr = gem_readl_n(bp, SCRT2, fs->location);
3634 
3635 		/* enable/disable screener regs for the flow entry */
3636 		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
3637 
3638 		/* only enable comparers for fields with an exact-match mask */
3639 		tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3640 
3641 		if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
3642 			t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
3643 		else
3644 			t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
3645 
3646 		if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
3647 			t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
3648 		else
3649 			t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
3650 
3651 		if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
3652 			t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
3653 		else
3654 			t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
3655 
3656 		gem_writel_n(bp, SCRT2, fs->location, t2_scr);
3657 	}
3658 }
3659 
3660 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
3661 {
3662 	struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
3663 	uint16_t index = fs->location;
3664 	u32 w0, w1, t2_scr;
3665 	bool cmp_a = false;
3666 	bool cmp_b = false;
3667 	bool cmp_c = false;
3668 
3669 	if (!macb_is_gem(bp))
3670 		return;
3671 
3672 	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
3673 	tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3674 
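	/* Each type-2 comparer is programmed via a register pair: W0 carries
	 * the compare value (and mask), W1 the control flags and the offset
	 * of the field within the frame.
	 */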
3675 	/* Only exact matches are supported: use a field only when its mask requests a full match */
3676 	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
3677 		/* 1st compare reg - IP source address */
3678 		w0 = 0;
3679 		w1 = 0;
3680 		w0 = tp4sp_v->ip4src;
3681 		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3682 		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3683 		w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
3684 		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
3685 		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
3686 		cmp_a = true;
3687 	}
3688 
3689 	/* As above, use the destination address only on an exact-match mask */
3690 	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
3691 		/* 2nd compare reg - IP destination address */
3692 		w0 = 0;
3693 		w1 = 0;
3694 		w0 = tp4sp_v->ip4dst;
3695 		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3696 		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3697 		w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
3698 		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
3699 		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
3700 		cmp_b = true;
3701 	}
3702 
3703 	/* Program the port comparer when at least one port requires an exact match */
3704 	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
3705 		/* 3rd compare reg - source port, destination port */
3706 		w0 = 0;
3707 		w1 = 0;
3708 		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
3709 		if (tp4sp_m->psrc == tp4sp_m->pdst) {
3710 			w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
3711 			w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3712 			w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3713 			w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3714 		} else {
3715 			/* only one port definition */
3716 			w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
3717 			w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
3718 			if (tp4sp_m->psrc == 0xFFFF) { /* src port */
3719 				w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
3720 				w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3721 			} else { /* dst port */
3722 				w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3723 				w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
3724 			}
3725 		}
3726 		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
3727 		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
3728 		cmp_c = true;
3729 	}
3730 
3731 	t2_scr = 0;
3732 	t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
3733 	t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
3734 	if (cmp_a)
3735 		t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
3736 	if (cmp_b)
3737 		t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
3738 	if (cmp_c)
3739 		t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
3740 	gem_writel_n(bp, SCRT2, index, t2_scr);
3741 }
3742 
3743 static int gem_add_flow_filter(struct net_device *netdev,
3744 		struct ethtool_rxnfc *cmd)
3745 {
3746 	struct macb *bp = netdev_priv(netdev);
3747 	struct ethtool_rx_flow_spec *fs = &cmd->fs;
3748 	struct ethtool_rx_fs_item *item, *newfs;
3749 	unsigned long flags;
3750 	int ret = -EINVAL;
3751 	bool added = false;
3752 
3753 	newfs = kmalloc_obj(*newfs);
3754 	if (!newfs)
3755 		return -ENOMEM;
3756 	memcpy(&newfs->fs, fs, sizeof(newfs->fs));
3757 
3758 	netdev_dbg(netdev,
3759 			"Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3760 			fs->flow_type, (int)fs->ring_cookie, fs->location,
3761 			htonl(fs->h_u.tcp_ip4_spec.ip4src),
3762 			htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3763 			be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
3764 			be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
3765 
3766 	spin_lock_irqsave(&bp->rx_fs_lock, flags);
3767 
3768 	/* find correct place to add in list */
3769 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3770 		if (item->fs.location > newfs->fs.location) {
3771 			list_add_tail(&newfs->list, &item->list);
3772 			added = true;
3773 			break;
3774 		} else if (item->fs.location == fs->location) {
3775 			netdev_err(netdev, "Rule not added: location %d not free!\n",
3776 					fs->location);
3777 			ret = -EBUSY;
3778 			goto err;
3779 		}
3780 	}
3781 	if (!added)
3782 		list_add_tail(&newfs->list, &bp->rx_fs_list.list);
3783 
3784 	gem_prog_cmp_regs(bp, fs);
3785 	bp->rx_fs_list.count++;
3786 	/* enable filtering if NTUPLE on */
3787 	gem_enable_flow_filters(bp, 1);
3788 
3789 	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3790 	return 0;
3791 
3792 err:
3793 	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3794 	kfree(newfs);
3795 	return ret;
3796 }
3797 
3798 static int gem_del_flow_filter(struct net_device *netdev,
3799 		struct ethtool_rxnfc *cmd)
3800 {
3801 	struct macb *bp = netdev_priv(netdev);
3802 	struct ethtool_rx_fs_item *item;
3803 	struct ethtool_rx_flow_spec *fs;
3804 	unsigned long flags;
3805 
3806 	spin_lock_irqsave(&bp->rx_fs_lock, flags);
3807 
3808 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3809 		if (item->fs.location == cmd->fs.location) {
3810 			/* disable screener regs for the flow entry */
3811 			fs = &(item->fs);
3812 			netdev_dbg(netdev,
3813 					"Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3814 					fs->flow_type, (int)fs->ring_cookie, fs->location,
3815 					htonl(fs->h_u.tcp_ip4_spec.ip4src),
3816 					htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3817 					be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
3818 					be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
3819 
3820 			gem_writel_n(bp, SCRT2, fs->location, 0);
3821 
3822 			list_del(&item->list);
3823 			bp->rx_fs_list.count--;
3824 			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3825 			kfree(item);
3826 			return 0;
3827 		}
3828 	}
3829 
3830 	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3831 	return -EINVAL;
3832 }
3833 
3834 static int gem_get_flow_entry(struct net_device *netdev,
3835 		struct ethtool_rxnfc *cmd)
3836 {
3837 	struct macb *bp = netdev_priv(netdev);
3838 	struct ethtool_rx_fs_item *item;
3839 
3840 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3841 		if (item->fs.location == cmd->fs.location) {
3842 			memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
3843 			return 0;
3844 		}
3845 	}
3846 	return -EINVAL;
3847 }
3848 
3849 static int gem_get_all_flow_entries(struct net_device *netdev,
3850 		struct ethtool_rxnfc *cmd, u32 *rule_locs)
3851 {
3852 	struct macb *bp = netdev_priv(netdev);
3853 	struct ethtool_rx_fs_item *item;
3854 	uint32_t cnt = 0;
3855 
3856 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3857 		if (cnt == cmd->rule_cnt)
3858 			return -EMSGSIZE;
3859 		rule_locs[cnt] = item->fs.location;
3860 		cnt++;
3861 	}
3862 	cmd->data = bp->max_tuples;
3863 	cmd->rule_cnt = cnt;
3864 
3865 	return 0;
3866 }
3867 
3868 static u32 gem_get_rx_ring_count(struct net_device *netdev)
3869 {
3870 	struct macb *bp = netdev_priv(netdev);
3871 
3872 	return bp->num_queues;
3873 }
3874 
3875 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3876 		u32 *rule_locs)
3877 {
3878 	struct macb *bp = netdev_priv(netdev);
3879 	int ret = 0;
3880 
3881 	switch (cmd->cmd) {
3882 	case ETHTOOL_GRXCLSRLCNT:
3883 		cmd->rule_cnt = bp->rx_fs_list.count;
3884 		break;
3885 	case ETHTOOL_GRXCLSRULE:
3886 		ret = gem_get_flow_entry(netdev, cmd);
3887 		break;
3888 	case ETHTOOL_GRXCLSRLALL:
3889 		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
3890 		break;
3891 	default:
3892 		netdev_err(netdev,
3893 			  "Command parameter %d is not supported\n", cmd->cmd);
3894 		ret = -EOPNOTSUPP;
3895 	}
3896 
3897 	return ret;
3898 }
3899 
3900 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3901 {
3902 	struct macb *bp = netdev_priv(netdev);
3903 	int ret;
3904 
3905 	switch (cmd->cmd) {
3906 	case ETHTOOL_SRXCLSRLINS:
3907 		if (cmd->fs.location >= bp->max_tuples ||
3908 		    cmd->fs.ring_cookie >= bp->num_queues) {
3909 			ret = -EINVAL;
3910 			break;
3911 		}
3912 		ret = gem_add_flow_filter(netdev, cmd);
3913 		break;
3914 	case ETHTOOL_SRXCLSRLDEL:
3915 		ret = gem_del_flow_filter(netdev, cmd);
3916 		break;
3917 	default:
3918 		netdev_err(netdev,
3919 			  "Command parameter %d is not supported\n", cmd->cmd);
3920 		ret = -EOPNOTSUPP;
3921 	}
3922 
3923 	return ret;
3924 }
3925 
3926 static const struct ethtool_ops macb_ethtool_ops = {
3927 	.get_regs_len		= macb_get_regs_len,
3928 	.get_regs		= macb_get_regs,
3929 	.get_link		= ethtool_op_get_link,
3930 	.get_ts_info		= ethtool_op_get_ts_info,
3931 	.get_pause_stats	= macb_get_pause_stats,
3932 	.get_eth_mac_stats	= macb_get_eth_mac_stats,
3933 	.get_eth_phy_stats	= macb_get_eth_phy_stats,
3934 	.get_rmon_stats		= macb_get_rmon_stats,
3935 	.get_wol		= macb_get_wol,
3936 	.set_wol		= macb_set_wol,
3937 	.get_link_ksettings     = macb_get_link_ksettings,
3938 	.set_link_ksettings     = macb_set_link_ksettings,
3939 	.get_ringparam		= macb_get_ringparam,
3940 	.set_ringparam		= macb_set_ringparam,
3941 };
3942 
3943 static const struct ethtool_ops gem_ethtool_ops = {
3944 	.get_regs_len		= macb_get_regs_len,
3945 	.get_regs		= macb_get_regs,
3946 	.get_wol		= macb_get_wol,
3947 	.set_wol		= macb_set_wol,
3948 	.get_link		= ethtool_op_get_link,
3949 	.get_ts_info		= macb_get_ts_info,
3950 	.get_ethtool_stats	= gem_get_ethtool_stats,
3951 	.get_strings		= gem_get_ethtool_strings,
3952 	.get_sset_count		= gem_get_sset_count,
3953 	.get_pause_stats	= gem_get_pause_stats,
3954 	.get_eth_mac_stats	= gem_get_eth_mac_stats,
3955 	.get_eth_phy_stats	= gem_get_eth_phy_stats,
3956 	.get_rmon_stats		= gem_get_rmon_stats,
3957 	.get_link_ksettings     = macb_get_link_ksettings,
3958 	.set_link_ksettings     = macb_set_link_ksettings,
3959 	.get_ringparam		= macb_get_ringparam,
3960 	.set_ringparam		= macb_set_ringparam,
3961 	.get_rxnfc		= gem_get_rxnfc,
3962 	.set_rxnfc		= gem_set_rxnfc,
3963 	.get_rx_ring_count	= gem_get_rx_ring_count,
3964 	.nway_reset		= phy_ethtool_nway_reset,
3965 };
3966 
3967 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3968 {
3969 	struct macb *bp = netdev_priv(dev);
3970 
3971 	if (!netif_running(dev))
3972 		return -EINVAL;
3973 
3974 	return phylink_mii_ioctl(bp->phylink, rq, cmd);
3975 }
3976 
3977 static int macb_hwtstamp_get(struct net_device *dev,
3978 			     struct kernel_hwtstamp_config *cfg)
3979 {
3980 	struct macb *bp = netdev_priv(dev);
3981 
3982 	if (!netif_running(dev))
3983 		return -EINVAL;
3984 
3985 	if (!bp->ptp_info)
3986 		return -EOPNOTSUPP;
3987 
3988 	return bp->ptp_info->get_hwtst(dev, cfg);
3989 }
3990 
3991 static int macb_hwtstamp_set(struct net_device *dev,
3992 			     struct kernel_hwtstamp_config *cfg,
3993 			     struct netlink_ext_ack *extack)
3994 {
3995 	struct macb *bp = netdev_priv(dev);
3996 
3997 	if (!netif_running(dev))
3998 		return -EINVAL;
3999 
4000 	if (!bp->ptp_info)
4001 		return -EOPNOTSUPP;
4002 
4003 	return bp->ptp_info->set_hwtst(dev, cfg, extack);
4004 }
4005 
4006 static inline void macb_set_txcsum_feature(struct macb *bp,
4007 					   netdev_features_t features)
4008 {
4009 	u32 val;
4010 
4011 	if (!macb_is_gem(bp))
4012 		return;
4013 
4014 	val = gem_readl(bp, DMACFG);
4015 	if (features & NETIF_F_HW_CSUM)
4016 		val |= GEM_BIT(TXCOEN);
4017 	else
4018 		val &= ~GEM_BIT(TXCOEN);
4019 
4020 	gem_writel(bp, DMACFG, val);
4021 }
4022 
4023 static inline void macb_set_rxcsum_feature(struct macb *bp,
4024 					   netdev_features_t features)
4025 {
4026 	struct net_device *netdev = bp->dev;
4027 	u32 val;
4028 
4029 	if (!macb_is_gem(bp))
4030 		return;
4031 
4032 	val = gem_readl(bp, NCFGR);
4033 	if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
4034 		val |= GEM_BIT(RXCOEN);
4035 	else
4036 		val &= ~GEM_BIT(RXCOEN);
4037 
4038 	gem_writel(bp, NCFGR, val);
4039 }
4040 
4041 static inline void macb_set_rxflow_feature(struct macb *bp,
4042 					   netdev_features_t features)
4043 {
4044 	if (!macb_is_gem(bp))
4045 		return;
4046 
4047 	gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
4048 }
4049 
4050 static int macb_set_features(struct net_device *netdev,
4051 			     netdev_features_t features)
4052 {
4053 	struct macb *bp = netdev_priv(netdev);
4054 	netdev_features_t changed = features ^ netdev->features;
4055 
4056 	/* TX checksum offload */
4057 	if (changed & NETIF_F_HW_CSUM)
4058 		macb_set_txcsum_feature(bp, features);
4059 
4060 	/* RX checksum offload */
4061 	if (changed & NETIF_F_RXCSUM)
4062 		macb_set_rxcsum_feature(bp, features);
4063 
4064 	/* RX Flow Filters */
4065 	if (changed & NETIF_F_NTUPLE)
4066 		macb_set_rxflow_feature(bp, features);
4067 
4068 	return 0;
4069 }
4070 
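/* Re-apply offload and flow-filter settings to the hardware, e.g. after the
 * controller has been reset.
 */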
4071 static void macb_restore_features(struct macb *bp)
4072 {
4073 	struct net_device *netdev = bp->dev;
4074 	netdev_features_t features = netdev->features;
4075 	struct ethtool_rx_fs_item *item;
4076 
4077 	/* TX checksum offload */
4078 	macb_set_txcsum_feature(bp, features);
4079 
4080 	/* RX checksum offload */
4081 	macb_set_rxcsum_feature(bp, features);
4082 
4083 	/* RX Flow Filters */
4084 	list_for_each_entry(item, &bp->rx_fs_list.list, list)
4085 		gem_prog_cmp_regs(bp, &item->fs);
4086 
4087 	macb_set_rxflow_feature(bp, features);
4088 }
4089 
4090 static int macb_taprio_setup_replace(struct net_device *ndev,
4091 				     struct tc_taprio_qopt_offload *conf)
4092 {
4093 	u64 total_on_time = 0, start_time_sec = 0, start_time = conf->base_time;
4094 	u32 configured_queues = 0, speed = 0, start_time_nsec;
4095 	struct macb_queue_enst_config *enst_queue;
4096 	struct tc_taprio_sched_entry *entry;
4097 	struct macb *bp = netdev_priv(ndev);
4098 	struct ethtool_link_ksettings kset;
4099 	struct macb_queue *queue;
4100 	u32 queue_mask;
4101 	u8 queue_id;
4102 	size_t i;
4103 	int err;
4104 
4105 	if (conf->num_entries > bp->num_queues) {
4106 		netdev_err(ndev, "Too many TAPRIO entries: %zu > %d queues\n",
4107 			   conf->num_entries, bp->num_queues);
4108 		return -EINVAL;
4109 	}
4110 
4111 	if (conf->base_time < 0) {
4112 		netdev_err(ndev, "Invalid base_time: must be 0 or positive, got %lld\n",
4113 			   conf->base_time);
4114 		return -ERANGE;
4115 	}
4116 
4117 	/* Get the current link speed */
4118 	err = phylink_ethtool_ksettings_get(bp->phylink, &kset);
4119 	if (unlikely(err)) {
4120 		netdev_err(ndev, "Failed to get link settings: %d\n", err);
4121 		return err;
4122 	}
4123 
4124 	speed = kset.base.speed;
4125 	if (unlikely(speed == 0 || speed == SPEED_UNKNOWN)) {
4126 		netdev_err(ndev, "Invalid link speed: %u\n", speed);
4127 		return -EINVAL;
4128 	}
4129 
4130 	enst_queue = kcalloc(conf->num_entries, sizeof(*enst_queue), GFP_KERNEL);
4131 	if (unlikely(!enst_queue))
4132 		return -ENOMEM;
4133 
4134 	/* Pre-validate all entries before making any hardware changes */
4135 	for (i = 0; i < conf->num_entries; i++) {
4136 		entry = &conf->entries[i];
4137 
4138 		if (entry->command != TC_TAPRIO_CMD_SET_GATES) {
4139 			netdev_err(ndev, "Entry %zu: unsupported command %d\n",
4140 				   i, entry->command);
4141 			err = -EOPNOTSUPP;
4142 			goto cleanup;
4143 		}
4144 
4145 		/* Validate gate_mask: must be nonzero, single queue, and within range */
4146 		if (!is_power_of_2(entry->gate_mask)) {
4147 			netdev_err(ndev, "Entry %zu: gate_mask 0x%x is not a power of 2 (only one queue per entry allowed)\n",
4148 				   i, entry->gate_mask);
4149 			err = -EINVAL;
4150 			goto cleanup;
4151 		}
4152 
4153 		/* gate_mask must not select queues outside the valid range */
4154 		queue_id = order_base_2(entry->gate_mask);
4155 		if (queue_id >= bp->num_queues) {
4156 			netdev_err(ndev, "Entry %zu: gate_mask 0x%x exceeds queue range (max_queues=%d)\n",
4157 				   i, entry->gate_mask, bp->num_queues);
4158 			err = -EINVAL;
4159 			goto cleanup;
4160 		}
4161 
4162 		/* Check for start time limits */
4163 		start_time_sec = start_time;
4164 		start_time_nsec = do_div(start_time_sec, NSEC_PER_SEC);
4165 		if (start_time_sec > GENMASK(GEM_START_TIME_SEC_SIZE - 1, 0)) {
4166 			netdev_err(ndev, "Entry %zu: Start time %llu s exceeds hardware limit\n",
4167 				   i, start_time_sec);
4168 			err = -ERANGE;
4169 			goto cleanup;
4170 		}
4171 
4172 		/* Check for on time limit */
4173 		if (entry->interval > enst_max_hw_interval(speed)) {
4174 			netdev_err(ndev, "Entry %zu: interval %u ns exceeds hardware limit %llu ns\n",
4175 				   i, entry->interval, enst_max_hw_interval(speed));
4176 			err = -ERANGE;
4177 			goto cleanup;
4178 		}
4179 
4180 		/* Check for off time limit */
4181 		if ((conf->cycle_time - entry->interval) > enst_max_hw_interval(speed)) {
4182 			netdev_err(ndev, "Entry %zu: off_time %llu ns exceeds hardware limit %llu ns\n",
4183 				   i, conf->cycle_time - entry->interval,
4184 				   enst_max_hw_interval(speed));
4185 			err = -ERANGE;
4186 			goto cleanup;
4187 		}
4188 
4189 		enst_queue[i].queue_id = queue_id;
4190 		enst_queue[i].start_time_mask =
4191 			(start_time_sec << GEM_START_TIME_SEC_OFFSET) |
4192 			start_time_nsec;
4193 		enst_queue[i].on_time_bytes =
4194 			enst_ns_to_hw_units(entry->interval, speed);
4195 		enst_queue[i].off_time_bytes =
4196 			enst_ns_to_hw_units(conf->cycle_time - entry->interval, speed);
4197 
4198 		configured_queues |= entry->gate_mask;
4199 		total_on_time += entry->interval;
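		/* Entries run back-to-back: the next entry starts once this
		 * entry's interval has elapsed.
		 */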
4200 		start_time += entry->interval;
4201 	}
4202 
4203 	/* Check total interval doesn't exceed cycle time */
4204 	if (total_on_time > conf->cycle_time) {
4205 		netdev_err(ndev, "Total ON %llu ns exceeds cycle time %llu ns\n",
4206 			   total_on_time, conf->cycle_time);
4207 		err = -EINVAL;
4208 		goto cleanup;
4209 	}
4210 
4211 	netdev_dbg(ndev, "TAPRIO setup: %zu entries, base_time=%lld ns, cycle_time=%llu ns\n",
4212 		   conf->num_entries, conf->base_time, conf->cycle_time);
4213 
4214 	/* All validations passed - proceed with hardware configuration */
4215 	scoped_guard(spinlock_irqsave, &bp->lock) {
4216 		/* Disable ENST queues if running before configuring */
4217 		queue_mask = BIT_U32(bp->num_queues) - 1;
4218 		gem_writel(bp, ENST_CONTROL,
4219 			   queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
4220 
4221 		for (i = 0; i < conf->num_entries; i++) {
4222 			queue = &bp->queues[enst_queue[i].queue_id];
4223 			/* Configure queue timing registers */
4224 			queue_writel(queue, ENST_START_TIME,
4225 				     enst_queue[i].start_time_mask);
4226 			queue_writel(queue, ENST_ON_TIME,
4227 				     enst_queue[i].on_time_bytes);
4228 			queue_writel(queue, ENST_OFF_TIME,
4229 				     enst_queue[i].off_time_bytes);
4230 		}
4231 
4232 		/* Enable ENST for all configured queues in one write */
4233 		gem_writel(bp, ENST_CONTROL, configured_queues);
4234 	}
4235 
4236 	netdev_info(ndev, "TAPRIO configuration completed successfully: %zu entries, %d queues configured\n",
4237 		    conf->num_entries, hweight32(configured_queues));
4238 
4239 cleanup:
4240 	kfree(enst_queue);
4241 	return err;
4242 }
4243 
4244 static void macb_taprio_destroy(struct net_device *ndev)
4245 {
4246 	struct macb *bp = netdev_priv(ndev);
4247 	struct macb_queue *queue;
4248 	u32 queue_mask;
4249 	unsigned int q;
4250 
4251 	netdev_reset_tc(ndev);
4252 	queue_mask = BIT_U32(bp->num_queues) - 1;
4253 
4254 	scoped_guard(spinlock_irqsave, &bp->lock) {
4255 		/* Single disable command for all queues */
4256 		gem_writel(bp, ENST_CONTROL,
4257 			   queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
4258 
4259 		/* Clear all queue ENST registers in batch */
4260 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
4261 			queue_writel(queue, ENST_START_TIME, 0);
4262 			queue_writel(queue, ENST_ON_TIME, 0);
4263 			queue_writel(queue, ENST_OFF_TIME, 0);
4264 		}
4265 	}
4266 	netdev_info(ndev, "TAPRIO destroy: All gates disabled\n");
4267 }
4268 
4269 static int macb_setup_taprio(struct net_device *ndev,
4270 			     struct tc_taprio_qopt_offload *taprio)
4271 {
4272 	struct macb *bp = netdev_priv(ndev);
4273 	int err = 0;
4274 
4275 	if (unlikely(!(ndev->hw_features & NETIF_F_HW_TC)))
4276 		return -EOPNOTSUPP;
4277 
4278 	/* Check if the device is in runtime suspend */
4279 	if (unlikely(pm_runtime_suspended(&bp->pdev->dev))) {
4280 		netdev_err(ndev, "Device is in runtime suspend\n");
4281 		return -EOPNOTSUPP;
4282 	}
4283 
4284 	switch (taprio->cmd) {
4285 	case TAPRIO_CMD_REPLACE:
4286 		err = macb_taprio_setup_replace(ndev, taprio);
4287 		break;
4288 	case TAPRIO_CMD_DESTROY:
4289 		macb_taprio_destroy(ndev);
4290 		break;
4291 	default:
4292 		err = -EOPNOTSUPP;
4293 	}
4294 
4295 	return err;
4296 }
4297 
4298 static int macb_setup_tc(struct net_device *dev, enum tc_setup_type type,
4299 			 void *type_data)
4300 {
4301 	if (!dev || !type_data)
4302 		return -EINVAL;
4303 
4304 	switch (type) {
4305 	case TC_SETUP_QDISC_TAPRIO:
4306 		return macb_setup_taprio(dev, type_data);
4307 	default:
4308 		return -EOPNOTSUPP;
4309 	}
4310 }
4311 
4312 static const struct net_device_ops macb_netdev_ops = {
4313 	.ndo_open		= macb_open,
4314 	.ndo_stop		= macb_close,
4315 	.ndo_start_xmit		= macb_start_xmit,
4316 	.ndo_set_rx_mode	= macb_set_rx_mode,
4317 	.ndo_get_stats64	= macb_get_stats,
4318 	.ndo_eth_ioctl		= macb_ioctl,
4319 	.ndo_validate_addr	= eth_validate_addr,
4320 	.ndo_change_mtu		= macb_change_mtu,
4321 	.ndo_set_mac_address	= macb_set_mac_addr,
4322 #ifdef CONFIG_NET_POLL_CONTROLLER
4323 	.ndo_poll_controller	= macb_poll_controller,
4324 #endif
4325 	.ndo_set_features	= macb_set_features,
4326 	.ndo_features_check	= macb_features_check,
4327 	.ndo_hwtstamp_set	= macb_hwtstamp_set,
4328 	.ndo_hwtstamp_get	= macb_hwtstamp_get,
4329 	.ndo_setup_tc		= macb_setup_tc,
4330 };
4331 
4332 /* Configure peripheral capabilities according to device tree
4333  * and integration options used
4334  */
4335 static void macb_configure_caps(struct macb *bp,
4336 				const struct macb_config *dt_conf)
4337 {
4338 	struct device_node *np = bp->pdev->dev.of_node;
4339 	bool refclk_ext;
4340 	u32 dcfg;
4341 
4342 	refclk_ext = of_property_read_bool(np, "cdns,refclk-ext");
4343 
4344 	if (dt_conf)
4345 		bp->caps = dt_conf->caps;
4346 
4347 	if (hw_is_gem(bp->regs, bp->native_io)) {
4348 		bp->caps |= MACB_CAPS_MACB_IS_GEM;
4349 
4350 		dcfg = gem_readl(bp, DCFG1);
4351 		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
4352 			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
4353 		if (GEM_BFEXT(NO_PCS, dcfg) == 0)
4354 			bp->caps |= MACB_CAPS_PCS;
4355 		dcfg = gem_readl(bp, DCFG12);
4356 		if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1)
4357 			bp->caps |= MACB_CAPS_HIGH_SPEED;
4358 		dcfg = gem_readl(bp, DCFG2);
4359 		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
4360 			bp->caps |= MACB_CAPS_FIFO_MODE;
4361 		if (GEM_BFEXT(PBUF_RSC, gem_readl(bp, DCFG6)))
4362 			bp->caps |= MACB_CAPS_RSC;
4363 		if (gem_has_ptp(bp)) {
4364 			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) {
4365 				dev_err(&bp->pdev->dev,
4366 					"GEM doesn't support hardware PTP.\n");
4367 			} else {
4368 #ifdef CONFIG_MACB_USE_HWSTAMP
4369 				bp->caps |= MACB_CAPS_DMA_PTP;
4370 				bp->ptp_info = &gem_ptp_info;
4371 #endif
4372 			}
4373 		}
4374 	}
4375 
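	/* An external reference clock ("cdns,refclk-ext") is signalled to the
	 * hardware via the USRIO refclk (CLKEN) bit, written in macb_init().
	 */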
4376 	if (refclk_ext)
4377 		bp->caps |= MACB_CAPS_USRIO_HAS_CLKEN;
4378 
4379 	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
4380 }
4381 
4382 static int macb_probe_queues(struct device *dev, void __iomem *mem, bool native_io)
4383 {
4384 	/* BIT(0) is never set but queue 0 always exists. */
4385 	unsigned int queue_mask = 0x1;
4386 
4387 	/* Use hw_is_gem() as MACB_CAPS_MACB_IS_GEM is not yet positioned. */
4388 	if (hw_is_gem(mem, native_io)) {
4389 		if (native_io)
4390 			queue_mask |= __raw_readl(mem + GEM_DCFG6) & 0xFF;
4391 		else
4392 			queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xFF;
4393 
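		/* A valid queue mask is contiguous from bit 0, e.g. 0x7 for
		 * queues 0-2: only for such masks do fls() and ffz() return
		 * the same value.
		 */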
4394 		if (fls(queue_mask) != ffz(queue_mask)) {
4395 			dev_err(dev, "queue mask %#x has a hole\n", queue_mask);
4396 			return -EINVAL;
4397 		}
4398 	}
4399 
4400 	return hweight32(queue_mask);
4401 }
4402 
4403 static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
4404 			      struct clk *rx_clk, struct clk *tsu_clk)
4405 {
4406 	struct clk_bulk_data clks[] = {
4407 		{ .clk = tsu_clk, },
4408 		{ .clk = rx_clk, },
4409 		{ .clk = pclk, },
4410 		{ .clk = hclk, },
4411 		{ .clk = tx_clk },
4412 	};
4413 
4414 	clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
4415 }
4416 
4417 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
4418 			 struct clk **hclk, struct clk **tx_clk,
4419 			 struct clk **rx_clk, struct clk **tsu_clk)
4420 {
4421 	struct macb_platform_data *pdata;
4422 	int err;
4423 
4424 	pdata = dev_get_platdata(&pdev->dev);
4425 	if (pdata) {
4426 		*pclk = pdata->pclk;
4427 		*hclk = pdata->hclk;
4428 	} else {
4429 		*pclk = devm_clk_get(&pdev->dev, "pclk");
4430 		*hclk = devm_clk_get(&pdev->dev, "hclk");
4431 	}
4432 
4433 	if (IS_ERR_OR_NULL(*pclk))
4434 		return dev_err_probe(&pdev->dev,
4435 				     IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV,
4436 				     "failed to get pclk\n");
4437 
4438 	if (IS_ERR_OR_NULL(*hclk))
4439 		return dev_err_probe(&pdev->dev,
4440 				     IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV,
4441 				     "failed to get hclk\n");
4442 
4443 	*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
4444 	if (IS_ERR(*tx_clk))
4445 		return PTR_ERR(*tx_clk);
4446 
4447 	*rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
4448 	if (IS_ERR(*rx_clk))
4449 		return PTR_ERR(*rx_clk);
4450 
4451 	*tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
4452 	if (IS_ERR(*tsu_clk))
4453 		return PTR_ERR(*tsu_clk);
4454 
4455 	err = clk_prepare_enable(*pclk);
4456 	if (err) {
4457 		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
4458 		return err;
4459 	}
4460 
4461 	err = clk_prepare_enable(*hclk);
4462 	if (err) {
4463 		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
4464 		goto err_disable_pclk;
4465 	}
4466 
4467 	err = clk_prepare_enable(*tx_clk);
4468 	if (err) {
4469 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4470 		goto err_disable_hclk;
4471 	}
4472 
4473 	err = clk_prepare_enable(*rx_clk);
4474 	if (err) {
4475 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
4476 		goto err_disable_txclk;
4477 	}
4478 
4479 	err = clk_prepare_enable(*tsu_clk);
4480 	if (err) {
4481 		dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
4482 		goto err_disable_rxclk;
4483 	}
4484 
4485 	return 0;
4486 
4487 err_disable_rxclk:
4488 	clk_disable_unprepare(*rx_clk);
4489 
4490 err_disable_txclk:
4491 	clk_disable_unprepare(*tx_clk);
4492 
4493 err_disable_hclk:
4494 	clk_disable_unprepare(*hclk);
4495 
4496 err_disable_pclk:
4497 	clk_disable_unprepare(*pclk);
4498 
4499 	return err;
4500 }
4501 
4502 static int macb_init(struct platform_device *pdev)
4503 {
4504 	struct net_device *dev = platform_get_drvdata(pdev);
4505 	unsigned int hw_q, q;
4506 	struct macb *bp = netdev_priv(dev);
4507 	struct macb_queue *queue;
4508 	int err;
4509 	u32 val, reg;
4510 
4511 	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
4512 	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
4513 
4514 	/* Set the queue register mapping once and for all: queue 0 has a
4515 	 * special register mapping, but we don't want to test the queue index
4516 	 * and then compute the corresponding register offset at run time.
4517 	 */
4518 	for (hw_q = 0, q = 0; hw_q < bp->num_queues; ++hw_q) {
4519 		queue = &bp->queues[q];
4520 		queue->bp = bp;
4521 		spin_lock_init(&queue->tx_ptr_lock);
4522 		netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
4523 		netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
4524 		if (hw_q) {
4525 			queue->ISR  = GEM_ISR(hw_q - 1);
4526 			queue->IER  = GEM_IER(hw_q - 1);
4527 			queue->IDR  = GEM_IDR(hw_q - 1);
4528 			queue->IMR  = GEM_IMR(hw_q - 1);
4529 			queue->TBQP = GEM_TBQP(hw_q - 1);
4530 			queue->RBQP = GEM_RBQP(hw_q - 1);
4531 			queue->RBQS = GEM_RBQS(hw_q - 1);
4532 		} else {
4533 			/* queue0 uses legacy registers */
4534 			queue->ISR  = MACB_ISR;
4535 			queue->IER  = MACB_IER;
4536 			queue->IDR  = MACB_IDR;
4537 			queue->IMR  = MACB_IMR;
4538 			queue->TBQP = MACB_TBQP;
4539 			queue->RBQP = MACB_RBQP;
4540 		}
4541 
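		/* Unlike the IRQ/queue-pointer registers above, the ENST (gate
		 * scheduling) registers are indexed by the hardware queue
		 * number for every queue, including queue 0.
		 */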
4542 		queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q);
4543 		queue->ENST_ON_TIME = GEM_ENST_ON_TIME(hw_q);
4544 		queue->ENST_OFF_TIME = GEM_ENST_OFF_TIME(hw_q);
4545 
4546 		/* Get the IRQ: here we use the Linux queue index, not the
4547 		 * hardware queue index. The queue IRQ definitions in the
4548 		 * device tree must leave out the optional gaps that could
4549 		 * exist in the hardware queue mask.
4550 		 */
4551 		queue->irq = platform_get_irq(pdev, q);
4552 		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
4553 				       IRQF_SHARED, dev->name, queue);
4554 		if (err) {
4555 			dev_err(&pdev->dev,
4556 				"Unable to request IRQ %d (error %d)\n",
4557 				queue->irq, err);
4558 			return err;
4559 		}
4560 
4561 		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
4562 		q++;
4563 	}
4564 
4565 	dev->netdev_ops = &macb_netdev_ops;
4566 
4567 	/* Set up the appropriate routines according to the adapter type */
4568 	if (macb_is_gem(bp)) {
4569 		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
4570 		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
4571 		bp->macbgem_ops.mog_init_rings = gem_init_rings;
4572 		bp->macbgem_ops.mog_rx = gem_rx;
4573 		dev->ethtool_ops = &gem_ethtool_ops;
4574 	} else {
4575 		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
4576 		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
4577 		bp->macbgem_ops.mog_init_rings = macb_init_rings;
4578 		bp->macbgem_ops.mog_rx = macb_rx;
4579 		dev->ethtool_ops = &macb_ethtool_ops;
4580 	}
4581 
4582 	netdev_sw_irq_coalesce_default_on(dev);
4583 
4584 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4585 
4586 	/* Set features */
4587 	dev->hw_features = NETIF_F_SG;
4588 
4589 	/* Check LSO capability; runtime detection can be overridden by a cap
4590 	 * flag if the hardware is known to be buggy
4591 	 */
4592 	if (!(bp->caps & MACB_CAPS_NO_LSO) &&
4593 	    GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
4594 		dev->hw_features |= MACB_NETIF_LSO;
4595 
4596 	/* Checksum offload is only available on gem with packet buffer */
4597 	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
4598 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
4599 	if (bp->caps & MACB_CAPS_SG_DISABLED)
4600 		dev->hw_features &= ~NETIF_F_SG;
4601 	/* Enable HW_TC if hardware supports QBV */
4602 	if (bp->caps & MACB_CAPS_QBV)
4603 		dev->hw_features |= NETIF_F_HW_TC;
4604 
4605 	dev->features = dev->hw_features;
4606 
4607 	/* Check RX Flow Filters support.
4608 	 * Max Rx flows are set by the availability of screeners & compare
4609 	 * regs: each 4-tuple filter requires 1 T2 screener reg + 3 compare regs
4610 	 */
4611 	reg = gem_readl(bp, DCFG8);
4612 	bp->max_tuples = umin((GEM_BFEXT(SCR2CMP, reg) / 3),
4613 			      GEM_BFEXT(T2SCR, reg));
4614 	INIT_LIST_HEAD(&bp->rx_fs_list.list);
4615 	if (bp->max_tuples > 0) {
4616 		/* we also need one ethtype match to identify IPv4 frames */
4617 		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
4618 			/* program this reg now */
4619 			reg = 0;
4620 			reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
4621 			gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
4622 			/* Filtering is supported in hardware but is not enabled by default */
4623 			dev->hw_features |= NETIF_F_NTUPLE;
4624 			/* init Rx flow definitions */
4625 			bp->rx_fs_list.count = 0;
4626 			spin_lock_init(&bp->rx_fs_lock);
4627 		} else {
4628 			bp->max_tuples = 0;
		}
4629 	}
4630 
4631 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
4632 		val = 0;
4633 		if (phy_interface_mode_is_rgmii(bp->phy_interface))
4634 			val = bp->usrio->rgmii;
4635 		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
4636 			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4637 			val = bp->usrio->rmii;
4638 		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4639 			val = bp->usrio->mii;
4640 
4641 		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
4642 			val |= bp->usrio->refclk;
4643 
4644 		macb_or_gem_writel(bp, USRIO, val);
4645 	}
4646 
4647 	/* Set MII management clock divider */
4648 	val = macb_mdc_clk_div(bp);
4649 	val |= macb_dbw(bp);
4650 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
4651 		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
4652 	macb_writel(bp, NCFGR, val);
4653 
4654 	return 0;
4655 }
4656 
4657 static const struct macb_usrio_config macb_default_usrio = {
4658 	.mii = MACB_BIT(MII),
4659 	.rmii = MACB_BIT(RMII),
4660 	.rgmii = GEM_BIT(RGMII),
4661 	.refclk = MACB_BIT(CLKEN),
4662 };
4663 
4664 #if defined(CONFIG_OF)
4665 /* 1518 rounded up */
4666 #define AT91ETHER_MAX_RBUFF_SZ	0x600
4667 /* max number of receive buffers */
4668 #define AT91ETHER_MAX_RX_DESCR	9
4669 
4670 static struct sifive_fu540_macb_mgmt *mgmt;
4671 
4672 static int at91ether_alloc_coherent(struct macb *lp)
4673 {
4674 	struct macb_queue *q = &lp->queues[0];
4675 
4676 	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
4677 					 (AT91ETHER_MAX_RX_DESCR *
4678 					  macb_dma_desc_get_size(lp)),
4679 					 &q->rx_ring_dma, GFP_KERNEL);
4680 	if (!q->rx_ring)
4681 		return -ENOMEM;
4682 
4683 	q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
4684 					    AT91ETHER_MAX_RX_DESCR *
4685 					    AT91ETHER_MAX_RBUFF_SZ,
4686 					    &q->rx_buffers_dma, GFP_KERNEL);
4687 	if (!q->rx_buffers) {
4688 		dma_free_coherent(&lp->pdev->dev,
4689 				  AT91ETHER_MAX_RX_DESCR *
4690 				  macb_dma_desc_get_size(lp),
4691 				  q->rx_ring, q->rx_ring_dma);
4692 		q->rx_ring = NULL;
4693 		return -ENOMEM;
4694 	}
4695 
4696 	return 0;
4697 }
4698 
4699 static void at91ether_free_coherent(struct macb *lp)
4700 {
4701 	struct macb_queue *q = &lp->queues[0];
4702 
4703 	if (q->rx_ring) {
4704 		dma_free_coherent(&lp->pdev->dev,
4705 				  AT91ETHER_MAX_RX_DESCR *
4706 				  macb_dma_desc_get_size(lp),
4707 				  q->rx_ring, q->rx_ring_dma);
4708 		q->rx_ring = NULL;
4709 	}
4710 
4711 	if (q->rx_buffers) {
4712 		dma_free_coherent(&lp->pdev->dev,
4713 				  AT91ETHER_MAX_RX_DESCR *
4714 				  AT91ETHER_MAX_RBUFF_SZ,
4715 				  q->rx_buffers, q->rx_buffers_dma);
4716 		q->rx_buffers = NULL;
4717 	}
4718 }
4719 
4720 /* Initialize and start the Receive and Transmit subsystems */
4721 static int at91ether_start(struct macb *lp)
4722 {
4723 	struct macb_queue *q = &lp->queues[0];
4724 	struct macb_dma_desc *desc;
4725 	dma_addr_t addr;
4726 	u32 ctl;
4727 	int i, ret;
4728 
4729 	ret = at91ether_alloc_coherent(lp);
4730 	if (ret)
4731 		return ret;
4732 
4733 	addr = q->rx_buffers_dma;
4734 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
4735 		desc = macb_rx_desc(q, i);
4736 		macb_set_addr(lp, desc, addr);
4737 		desc->ctrl = 0;
4738 		addr += AT91ETHER_MAX_RBUFF_SZ;
4739 	}
4740 
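	/* 'desc' still points at the last descriptor initialized above. */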
4741 	/* Set the Wrap bit on the last descriptor */
4742 	desc->addr |= MACB_BIT(RX_WRAP);
4743 
4744 	/* Reset buffer index */
4745 	q->rx_tail = 0;
4746 
4747 	/* Program address of descriptor list in Rx Buffer Queue register */
4748 	macb_writel(lp, RBQP, q->rx_ring_dma);
4749 
4750 	/* Enable Receive and Transmit */
4751 	ctl = macb_readl(lp, NCR);
4752 	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
4753 
4754 	/* Enable MAC interrupts */
4755 	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
4756 			     MACB_BIT(RXUBR)	|
4757 			     MACB_BIT(ISR_TUND)	|
4758 			     MACB_BIT(ISR_RLE)	|
4759 			     MACB_BIT(TCOMP)	|
4760 			     MACB_BIT(ISR_ROVR)	|
4761 			     MACB_BIT(HRESP));
4762 
4763 	return 0;
4764 }
4765 
4766 static void at91ether_stop(struct macb *lp)
4767 {
4768 	u32 ctl;
4769 
4770 	/* Disable MAC interrupts */
4771 	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
4772 			     MACB_BIT(RXUBR)	|
4773 			     MACB_BIT(ISR_TUND)	|
4774 			     MACB_BIT(ISR_RLE)	|
4775 			     MACB_BIT(TCOMP)	|
4776 			     MACB_BIT(ISR_ROVR) |
4777 			     MACB_BIT(HRESP));
4778 
4779 	/* Disable Receiver and Transmitter */
4780 	ctl = macb_readl(lp, NCR);
4781 	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
4782 
4783 	/* Free resources. */
4784 	at91ether_free_coherent(lp);
4785 }
4786 
4787 /* Open the ethernet interface */
4788 static int at91ether_open(struct net_device *dev)
4789 {
4790 	struct macb *lp = netdev_priv(dev);
4791 	u32 ctl;
4792 	int ret;
4793 
4794 	ret = pm_runtime_resume_and_get(&lp->pdev->dev);
4795 	if (ret < 0)
4796 		return ret;
4797 
4798 	/* Clear internal statistics */
4799 	ctl = macb_readl(lp, NCR);
4800 	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
4801 
4802 	macb_set_hwaddr(lp);
4803 
4804 	ret = at91ether_start(lp);
4805 	if (ret)
4806 		goto pm_exit;
4807 
4808 	ret = macb_phylink_connect(lp);
4809 	if (ret)
4810 		goto stop;
4811 
4812 	netif_start_queue(dev);
4813 
4814 	return 0;
4815 
4816 stop:
4817 	at91ether_stop(lp);
4818 pm_exit:
4819 	pm_runtime_put_sync(&lp->pdev->dev);
4820 	return ret;
4821 }
4822 
4823 /* Close the interface */
4824 static int at91ether_close(struct net_device *dev)
4825 {
4826 	struct macb *lp = netdev_priv(dev);
4827 
4828 	netif_stop_queue(dev);
4829 
4830 	phylink_stop(lp->phylink);
4831 	phylink_disconnect_phy(lp->phylink);
4832 
4833 	at91ether_stop(lp);
4834 
4835 	pm_runtime_put(&lp->pdev->dev);
4836 
4837 	return 0;
4838 }
4839 
4840 /* Transmit packet */
4841 static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
4842 					struct net_device *dev)
4843 {
4844 	struct macb *lp = netdev_priv(dev);
4845 
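	/* The RM9200 EMAC has no TX descriptor ring and queues a single frame
	 * at a time: transmit only when TSR reports BNQ (no buffer queued).
	 */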
4846 	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
4847 		int desc = 0;
4848 
4849 		netif_stop_queue(dev);
4850 
4851 		/* Store packet information (to free when Tx completed) */
4852 		lp->rm9200_txq[desc].skb = skb;
4853 		lp->rm9200_txq[desc].size = skb->len;
4854 		lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
4855 							      skb->len, DMA_TO_DEVICE);
4856 		if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
4857 			dev_kfree_skb_any(skb);
4858 			dev->stats.tx_dropped++;
4859 			netdev_err(dev, "%s: DMA mapping error\n", __func__);
4860 			return NETDEV_TX_OK;
4861 		}
4862 
4863 		/* Set address of the data in the Transmit Address register */
4864 		macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
4865 		/* Set length of the packet in the Transmit Control register */
4866 		macb_writel(lp, TCR, skb->len);
4867 
4868 	} else {
4869 		netdev_err(dev, "%s called, but device is busy!\n", __func__);
4870 		return NETDEV_TX_BUSY;
4871 	}
4872 
4873 	return NETDEV_TX_OK;
4874 }
4875 
4876 /* Extract received frames from the buffer descriptors and send them to the
4877  * upper layers. (Called from interrupt context.)
4878  */
4879 static void at91ether_rx(struct net_device *dev)
4880 {
4881 	struct macb *lp = netdev_priv(dev);
4882 	struct macb_queue *q = &lp->queues[0];
4883 	struct macb_dma_desc *desc;
4884 	unsigned char *p_recv;
4885 	struct sk_buff *skb;
4886 	unsigned int pktlen;
4887 
4888 	desc = macb_rx_desc(q, q->rx_tail);
4889 	while (desc->addr & MACB_BIT(RX_USED)) {
4890 		p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
4891 		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
4892 		skb = netdev_alloc_skb(dev, pktlen + 2);
4893 		if (skb) {
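			/* Reserve 2 bytes of headroom so the IP header is
			 * 32-bit aligned after the 14-byte Ethernet header.
			 */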
4894 			skb_reserve(skb, 2);
4895 			skb_put_data(skb, p_recv, pktlen);
4896 
4897 			skb->protocol = eth_type_trans(skb, dev);
4898 			dev->stats.rx_packets++;
4899 			dev->stats.rx_bytes += pktlen;
4900 			netif_rx(skb);
4901 		} else {
4902 			dev->stats.rx_dropped++;
4903 		}
4904 
4905 		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
4906 			dev->stats.multicast++;
4907 
4908 		/* reset ownership bit */
4909 		desc->addr &= ~MACB_BIT(RX_USED);
4910 
4911 		/* wrap after last buffer */
4912 		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
4913 			q->rx_tail = 0;
4914 		else
4915 			q->rx_tail++;
4916 
4917 		desc = macb_rx_desc(q, q->rx_tail);
4918 	}
4919 }
4920 
4921 /* MAC interrupt handler */
4922 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
4923 {
4924 	struct net_device *dev = dev_id;
4925 	struct macb *lp = netdev_priv(dev);
4926 	u32 intstatus, ctl;
4927 	unsigned int desc;
4928 
4929 	/* MAC Interrupt Status register indicates what interrupts are pending.
4930 	 * It is automatically cleared once read.
4931 	 */
4932 	intstatus = macb_readl(lp, ISR);
4933 
4934 	/* Receive complete */
4935 	if (intstatus & MACB_BIT(RCOMP))
4936 		at91ether_rx(dev);
4937 
4938 	/* Transmit complete */
4939 	if (intstatus & MACB_BIT(TCOMP)) {
4940 		/* The TCOMP bit is set even if the transmission failed */
4941 		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
4942 			dev->stats.tx_errors++;
4943 
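		/* Only slot 0 is ever used; see at91ether_start_xmit() */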
4944 		desc = 0;
4945 		if (lp->rm9200_txq[desc].skb) {
4946 			dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
4947 			lp->rm9200_txq[desc].skb = NULL;
4948 			dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
4949 					 lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
4950 			dev->stats.tx_packets++;
4951 			dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
4952 		}
4953 		netif_wake_queue(dev);
4954 	}
4955 
4956 	/* Work-around for EMAC Errata section 41.3.1 */
4957 	if (intstatus & MACB_BIT(RXUBR)) {
4958 		ctl = macb_readl(lp, NCR);
4959 		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
4960 		wmb();
4961 		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
4962 	}
4963 
4964 	if (intstatus & MACB_BIT(ISR_ROVR))
4965 		netdev_err(dev, "ROVR error\n");
4966 
4967 	return IRQ_HANDLED;
4968 }
4969 
4970 #ifdef CONFIG_NET_POLL_CONTROLLER
4971 static void at91ether_poll_controller(struct net_device *dev)
4972 {
4973 	unsigned long flags;
4974 
4975 	local_irq_save(flags);
4976 	at91ether_interrupt(dev->irq, dev);
4977 	local_irq_restore(flags);
4978 }
4979 #endif
4980 
4981 static const struct net_device_ops at91ether_netdev_ops = {
4982 	.ndo_open		= at91ether_open,
4983 	.ndo_stop		= at91ether_close,
4984 	.ndo_start_xmit		= at91ether_start_xmit,
4985 	.ndo_get_stats64	= macb_get_stats,
4986 	.ndo_set_rx_mode	= macb_set_rx_mode,
4987 	.ndo_set_mac_address	= eth_mac_addr,
4988 	.ndo_eth_ioctl		= macb_ioctl,
4989 	.ndo_validate_addr	= eth_validate_addr,
4990 #ifdef CONFIG_NET_POLL_CONTROLLER
4991 	.ndo_poll_controller	= at91ether_poll_controller,
4992 #endif
4993 	.ndo_hwtstamp_set	= macb_hwtstamp_set,
4994 	.ndo_hwtstamp_get	= macb_hwtstamp_get,
4995 };
4996 
4997 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
4998 			      struct clk **hclk, struct clk **tx_clk,
4999 			      struct clk **rx_clk, struct clk **tsu_clk)
5000 {
5001 	int err;
5002 
5003 	*hclk = NULL;
5004 	*tx_clk = NULL;
5005 	*rx_clk = NULL;
5006 	*tsu_clk = NULL;
5007 
5008 	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
5009 	if (IS_ERR(*pclk))
5010 		return PTR_ERR(*pclk);
5011 
5012 	err = clk_prepare_enable(*pclk);
5013 	if (err) {
5014 		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
5015 		return err;
5016 	}
5017 
5018 	return 0;
5019 }
5020 
5021 static int at91ether_init(struct platform_device *pdev)
5022 {
5023 	struct net_device *dev = platform_get_drvdata(pdev);
5024 	struct macb *bp = netdev_priv(dev);
5025 	int err;
5026 
5027 	bp->queues[0].bp = bp;
5028 
5029 	dev->netdev_ops = &at91ether_netdev_ops;
5030 	dev->ethtool_ops = &macb_ethtool_ops;
5031 
5032 	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
5033 			       0, dev->name, dev);
5034 	if (err)
5035 		return err;
5036 
5037 	macb_writel(bp, NCR, 0);
5038 
5039 	macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
5040 
5041 	return 0;
5042 }
5043 
5044 static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
5045 					       unsigned long parent_rate)
5046 {
5047 	return mgmt->rate;
5048 }
5049 
5050 static int fu540_macb_tx_determine_rate(struct clk_hw *hw,
5051 					struct clk_rate_request *req)
5052 {
5053 	if (WARN_ON(req->rate < 2500000))
5054 		req->rate = 2500000;
5055 	else if (req->rate == 2500000)
5056 		req->rate = 2500000;
5057 	else if (WARN_ON(req->rate < 13750000))
5058 		req->rate = 2500000;
5059 	else if (WARN_ON(req->rate < 25000000))
5060 		req->rate = 25000000;
5061 	else if (req->rate == 25000000)
5062 		req->rate = 25000000;
5063 	else if (WARN_ON(req->rate < 75000000))
5064 		req->rate = 25000000;
5065 	else if (WARN_ON(req->rate < 125000000))
5066 		req->rate = 125000000;
5067 	else if (req->rate == 125000000)
5068 		req->rate = 125000000;
5069 	else if (WARN_ON(req->rate > 125000000))
5070 		req->rate = 125000000;
5071 	else
5072 		req->rate = 125000000;
5073 
5074 	return 0;
5075 }
5076 
5077 static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
5078 				  unsigned long parent_rate)
5079 {
5080 	struct clk_rate_request req;
5081 	int ret;
5082 
5083 	clk_hw_init_rate_request(hw, &req, rate);
5084 	ret = fu540_macb_tx_determine_rate(hw, &req);
5085 	if (ret != 0)
5086 		return ret;
5087 
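	/* The GEMGXL management register selects the TX clock path: 0 for the
	 * 125 MHz (gigabit) rate, 1 for the slower rates.
	 */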
5088 	if (req.rate != 125000000)
5089 		iowrite32(1, mgmt->reg);
5090 	else
5091 		iowrite32(0, mgmt->reg);
5092 	mgmt->rate = rate;
5093 
5094 	return 0;
5095 }
5096 
5097 static const struct clk_ops fu540_c000_ops = {
5098 	.recalc_rate = fu540_macb_tx_recalc_rate,
5099 	.determine_rate = fu540_macb_tx_determine_rate,
5100 	.set_rate = fu540_macb_tx_set_rate,
5101 };
5102 
5103 static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
5104 			       struct clk **hclk, struct clk **tx_clk,
5105 			       struct clk **rx_clk, struct clk **tsu_clk)
5106 {
5107 	struct clk_init_data init;
5108 	int err = 0;
5109 
5110 	err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
5111 	if (err)
5112 		return err;
5113 
5114 	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
5115 	if (!mgmt) {
5116 		err = -ENOMEM;
5117 		goto err_disable_clks;
5118 	}
5119 
5120 	init.name = "sifive-gemgxl-mgmt";
5121 	init.ops = &fu540_c000_ops;
5122 	init.flags = 0;
5123 	init.num_parents = 0;
5124 
5125 	mgmt->rate = 0;
5126 	mgmt->hw.init = &init;
5127 
5128 	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
5129 	if (IS_ERR(*tx_clk)) {
5130 		err = PTR_ERR(*tx_clk);
5131 		goto err_disable_clks;
5132 	}
5133 
5134 	err = clk_prepare_enable(*tx_clk);
5135 	if (err) {
5136 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
5137 		*tx_clk = NULL;
5138 		goto err_disable_clks;
5139 	} else {
5140 		dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
5141 	}
5142 
5143 	return 0;
5144 
5145 err_disable_clks:
5146 	macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk);
5147 
5148 	return err;
5149 }
5150 
5151 static int fu540_c000_init(struct platform_device *pdev)
5152 {
5153 	mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
5154 	if (IS_ERR(mgmt->reg))
5155 		return PTR_ERR(mgmt->reg);
5156 
5157 	return macb_init(pdev);
5158 }
5159 
5160 static int init_reset_optional(struct platform_device *pdev)
5161 {
5162 	struct net_device *dev = platform_get_drvdata(pdev);
5163 	struct macb *bp = netdev_priv(dev);
5164 	int ret;
5165 
5166 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
5167 		/* Ensure PHY device used in SGMII mode is ready */
5168 		bp->phy = devm_phy_optional_get(&pdev->dev, NULL);
5169 
5170 		if (IS_ERR(bp->phy))
5171 			return dev_err_probe(&pdev->dev, PTR_ERR(bp->phy),
5172 					     "failed to get SGMII PHY\n");
5173 
5174 		ret = phy_init(bp->phy);
5175 		if (ret)
5176 			return dev_err_probe(&pdev->dev, ret,
5177 					     "failed to init SGMII PHY\n");
5178 
5179 		ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
5180 		if (!ret) {
5181 			u32 pm_info[2];
5182 
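			/* The second "power-domains" cell is the GEM node id
			 * expected by the platform firmware.
			 */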
5183 			ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
5184 							 pm_info, ARRAY_SIZE(pm_info));
5185 			if (ret) {
5186 				dev_err(&pdev->dev, "Failed to read power management information\n");
5187 				goto err_out_phy_exit;
5188 			}
5189 			ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
5190 			if (ret)
5191 				goto err_out_phy_exit;
5192 
5193 			ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
5194 			if (ret)
5195 				goto err_out_phy_exit;
5196 		}
5197 
5198 	}
5199 
5200 	/* Fully reset controller at hardware level if mapped in device tree */
5201 	ret = device_reset_optional(&pdev->dev);
5202 	if (ret) {
5203 		phy_exit(bp->phy);
5204 		return dev_err_probe(&pdev->dev, ret, "failed to reset controller\n");
5205 	}
5206 
5207 	ret = macb_init(pdev);
5208 
5209 err_out_phy_exit:
5210 	if (ret)
5211 		phy_exit(bp->phy);
5212 
5213 	return ret;
5214 }
5215 
5216 static int eyeq5_init(struct platform_device *pdev)
5217 {
5218 	struct net_device *netdev = platform_get_drvdata(pdev);
5219 	struct macb *bp = netdev_priv(netdev);
5220 	struct device *dev = &pdev->dev;
5221 	int ret;
5222 
5223 	bp->phy = devm_phy_get(dev, NULL);
5224 	if (IS_ERR(bp->phy))
5225 		return dev_err_probe(dev, PTR_ERR(bp->phy),
5226 				     "failed to get PHY\n");
5227 
5228 	ret = phy_init(bp->phy);
5229 	if (ret)
5230 		return dev_err_probe(dev, ret, "failed to init PHY\n");
5231 
5232 	ret = macb_init(pdev);
5233 	if (ret)
5234 		phy_exit(bp->phy);
5235 	return ret;
5236 }
5237 
5238 static const struct macb_usrio_config sama7g5_usrio = {
5239 	.mii = 0,
5240 	.rmii = 1,
5241 	.rgmii = 2,
5242 	.refclk = BIT(2),
5243 	.hdfctlen = BIT(6),
5244 };
5245 
5246 static const struct macb_config fu540_c000_config = {
5247 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
5248 		MACB_CAPS_GEM_HAS_PTP,
5249 	.dma_burst_length = 16,
5250 	.clk_init = fu540_c000_clk_init,
5251 	.init = fu540_c000_init,
5252 	.jumbo_max_len = 10240,
5253 	.usrio = &macb_default_usrio,
5254 };
5255 
5256 static const struct macb_config at91sam9260_config = {
5257 	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
5258 	.clk_init = macb_clk_init,
5259 	.init = macb_init,
5260 	.usrio = &macb_default_usrio,
5261 };
5262 
5263 static const struct macb_config sama5d3macb_config = {
5264 	.caps = MACB_CAPS_SG_DISABLED |
5265 		MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
5266 	.clk_init = macb_clk_init,
5267 	.init = macb_init,
5268 	.usrio = &macb_default_usrio,
5269 };
5270 
5271 static const struct macb_config pc302gem_config = {
5272 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
5273 	.dma_burst_length = 16,
5274 	.clk_init = macb_clk_init,
5275 	.init = macb_init,
5276 	.usrio = &macb_default_usrio,
5277 };
5278 
5279 static const struct macb_config sama5d2_config = {
5280 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
5281 	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
	.usrio = &macb_default_usrio,
};

static const struct macb_config sama5d29_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
	.usrio = &macb_default_usrio,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config emac_config = {
	.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = init_reset_optional,
	.jumbo_max_len = 10240,
	.usrio = &macb_default_usrio,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
		MACB_CAPS_NEEDS_RSTONUBR,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config mpfs_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = init_reset_optional,
	.usrio = &macb_default_usrio,
	.max_tx_length = 4040, /* Cadence Erratum 1686 */
	.jumbo_max_len = 4040,
};

static const struct macb_config sama7g5_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
		MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
		MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &sama7g5_usrio,
};

static const struct macb_config sama7g5_emac_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
		MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &sama7g5_usrio,
};

static const struct macb_config versal_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
		MACB_CAPS_NEED_TSUCLK | MACB_CAPS_QUEUE_DISABLE |
		MACB_CAPS_QBV,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = init_reset_optional,
	.jumbo_max_len = 10240,
	.usrio = &macb_default_usrio,
};

static const struct macb_config eyeq5_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_QUEUE_DISABLE |
		MACB_CAPS_NO_LSO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = eyeq5_init,
	.jumbo_max_len = 10240,
	.usrio = &macb_default_usrio,
};

static const struct macb_config raspberrypi_rp1_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
	.jumbo_max_len = 10240,
};

static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config }, /* deprecated */
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
	{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
	{ .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
	{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
	{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
	{ .compatible = "mobileye,eyeq5-gem", .data = &eyeq5_config },
	{ .compatible = "raspberrypi,rp1-gem", .data = &raspberrypi_rp1_config },
	{ .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config },
	{ .compatible = "xlnx,zynq-gem", .data = &zynq_config },
	{ .compatible = "xlnx,versal-gem", .data = &versal_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */

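/* Fallback configuration, used when the matched OF entry carries no
 * .data (e.g. a plain "cdns,macb" node) or when there is no match data
 * at all: assume a fully featured GEM with jumbo-frame and PTP support.
 */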
static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
	.jumbo_max_len = 10240,
};

static int macb_probe(struct platform_device *pdev)
{
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	struct device_node *np = pdev->dev.of_node;
	const struct macb_config *macb_config;
	struct clk *tsu_clk = NULL;
	phy_interface_t interface;
	struct net_device *dev;
	struct resource *regs;
	u32 wtrmrk_rst_val;
	void __iomem *mem;
	struct macb *bp;
	int num_queues;
	bool native_io;
	int err, val;

	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	macb_config = of_device_get_match_data(&pdev->dev);
	if (!macb_config)
		macb_config = &default_gem_config;

	err = macb_config->clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	native_io = hw_is_native_io(mem);

	num_queues = macb_probe_queues(&pdev->dev, mem, native_io);
	if (num_queues < 0) {
		err = num_queues;
		goto err_disable_clocks;
	}

	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	bp->tsu_clk = tsu_clk;
	bp->jumbo_max_len = macb_config->jumbo_max_len;

	if (!hw_is_gem(bp->regs, bp->native_io))
		bp->max_tx_length = MACB_MAX_TX_LEN;
	else if (macb_config->max_tx_length)
		bp->max_tx_length = macb_config->max_tx_length;
	else
		bp->max_tx_length = GEM_MAX_TX_LEN;

	bp->wol = 0;
	device_set_wakeup_capable(&pdev->dev, 1);

	bp->usrio = macb_config->usrio;

	/* On controllers whose packet buffer supports cut-through (e.g.
	 * ZynqMP), enable partial store and forward only when a valid
	 * watermark is given in the devicetree; otherwise leave it disabled.
	 */
	if (GEM_BFEXT(PBUF_CUTTHRU, gem_readl(bp, DCFG6))) {
		err = of_property_read_u32(bp->pdev->dev.of_node,
					   "cdns,rx-watermark",
					   &bp->rx_watermark);

		if (!err) {
			/* Disable partial store and forward if the watermark
			 * is zero or exceeds the maximum derived from the RX
			 * packet buffer address width in DCFG2.
			 */
			wtrmrk_rst_val = (1 << (GEM_BFEXT(RX_PBUF_ADDR, gem_readl(bp, DCFG2)))) - 1;
			if (bp->rx_watermark > wtrmrk_rst_val || !bp->rx_watermark) {
				dev_info(&bp->pdev->dev, "Invalid watermark value\n");
				bp->rx_watermark = 0;
			}
		}
	}
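	/* An illustrative devicetree fragment for the "cdns,rx-watermark"
	 * property read above; the node name, compatible string and
	 * watermark value below are examples only, not from a real board:
	 *
	 *	ethernet@ff0b0000 {
	 *		compatible = "xlnx,zynqmp-gem";
	 *		cdns,rx-watermark = <0x800>;
	 *	};
	 */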
	spin_lock_init(&bp->lock);
	spin_lock_init(&bp->stats_lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
		if (err) {
			dev_err(&pdev->dev, "failed to set DMA mask\n");
			goto err_out_free_netdev;
		}
		bp->caps |= MACB_CAPS_DMA_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1518 (1536 minus Ethernet header and FCS), or up
	 * to the jumbo frame limit minus header and FCS when supported
	 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN;

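	/* Note (editorial assumption): the DCFG10 RXBD/TXBD_RDBUFF fields
	 * appear to encode the descriptor prefetch depth as a power of two,
	 * so a field value of 2 means (2 << 1) = 4 descriptors; the ring
	 * allocations are padded by that many descriptor sizes so that
	 * prefetching past the last descriptor stays inside mapped memory.
	 */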
	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);

		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);
	}

	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
		bp->rx_intr_mask |= MACB_BIT(RXUBR);

	err = of_get_ethdev_address(np, bp->dev);
	if (err == -EPROBE_DEFER)
		goto err_out_free_netdev;
	else if (err)
		macb_get_hwaddr(bp);

	err = of_get_phy_mode(np, &interface);
	if (err)
		/* not found in DT, MII by default */
		bp->phy_interface = PHY_INTERFACE_MODE_MII;
	else
		bp->phy_interface = interface;

	/* IP specific init */
	err = macb_config->init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_phy_exit;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;

err_out_unregister_mdio:
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

err_out_phy_exit:
	phy_exit(bp->phy);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}

static void macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		unregister_netdev(dev);
		phy_exit(bp->phy);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		device_set_wakeup_enable(&bp->pdev->dev, 0);
		cancel_work_sync(&bp->hresp_err_bh_work);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		pm_runtime_set_suspended(&pdev->dev);
		phylink_destroy(bp->phylink);
		free_netdev(dev);
	}
}

static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct in_ifaddr *ifa = NULL;
	struct macb_queue *queue;
	struct in_device *idev;
	unsigned long flags;
	unsigned int q;
	int err;
	u32 tmp;

	if (!device_may_wakeup(&bp->dev->dev))
		phy_exit(bp->phy);

	if (!netif_running(netdev))
		return 0;

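	/* Wake-on-LAN arming. Userspace selects the wake events beforehand
	 * via ethtool, e.g. "ethtool -s eth0 wol g" for magic packet or
	 * "ethtool -s eth0 wol a" for ARP wake-up ("eth0" is illustrative).
	 */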
	if (bp->wol & MACB_WOL_ENABLED) {
		/* Check for an IP address in WoL ARP mode */
		idev = __in_dev_get_rcu(bp->dev);
		if (idev)
			ifa = rcu_dereference(idev->ifa_list);
		if ((bp->wolopts & WAKE_ARP) && !ifa) {
			netdev_err(netdev, "IP address not assigned as required by WoL wake-on-ARP\n");
			return -EOPNOTSUPP;
		}
		spin_lock_irqsave(&bp->lock, flags);

		/* Disable the Tx and Rx engines before disabling the queues;
		 * this is mandatory as per the IP spec sheet
		 */
		tmp = macb_readl(bp, NCR);
		macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
			macb_writel(bp, RBQPH,
				    upper_32_bits(bp->rx_ring_tieoff_dma));
#endif
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue) {
			/* Disable RX queues */
			if (bp->caps & MACB_CAPS_QUEUE_DISABLE) {
				queue_writel(queue, RBQP, MACB_BIT(QUEUE_DISABLE));
			} else {
				/* Tie off RX queues */
				queue_writel(queue, RBQP,
					     lower_32_bits(bp->rx_ring_tieoff_dma));
			}
			/* Disable all interrupts */
			queue_writel(queue, IDR, -1);
			queue_readl(queue, ISR);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
		}
		/* Enable the Receive engine */
		macb_writel(bp, NCR, tmp | MACB_BIT(RE));
		/* Flush all status bits */
		macb_writel(bp, TSR, -1);
		macb_writel(bp, RSR, -1);

		tmp = (bp->wolopts & WAKE_MAGIC) ? MACB_BIT(MAG) : 0;
		if (bp->wolopts & WAKE_ARP) {
			tmp |= MACB_BIT(ARP);
			/* write the IP address into the register */
			tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local));
		}

		/* Swap in the WoL interrupt handler and enable the WoL IRQ
		 * on queue 0
		 */
		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
		if (macb_is_gem(bp)) {
			err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
					       IRQF_SHARED, netdev->name, bp->queues);
			if (err) {
				dev_err(dev,
					"Unable to request IRQ %d (error %d)\n",
					bp->queues[0].irq, err);
				spin_unlock_irqrestore(&bp->lock, flags);
				return err;
			}
			queue_writel(bp->queues, IER, GEM_BIT(WOL));
			gem_writel(bp, WOL, tmp);
		} else {
			err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
					       IRQF_SHARED, netdev->name, bp->queues);
			if (err) {
				dev_err(dev,
					"Unable to request IRQ %d (error %d)\n",
					bp->queues[0].irq, err);
				spin_unlock_irqrestore(&bp->lock, flags);
				return err;
			}
			queue_writel(bp->queues, IER, MACB_BIT(WOL));
			macb_writel(bp, WOL, tmp);
		}
		spin_unlock_irqrestore(&bp->lock, flags);

		enable_irq_wake(bp->queues[0].irq);
	}

	netif_device_detach(netdev);
	for (q = 0, queue = bp->queues; q < bp->num_queues;
	     ++q, ++queue) {
		napi_disable(&queue->napi_rx);
		napi_disable(&queue->napi_tx);
	}

	if (!(bp->wol & MACB_WOL_ENABLED)) {
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);
	}

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);

	if (netdev->hw_features & NETIF_F_NTUPLE)
		bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	if (!device_may_wakeup(dev))
		pm_runtime_force_suspend(dev);

	return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	int err;

	if (!device_may_wakeup(&bp->dev->dev))
		phy_init(bp->phy);

	if (!netif_running(netdev))
		return 0;

	if (!device_may_wakeup(dev))
		pm_runtime_force_resume(dev);

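	/* Disarm the WoL machinery set up in macb_suspend(): mask and clear
	 * the WoL interrupt, then restore the normal handler on queue 0.
	 */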
	if (bp->wol & MACB_WOL_ENABLED) {
		spin_lock_irqsave(&bp->lock, flags);
		/* Disable WoL */
		if (macb_is_gem(bp)) {
			queue_writel(bp->queues, IDR, GEM_BIT(WOL));
			gem_writel(bp, WOL, 0);
		} else {
			queue_writel(bp->queues, IDR, MACB_BIT(WOL));
			macb_writel(bp, WOL, 0);
		}
		/* Clear the ISR on queue 0 */
		queue_readl(bp->queues, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(bp->queues, ISR, -1);
		/* Restore the normal interrupt handler on queue 0 */
		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
		err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
				       IRQF_SHARED, netdev->name, bp->queues);
		if (err) {
			dev_err(dev,
				"Unable to request IRQ %d (error %d)\n",
				bp->queues[0].irq, err);
			spin_unlock_irqrestore(&bp->lock, flags);
			return err;
		}
		spin_unlock_irqrestore(&bp->lock, flags);

		disable_irq_wake(bp->queues[0].irq);

		/* Make sure the PHY link is stopped before moving on to the
		 * common restore path below
		 */
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues;
	     ++q, ++queue) {
		napi_enable(&queue->napi_rx);
		napi_enable(&queue->napi_tx);
	}

	if (netdev->hw_features & NETIF_F_NTUPLE)
		gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

	macb_writel(bp, NCR, MACB_BIT(MPE));
	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	macb_restore_features(bp);
	rtnl_lock();

	phylink_start(bp->phylink);
	rtnl_unlock();

	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}

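/* Runtime PM clock policy, as implemented below: while the device may
 * wake the system, the bus and packet clocks must keep running so the
 * WoL logic stays alive, and only the TSU clock is gated on parts that
 * do not need it; otherwise every clock is stopped.
 */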
static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!device_may_wakeup(dev))
		macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
	else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
		macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);

	return 0;
}

static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!device_may_wakeup(dev)) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
		clk_prepare_enable(bp->tsu_clk);
	} else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
		clk_prepare_enable(bp->tsu_clk);
	}

	return 0;
}

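/* Quiesce the interface on shutdown/reboot: dev_close() stops the queues
 * and DMA so that a subsequently started kernel (e.g. after kexec) does
 * not inherit a live controller.
 */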
static void macb_shutdown(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	rtnl_lock();

	if (netif_running(netdev))
		dev_close(netdev);

	netif_device_detach(netdev);

	rtnl_unlock();
}

static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm		= &macb_pm_ops,
	},
	.shutdown	= macb_shutdown,
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");