xref: /linux/drivers/net/ethernet/cadence/macb_main.c (revision a266ef69b890f099069cf51bb40572611c435a54)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cadence MACB/GEM Ethernet Controller driver
4  *
5  * Copyright (C) 2004-2006 Atmel Corporation
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/clk.h>
10 #include <linux/clk-provider.h>
11 #include <linux/crc32.h>
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/kernel.h>
15 #include <linux/types.h>
16 #include <linux/circ_buf.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/io.h>
20 #include <linux/gpio.h>
21 #include <linux/gpio/consumer.h>
22 #include <linux/interrupt.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/platform_device.h>
27 #include <linux/phylink.h>
28 #include <linux/of.h>
29 #include <linux/of_device.h>
30 #include <linux/of_gpio.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/ip.h>
34 #include <linux/udp.h>
35 #include <linux/tcp.h>
36 #include <linux/iopoll.h>
37 #include <linux/phy/phy.h>
38 #include <linux/pm_runtime.h>
39 #include <linux/ptp_classify.h>
40 #include <linux/reset.h>
41 #include <linux/firmware/xlnx-zynqmp.h>
42 #include "macb.h"
43 
44 /* This structure is only used for MACB on SiFive FU540 devices */
45 struct sifive_fu540_macb_mgmt {
46 	void __iomem *reg;
47 	unsigned long rate;
48 	struct clk_hw hw;
49 };
50 
51 #define MACB_RX_BUFFER_SIZE	128
52 #define RX_BUFFER_MULTIPLE	64  /* bytes */
53 
54 #define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
55 #define MIN_RX_RING_SIZE	64
56 #define MAX_RX_RING_SIZE	8192
57 #define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
58 				 * (bp)->rx_ring_size)
59 
60 #define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
61 #define MIN_TX_RING_SIZE	64
62 #define MAX_TX_RING_SIZE	4096
63 #define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
64 				 * (bp)->tx_ring_size)
65 
66 /* level of occupied TX descriptors under which we wake up TX process */
67 #define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
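/* For example, with the default 512-entry TX ring the threshold is
 * 3 * 512 / 4 = 384: a stopped subqueue is woken once no more than
 * 384 descriptors remain in use (see macb_tx_complete()).
 */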
68 
69 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
70 #define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
71 					| MACB_BIT(ISR_RLE)		\
72 					| MACB_BIT(TXERR))
73 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
74 					| MACB_BIT(TXUBR))
75 
76 /* Max length of transmit frame must be a multiple of 8 bytes */
77 #define MACB_TX_LEN_ALIGN	8
78 #define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
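/* Worked example (assuming MACB_TX_FRMLEN_SIZE is 11, per macb.h):
 * ((1 << 11) - 1) & ~7 = 2047 & ~7 = 2040 bytes.
 */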
79 /* Limit the maximum TX length as per the Cadence TSO errata, to avoid a
80  * false amba_error on the TX path caused by the DMA assuming there is not
81  * enough space in the SRAM (16KB) even when there is. 0x3FC0 is 16320 bytes.
82  */
83 #define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)
84 
85 #define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
86 #define MACB_NETIF_LSO		NETIF_F_TSO
87 
88 #define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
89 #define MACB_WOL_ENABLED		(0x1 << 1)
90 
91 #define HS_SPEED_10000M			4
92 #define MACB_SERDES_RATE_10G		1
93 
94 /* Graceful stop timeouts in us. We should allow up to
95  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
96  */
97 #define MACB_HALT_TIMEOUT	1230
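/* Back-of-the-envelope check of the value above: a maximum-size untagged
 * frame (1518 bytes) plus preamble/SFD (8 bytes) and inter-frame gap
 * (12 bytes) is 1538 bytes = 12304 bits, i.e. 1230.4 us at 10 Mbit/s.
 */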
98 
99 #define MACB_PM_TIMEOUT  100 /* ms */
100 
101 #define MACB_MDIO_TIMEOUT	1000000 /* in usecs */
102 
103 /* The DMA buffer descriptor size may differ
104  * depending on the hardware configuration:
105  *
106  * 1. dma address width 32 bits:
107  *    word 1: 32 bit address of Data Buffer
108  *    word 2: control
109  *
110  * 2. dma address width 64 bits:
111  *    word 1: 32 bit address of Data Buffer
112  *    word 2: control
113  *    word 3: upper 32 bit address of Data Buffer
114  *    word 4: unused
115  *
116  * 3. dma address width 32 bits with hardware timestamping:
117  *    word 1: 32 bit address of Data Buffer
118  *    word 2: control
119  *    word 3: timestamp word 1
120  *    word 4: timestamp word 2
121  *
122  * 4. dma address width 64 bits with hardware timestamping:
123  *    word 1: 32 bit address of Data Buffer
124  *    word 2: control
125  *    word 3: upper 32 bit address of Data Buffer
126  *    word 4: unused
127  *    word 5: timestamp word 1
128  *    word 6: timestamp word 2
129  */
130 static unsigned int macb_dma_desc_get_size(struct macb *bp)
131 {
132 #ifdef MACB_EXT_DESC
133 	unsigned int desc_size;
134 
135 	switch (bp->hw_dma_cap) {
136 	case HW_DMA_CAP_64B:
137 		desc_size = sizeof(struct macb_dma_desc)
138 			+ sizeof(struct macb_dma_desc_64);
139 		break;
140 	case HW_DMA_CAP_PTP:
141 		desc_size = sizeof(struct macb_dma_desc)
142 			+ sizeof(struct macb_dma_desc_ptp);
143 		break;
144 	case HW_DMA_CAP_64B_PTP:
145 		desc_size = sizeof(struct macb_dma_desc)
146 			+ sizeof(struct macb_dma_desc_64)
147 			+ sizeof(struct macb_dma_desc_ptp);
148 		break;
149 	default:
150 		desc_size = sizeof(struct macb_dma_desc);
151 	}
152 	return desc_size;
153 #endif
154 	return sizeof(struct macb_dma_desc);
155 }
156 
157 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
158 {
159 #ifdef MACB_EXT_DESC
160 	switch (bp->hw_dma_cap) {
161 	case HW_DMA_CAP_64B:
162 	case HW_DMA_CAP_PTP:
163 		desc_idx <<= 1;
164 		break;
165 	case HW_DMA_CAP_64B_PTP:
166 		desc_idx *= 3;
167 		break;
168 	default:
169 		break;
170 	}
171 #endif
172 	return desc_idx;
173 }
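/* Note on the scaling above: the rings are laid out as arrays of the base
 * struct macb_dma_desc, so with extended descriptors (two base-sized units
 * for 64-bit addressing or PTP, three for both) the logical index must be
 * multiplied by 2 or 3 to reach the right hardware slot.
 */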
174 
175 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
176 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
177 {
178 	return (struct macb_dma_desc_64 *)((void *)desc
179 		+ sizeof(struct macb_dma_desc));
180 }
181 #endif
182 
183 /* Ring buffer accessors */
184 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
185 {
186 	return index & (bp->tx_ring_size - 1);
187 }
188 
189 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
190 					  unsigned int index)
191 {
192 	index = macb_tx_ring_wrap(queue->bp, index);
193 	index = macb_adj_dma_desc_idx(queue->bp, index);
194 	return &queue->tx_ring[index];
195 }
196 
197 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
198 				       unsigned int index)
199 {
200 	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
201 }
202 
203 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
204 {
205 	dma_addr_t offset;
206 
207 	offset = macb_tx_ring_wrap(queue->bp, index) *
208 			macb_dma_desc_get_size(queue->bp);
209 
210 	return queue->tx_ring_dma + offset;
211 }
212 
213 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
214 {
215 	return index & (bp->rx_ring_size - 1);
216 }
217 
218 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
219 {
220 	index = macb_rx_ring_wrap(queue->bp, index);
221 	index = macb_adj_dma_desc_idx(queue->bp, index);
222 	return &queue->rx_ring[index];
223 }
224 
225 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
226 {
227 	return queue->rx_buffers + queue->bp->rx_buffer_size *
228 	       macb_rx_ring_wrap(queue->bp, index);
229 }
230 
231 /* I/O accessors */
232 static u32 hw_readl_native(struct macb *bp, int offset)
233 {
234 	return __raw_readl(bp->regs + offset);
235 }
236 
237 static void hw_writel_native(struct macb *bp, int offset, u32 value)
238 {
239 	__raw_writel(value, bp->regs + offset);
240 }
241 
242 static u32 hw_readl(struct macb *bp, int offset)
243 {
244 	return readl_relaxed(bp->regs + offset);
245 }
246 
247 static void hw_writel(struct macb *bp, int offset, u32 value)
248 {
249 	writel_relaxed(value, bp->regs + offset);
250 }
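/* The "native" accessors above use __raw_readl()/__raw_writel(), which do no
 * byte swapping; they are selected at probe time (see hw_is_native_io())
 * when CPU and controller endianness match. The relaxed accessors are
 * little-endian and omit the memory barriers of plain readl()/writel().
 */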
251 
252 /* Find the CPU endianness by using the loopback bit of the NCR register. When
253  * the CPU is big endian, we need to program swapped mode for management
254  * descriptor access.
255  */
256 static bool hw_is_native_io(void __iomem *addr)
257 {
258 	u32 value = MACB_BIT(LLB);
259 
260 	__raw_writel(value, addr + MACB_NCR);
261 	value = __raw_readl(addr + MACB_NCR);
262 
263 	/* Write 0 back to disable everything */
264 	__raw_writel(0, addr + MACB_NCR);
265 
266 	return value == MACB_BIT(LLB);
267 }
268 
269 static bool hw_is_gem(void __iomem *addr, bool native_io)
270 {
271 	u32 id;
272 
273 	if (native_io)
274 		id = __raw_readl(addr + MACB_MID);
275 	else
276 		id = readl_relaxed(addr + MACB_MID);
277 
278 	return MACB_BFEXT(IDNUM, id) >= 0x2;
279 }
280 
281 static void macb_set_hwaddr(struct macb *bp)
282 {
283 	u32 bottom;
284 	u16 top;
285 
286 	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
287 	macb_or_gem_writel(bp, SA1B, bottom);
288 	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
289 	macb_or_gem_writel(bp, SA1T, top);
290 
291 	/* Clear unused address register sets */
292 	macb_or_gem_writel(bp, SA2B, 0);
293 	macb_or_gem_writel(bp, SA2T, 0);
294 	macb_or_gem_writel(bp, SA3B, 0);
295 	macb_or_gem_writel(bp, SA3T, 0);
296 	macb_or_gem_writel(bp, SA4B, 0);
297 	macb_or_gem_writel(bp, SA4T, 0);
298 }
299 
300 static void macb_get_hwaddr(struct macb *bp)
301 {
302 	u32 bottom;
303 	u16 top;
304 	u8 addr[6];
305 	int i;
306 
307 	/* Check all 4 address register sets for a valid address */
308 	for (i = 0; i < 4; i++) {
309 		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
310 		top = macb_or_gem_readl(bp, SA1T + i * 8);
311 
312 		addr[0] = bottom & 0xff;
313 		addr[1] = (bottom >> 8) & 0xff;
314 		addr[2] = (bottom >> 16) & 0xff;
315 		addr[3] = (bottom >> 24) & 0xff;
316 		addr[4] = top & 0xff;
317 		addr[5] = (top >> 8) & 0xff;
318 
319 		if (is_valid_ether_addr(addr)) {
320 			eth_hw_addr_set(bp->dev, addr);
321 			return;
322 		}
323 	}
324 
325 	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
326 	eth_hw_addr_random(bp->dev);
327 }
328 
329 static int macb_mdio_wait_for_idle(struct macb *bp)
330 {
331 	u32 val;
332 
333 	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
334 				  1, MACB_MDIO_TIMEOUT);
335 }
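/* The helpers below compose IEEE 802.3 management frames in the MAN
 * register. As a rough sketch (field encodings per macb.h, not a datasheet
 * diagram), a Clause 22 read is a single frame:
 *
 *   SOF  = MACB_MAN_C22_SOF   start of frame
 *   RW   = MACB_MAN_C22_READ  operation code
 *   PHYA = PHY address (5 bits)
 *   REGA = register address (5 bits)
 *   CODE = MACB_MAN_C22_CODE  turnaround code
 *   DATA = 16-bit payload, read back via MACB_BFEXT(DATA, ...)
 *
 * Clause 45 access is a two-step sequence: an ADDR frame carrying the
 * target register number in DATA, then a READ or WRITE frame.
 */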
336 
337 static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
338 {
339 	struct macb *bp = bus->priv;
340 	int status;
341 
342 	status = pm_runtime_resume_and_get(&bp->pdev->dev);
343 	if (status < 0)
344 		goto mdio_pm_exit;
345 
346 	status = macb_mdio_wait_for_idle(bp);
347 	if (status < 0)
348 		goto mdio_read_exit;
349 
350 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
351 			      | MACB_BF(RW, MACB_MAN_C22_READ)
352 			      | MACB_BF(PHYA, mii_id)
353 			      | MACB_BF(REGA, regnum)
354 			      | MACB_BF(CODE, MACB_MAN_C22_CODE)));
355 
356 	status = macb_mdio_wait_for_idle(bp);
357 	if (status < 0)
358 		goto mdio_read_exit;
359 
360 	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
361 
362 mdio_read_exit:
363 	pm_runtime_mark_last_busy(&bp->pdev->dev);
364 	pm_runtime_put_autosuspend(&bp->pdev->dev);
365 mdio_pm_exit:
366 	return status;
367 }
368 
369 static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad,
370 			      int regnum)
371 {
372 	struct macb *bp = bus->priv;
373 	int status;
374 
375 	status = pm_runtime_get_sync(&bp->pdev->dev);
376 	if (status < 0) {
377 		pm_runtime_put_noidle(&bp->pdev->dev);
378 		goto mdio_pm_exit;
379 	}
380 
381 	status = macb_mdio_wait_for_idle(bp);
382 	if (status < 0)
383 		goto mdio_read_exit;
384 
385 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
386 			      | MACB_BF(RW, MACB_MAN_C45_ADDR)
387 			      | MACB_BF(PHYA, mii_id)
388 			      | MACB_BF(REGA, devad & 0x1F)
389 			      | MACB_BF(DATA, regnum & 0xFFFF)
390 			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));
391 
392 	status = macb_mdio_wait_for_idle(bp);
393 	if (status < 0)
394 		goto mdio_read_exit;
395 
396 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
397 			      | MACB_BF(RW, MACB_MAN_C45_READ)
398 			      | MACB_BF(PHYA, mii_id)
399 			      | MACB_BF(REGA, devad & 0x1F)
400 			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));
401 
402 	status = macb_mdio_wait_for_idle(bp);
403 	if (status < 0)
404 		goto mdio_read_exit;
405 
406 	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
407 
408 mdio_read_exit:
409 	pm_runtime_mark_last_busy(&bp->pdev->dev);
410 	pm_runtime_put_autosuspend(&bp->pdev->dev);
411 mdio_pm_exit:
412 	return status;
413 }
414 
415 static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
416 			       u16 value)
417 {
418 	struct macb *bp = bus->priv;
419 	int status;
420 
421 	status = pm_runtime_resume_and_get(&bp->pdev->dev);
422 	if (status < 0)
423 		goto mdio_pm_exit;
424 
425 	status = macb_mdio_wait_for_idle(bp);
426 	if (status < 0)
427 		goto mdio_write_exit;
428 
429 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
430 			      | MACB_BF(RW, MACB_MAN_C22_WRITE)
431 			      | MACB_BF(PHYA, mii_id)
432 			      | MACB_BF(REGA, regnum)
433 			      | MACB_BF(CODE, MACB_MAN_C22_CODE)
434 			      | MACB_BF(DATA, value)));
435 
436 	status = macb_mdio_wait_for_idle(bp);
437 	if (status < 0)
438 		goto mdio_write_exit;
439 
440 mdio_write_exit:
441 	pm_runtime_mark_last_busy(&bp->pdev->dev);
442 	pm_runtime_put_autosuspend(&bp->pdev->dev);
443 mdio_pm_exit:
444 	return status;
445 }
446 
447 static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id,
448 			       int devad, int regnum,
449 			       u16 value)
450 {
451 	struct macb *bp = bus->priv;
452 	int status;
453 
454 	status = pm_runtime_get_sync(&bp->pdev->dev);
455 	if (status < 0) {
456 		pm_runtime_put_noidle(&bp->pdev->dev);
457 		goto mdio_pm_exit;
458 	}
459 
460 	status = macb_mdio_wait_for_idle(bp);
461 	if (status < 0)
462 		goto mdio_write_exit;
463 
464 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
465 			      | MACB_BF(RW, MACB_MAN_C45_ADDR)
466 			      | MACB_BF(PHYA, mii_id)
467 			      | MACB_BF(REGA, devad & 0x1F)
468 			      | MACB_BF(DATA, regnum & 0xFFFF)
469 			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));
470 
471 	status = macb_mdio_wait_for_idle(bp);
472 	if (status < 0)
473 		goto mdio_write_exit;
474 
475 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
476 			      | MACB_BF(RW, MACB_MAN_C45_WRITE)
477 			      | MACB_BF(PHYA, mii_id)
478 			      | MACB_BF(REGA, devad & 0x1F)
479 			      | MACB_BF(CODE, MACB_MAN_C45_CODE)
480 			      | MACB_BF(DATA, value)));
481 
482 	status = macb_mdio_wait_for_idle(bp);
483 	if (status < 0)
484 		goto mdio_write_exit;
485 
486 mdio_write_exit:
487 	pm_runtime_mark_last_busy(&bp->pdev->dev);
488 	pm_runtime_put_autosuspend(&bp->pdev->dev);
489 mdio_pm_exit:
490 	return status;
491 }
492 
493 static void macb_init_buffers(struct macb *bp)
494 {
495 	struct macb_queue *queue;
496 	unsigned int q;
497 
498 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
499 		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
500 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
501 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
502 			queue_writel(queue, RBQPH,
503 				     upper_32_bits(queue->rx_ring_dma));
504 #endif
505 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
506 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
507 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
508 			queue_writel(queue, TBQPH,
509 				     upper_32_bits(queue->tx_ring_dma));
510 #endif
511 	}
512 }
513 
514 /**
515  * macb_set_tx_clk() - Set the TX clock rate to match the link speed
516  * @bp:		pointer to struct macb
517  * @speed:	link speed (SPEED_10, SPEED_100 or SPEED_1000)
518  */
519 static void macb_set_tx_clk(struct macb *bp, int speed)
520 {
521 	long ferr, rate, rate_rounded;
522 
523 	if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
524 		return;
525 
526 	/* In case of MII the PHY is the clock master */
527 	if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
528 		return;
529 
530 	switch (speed) {
531 	case SPEED_10:
532 		rate = 2500000;
533 		break;
534 	case SPEED_100:
535 		rate = 25000000;
536 		break;
537 	case SPEED_1000:
538 		rate = 125000000;
539 		break;
540 	default:
541 		return;
542 	}
543 
544 	rate_rounded = clk_round_rate(bp->tx_clk, rate);
545 	if (rate_rounded < 0)
546 		return;
547 
548 	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
549 	 * is not satisfied.
550 	 */
551 	ferr = abs(rate_rounded - rate);
552 	ferr = DIV_ROUND_UP(ferr, rate / 100000);
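	/* ferr is now in parts per 100,000, i.e. units of 10 ppm, so a
	 * value above 5 means the 50 ppm limit is exceeded.
	 */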
553 	if (ferr > 5)
554 		netdev_warn(bp->dev,
555 			    "unable to generate target frequency: %ld Hz\n",
556 			    rate);
557 
558 	if (clk_set_rate(bp->tx_clk, rate_rounded))
559 		netdev_err(bp->dev, "adjusting tx_clk failed.\n");
560 }
561 
562 static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
563 				 phy_interface_t interface, int speed,
564 				 int duplex)
565 {
566 	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
567 	u32 config;
568 
569 	config = gem_readl(bp, USX_CONTROL);
570 	config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
571 	config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
572 	config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
573 	config |= GEM_BIT(TX_EN);
574 	gem_writel(bp, USX_CONTROL, config);
575 }
576 
577 static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
578 				   struct phylink_link_state *state)
579 {
580 	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
581 	u32 val;
582 
583 	state->speed = SPEED_10000;
584 	state->duplex = 1;
585 	state->an_complete = 1;
586 
587 	val = gem_readl(bp, USX_STATUS);
588 	state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
589 	val = gem_readl(bp, NCFGR);
590 	if (val & GEM_BIT(PAE))
591 		state->pause = MLO_PAUSE_RX;
592 }
593 
594 static int macb_usx_pcs_config(struct phylink_pcs *pcs,
595 			       unsigned int mode,
596 			       phy_interface_t interface,
597 			       const unsigned long *advertising,
598 			       bool permit_pause_to_mac)
599 {
600 	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
601 
602 	gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
603 		   GEM_BIT(SIGNAL_OK));
604 
605 	return 0;
606 }
607 
608 static void macb_pcs_get_state(struct phylink_pcs *pcs,
609 			       struct phylink_link_state *state)
610 {
611 	state->link = 0;
612 }
613 
614 static void macb_pcs_an_restart(struct phylink_pcs *pcs)
615 {
616 	/* Not supported */
617 }
618 
619 static int macb_pcs_config(struct phylink_pcs *pcs,
620 			   unsigned int mode,
621 			   phy_interface_t interface,
622 			   const unsigned long *advertising,
623 			   bool permit_pause_to_mac)
624 {
625 	return 0;
626 }
627 
628 static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
629 	.pcs_get_state = macb_usx_pcs_get_state,
630 	.pcs_config = macb_usx_pcs_config,
631 	.pcs_link_up = macb_usx_pcs_link_up,
632 };
633 
634 static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
635 	.pcs_get_state = macb_pcs_get_state,
636 	.pcs_an_restart = macb_pcs_an_restart,
637 	.pcs_config = macb_pcs_config,
638 };
639 
640 static void macb_mac_config(struct phylink_config *config, unsigned int mode,
641 			    const struct phylink_link_state *state)
642 {
643 	struct net_device *ndev = to_net_dev(config->dev);
644 	struct macb *bp = netdev_priv(ndev);
645 	unsigned long flags;
646 	u32 old_ctrl, ctrl;
647 	u32 old_ncr, ncr;
648 
649 	spin_lock_irqsave(&bp->lock, flags);
650 
651 	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
652 	old_ncr = ncr = macb_or_gem_readl(bp, NCR);
653 
654 	if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
655 		if (state->interface == PHY_INTERFACE_MODE_RMII)
656 			ctrl |= MACB_BIT(RM9200_RMII);
657 	} else if (macb_is_gem(bp)) {
658 		ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
659 		ncr &= ~GEM_BIT(ENABLE_HS_MAC);
660 
661 		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
662 			ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
663 		} else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
664 			ctrl |= GEM_BIT(PCSSEL);
665 			ncr |= GEM_BIT(ENABLE_HS_MAC);
666 		} else if (bp->caps & MACB_CAPS_MIIONRGMII &&
667 			   bp->phy_interface == PHY_INTERFACE_MODE_MII) {
668 			ncr |= MACB_BIT(MIIONRGMII);
669 		}
670 	}
671 
672 	/* Apply the new configuration, if any */
673 	if (old_ctrl ^ ctrl)
674 		macb_or_gem_writel(bp, NCFGR, ctrl);
675 
676 	if (old_ncr ^ ncr)
677 		macb_or_gem_writel(bp, NCR, ncr);
678 
679 	/* Disable AN for SGMII fixed link configuration, enable otherwise.
680 	 * Must be written after PCSSEL is set in NCFGR,
681 	 * otherwise writes will not take effect.
682 	 */
683 	if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) {
684 		u32 pcsctrl, old_pcsctrl;
685 
686 		old_pcsctrl = gem_readl(bp, PCSCNTRL);
687 		if (mode == MLO_AN_FIXED)
688 			pcsctrl = old_pcsctrl & ~GEM_BIT(PCSAUTONEG);
689 		else
690 			pcsctrl = old_pcsctrl | GEM_BIT(PCSAUTONEG);
691 		if (old_pcsctrl != pcsctrl)
692 			gem_writel(bp, PCSCNTRL, pcsctrl);
693 	}
694 
695 	spin_unlock_irqrestore(&bp->lock, flags);
696 }
697 
698 static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
699 			       phy_interface_t interface)
700 {
701 	struct net_device *ndev = to_net_dev(config->dev);
702 	struct macb *bp = netdev_priv(ndev);
703 	struct macb_queue *queue;
704 	unsigned int q;
705 	u32 ctrl;
706 
707 	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
708 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
709 			queue_writel(queue, IDR,
710 				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
711 
712 	/* Disable Rx and Tx */
713 	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
714 	macb_writel(bp, NCR, ctrl);
715 
716 	netif_tx_stop_all_queues(ndev);
717 }
718 
719 static void macb_mac_link_up(struct phylink_config *config,
720 			     struct phy_device *phy,
721 			     unsigned int mode, phy_interface_t interface,
722 			     int speed, int duplex,
723 			     bool tx_pause, bool rx_pause)
724 {
725 	struct net_device *ndev = to_net_dev(config->dev);
726 	struct macb *bp = netdev_priv(ndev);
727 	struct macb_queue *queue;
728 	unsigned long flags;
729 	unsigned int q;
730 	u32 ctrl;
731 
732 	spin_lock_irqsave(&bp->lock, flags);
733 
734 	ctrl = macb_or_gem_readl(bp, NCFGR);
735 
736 	ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
737 
738 	if (speed == SPEED_100)
739 		ctrl |= MACB_BIT(SPD);
740 
741 	if (duplex)
742 		ctrl |= MACB_BIT(FD);
743 
744 	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
745 		ctrl &= ~MACB_BIT(PAE);
746 		if (macb_is_gem(bp)) {
747 			ctrl &= ~GEM_BIT(GBE);
748 
749 			if (speed == SPEED_1000)
750 				ctrl |= GEM_BIT(GBE);
751 		}
752 
753 		if (rx_pause)
754 			ctrl |= MACB_BIT(PAE);
755 
756 		macb_set_tx_clk(bp, speed);
757 
758 		/* Initialize rings & buffers, as clearing MACB_BIT(TE) on link down
759 		 * cleared the pipeline and control registers.
760 		 */
761 		bp->macbgem_ops.mog_init_rings(bp);
762 		macb_init_buffers(bp);
763 
764 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
765 			queue_writel(queue, IER,
766 				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
767 	}
768 
769 	macb_or_gem_writel(bp, NCFGR, ctrl);
770 
771 	if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
772 		gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
773 							gem_readl(bp, HS_MAC_CONFIG)));
774 
775 	spin_unlock_irqrestore(&bp->lock, flags);
776 
777 	/* Enable Rx and Tx */
778 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
779 
780 	netif_tx_wake_all_queues(ndev);
781 }
782 
783 static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
784 					       phy_interface_t interface)
785 {
786 	struct net_device *ndev = to_net_dev(config->dev);
787 	struct macb *bp = netdev_priv(ndev);
788 
789 	if (interface == PHY_INTERFACE_MODE_10GBASER)
790 		return &bp->phylink_usx_pcs;
791 	else if (interface == PHY_INTERFACE_MODE_SGMII)
792 		return &bp->phylink_sgmii_pcs;
793 	else
794 		return NULL;
795 }
796 
797 static const struct phylink_mac_ops macb_phylink_ops = {
798 	.mac_select_pcs = macb_mac_select_pcs,
799 	.mac_config = macb_mac_config,
800 	.mac_link_down = macb_mac_link_down,
801 	.mac_link_up = macb_mac_link_up,
802 };
803 
804 static bool macb_phy_handle_exists(struct device_node *dn)
805 {
806 	dn = of_parse_phandle(dn, "phy-handle", 0);
807 	of_node_put(dn);
808 	return dn != NULL;
809 }
810 
811 static int macb_phylink_connect(struct macb *bp)
812 {
813 	struct device_node *dn = bp->pdev->dev.of_node;
814 	struct net_device *dev = bp->dev;
815 	struct phy_device *phydev;
816 	int ret;
817 
818 	if (dn)
819 		ret = phylink_of_phy_connect(bp->phylink, dn, 0);
820 
821 	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
822 		phydev = phy_find_first(bp->mii_bus);
823 		if (!phydev) {
824 			netdev_err(dev, "no PHY found\n");
825 			return -ENXIO;
826 		}
827 
828 		/* attach the mac to the phy */
829 		ret = phylink_connect_phy(bp->phylink, phydev);
830 	}
831 
832 	if (ret) {
833 		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
834 		return ret;
835 	}
836 
837 	phylink_start(bp->phylink);
838 
839 	return 0;
840 }
841 
842 static void macb_get_pcs_fixed_state(struct phylink_config *config,
843 				     struct phylink_link_state *state)
844 {
845 	struct net_device *ndev = to_net_dev(config->dev);
846 	struct macb *bp = netdev_priv(ndev);
847 
848 	state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
849 }
850 
851 /* based on au1000_eth.c */
852 static int macb_mii_probe(struct net_device *dev)
853 {
854 	struct macb *bp = netdev_priv(dev);
855 
856 	bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
857 	bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
858 
859 	bp->phylink_config.dev = &dev->dev;
860 	bp->phylink_config.type = PHYLINK_NETDEV;
861 	bp->phylink_config.mac_managed_pm = true;
862 
863 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
864 		bp->phylink_config.poll_fixed_state = true;
865 		bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
866 	}
867 
868 	bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
869 		MAC_10 | MAC_100;
870 
871 	__set_bit(PHY_INTERFACE_MODE_MII,
872 		  bp->phylink_config.supported_interfaces);
873 	__set_bit(PHY_INTERFACE_MODE_RMII,
874 		  bp->phylink_config.supported_interfaces);
875 
876 	/* Determine what modes are supported */
877 	if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
878 		bp->phylink_config.mac_capabilities |= MAC_1000FD;
879 		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
880 			bp->phylink_config.mac_capabilities |= MAC_1000HD;
881 
882 		__set_bit(PHY_INTERFACE_MODE_GMII,
883 			  bp->phylink_config.supported_interfaces);
884 		phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
885 
886 		if (bp->caps & MACB_CAPS_PCS)
887 			__set_bit(PHY_INTERFACE_MODE_SGMII,
888 				  bp->phylink_config.supported_interfaces);
889 
890 		if (bp->caps & MACB_CAPS_HIGH_SPEED) {
891 			__set_bit(PHY_INTERFACE_MODE_10GBASER,
892 				  bp->phylink_config.supported_interfaces);
893 			bp->phylink_config.mac_capabilities |= MAC_10000FD;
894 		}
895 	}
896 
897 	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
898 				     bp->phy_interface, &macb_phylink_ops);
899 	if (IS_ERR(bp->phylink)) {
900 		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
901 			   PTR_ERR(bp->phylink));
902 		return PTR_ERR(bp->phylink);
903 	}
904 
905 	return 0;
906 }
907 
908 static int macb_mdiobus_register(struct macb *bp)
909 {
910 	struct device_node *child, *np = bp->pdev->dev.of_node;
911 
912 	/* If we have a child named mdio, probe it instead of looking for PHYs
913 	 * directly under the MAC node
914 	 */
915 	child = of_get_child_by_name(np, "mdio");
916 	if (child) {
917 		int ret = of_mdiobus_register(bp->mii_bus, child);
918 
919 		of_node_put(child);
920 		return ret;
921 	}
922 
923 	if (of_phy_is_fixed_link(np))
924 		return mdiobus_register(bp->mii_bus);
925 
926 	/* Only create the PHY from the device tree if at least one PHY is
927 	 * described. Otherwise scan the entire MDIO bus. We do this to support
928 	 * old device trees that did not follow best practices and did not
929 	 * describe their network PHYs.
930 	 */
931 	for_each_available_child_of_node(np, child)
932 		if (of_mdiobus_child_is_phy(child)) {
933 			/* The loop increments the child refcount,
934 			 * decrement it before returning.
935 			 */
936 			of_node_put(child);
937 
938 			return of_mdiobus_register(bp->mii_bus, np);
939 		}
940 
941 	return mdiobus_register(bp->mii_bus);
942 }
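/* For illustration, a hypothetical device tree fragment with a dedicated
 * "mdio" child node, which makes the first branch above take effect (node
 * names and the unit address are made up):
 *
 *	ethernet@ff0e0000 {
 *		compatible = "cdns,macb";
 *		...
 *		mdio {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			phy0: ethernet-phy@0 {
 *				reg = <0>;
 *			};
 *		};
 *	};
 */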
943 
944 static int macb_mii_init(struct macb *bp)
945 {
946 	int err = -ENXIO;
947 
948 	/* Enable management port */
949 	macb_writel(bp, NCR, MACB_BIT(MPE));
950 
951 	bp->mii_bus = mdiobus_alloc();
952 	if (!bp->mii_bus) {
953 		err = -ENOMEM;
954 		goto err_out;
955 	}
956 
957 	bp->mii_bus->name = "MACB_mii_bus";
958 	bp->mii_bus->read = &macb_mdio_read_c22;
959 	bp->mii_bus->write = &macb_mdio_write_c22;
960 	bp->mii_bus->read_c45 = &macb_mdio_read_c45;
961 	bp->mii_bus->write_c45 = &macb_mdio_write_c45;
962 	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
963 		 bp->pdev->name, bp->pdev->id);
964 	bp->mii_bus->priv = bp;
965 	bp->mii_bus->parent = &bp->pdev->dev;
966 
967 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
968 
969 	err = macb_mdiobus_register(bp);
970 	if (err)
971 		goto err_out_free_mdiobus;
972 
973 	err = macb_mii_probe(bp->dev);
974 	if (err)
975 		goto err_out_unregister_bus;
976 
977 	return 0;
978 
979 err_out_unregister_bus:
980 	mdiobus_unregister(bp->mii_bus);
981 err_out_free_mdiobus:
982 	mdiobus_free(bp->mii_bus);
983 err_out:
984 	return err;
985 }
986 
987 static void macb_update_stats(struct macb *bp)
988 {
989 	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
990 	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
991 	int offset = MACB_PFR;
992 
993 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
994 
995 	for (; p < end; p++, offset += 4)
996 		*p += bp->macb_reg_readl(bp, offset);
997 }
998 
999 static int macb_halt_tx(struct macb *bp)
1000 {
1001 	unsigned long	halt_time, timeout;
1002 	u32		status;
1003 
1004 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
1005 
1006 	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
1007 	do {
1008 		halt_time = jiffies;
1009 		status = macb_readl(bp, TSR);
1010 		if (!(status & MACB_BIT(TGO)))
1011 			return 0;
1012 
1013 		udelay(250);
1014 	} while (time_before(halt_time, timeout));
1015 
1016 	return -ETIMEDOUT;
1017 }
1018 
1019 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
1020 {
1021 	if (tx_skb->mapping) {
1022 		if (tx_skb->mapped_as_page)
1023 			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
1024 				       tx_skb->size, DMA_TO_DEVICE);
1025 		else
1026 			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
1027 					 tx_skb->size, DMA_TO_DEVICE);
1028 		tx_skb->mapping = 0;
1029 	}
1030 
1031 	if (tx_skb->skb) {
1032 		napi_consume_skb(tx_skb->skb, budget);
1033 		tx_skb->skb = NULL;
1034 	}
1035 }
1036 
1037 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
1038 {
1039 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1040 	struct macb_dma_desc_64 *desc_64;
1041 
1042 	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
1043 		desc_64 = macb_64b_desc(bp, desc);
1044 		desc_64->addrh = upper_32_bits(addr);
1045 		/* The low bits of RX address contain the RX_USED bit, clearing
1046 		 * of which allows packet RX. Make sure the high bits are also
1047 		 * visible to HW at that point.
1048 		 */
1049 		dma_wmb();
1050 	}
1051 #endif
1052 	desc->addr = lower_32_bits(addr);
1053 }
1054 
1055 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
1056 {
1057 	dma_addr_t addr = 0;
1058 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1059 	struct macb_dma_desc_64 *desc_64;
1060 
1061 	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
1062 		desc_64 = macb_64b_desc(bp, desc);
1063 		addr = ((u64)(desc_64->addrh) << 32);
1064 	}
1065 #endif
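	/* The low bits of desc->addr hold the RX_USED and RX_WRAP flags
	 * rather than address bits, so extract the address through the
	 * RX_WADDR field to mask them off.
	 */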
1066 	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1067 	return addr;
1068 }
1069 
1070 static void macb_tx_error_task(struct work_struct *work)
1071 {
1072 	struct macb_queue	*queue = container_of(work, struct macb_queue,
1073 						      tx_error_task);
1074 	struct macb		*bp = queue->bp;
1075 	struct macb_tx_skb	*tx_skb;
1076 	struct macb_dma_desc	*desc;
1077 	struct sk_buff		*skb;
1078 	unsigned int		tail;
1079 	unsigned long		flags;
1080 
1081 	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
1082 		    (unsigned int)(queue - bp->queues),
1083 		    queue->tx_tail, queue->tx_head);
1084 
1085 	/* Prevent the queue NAPI TX poll from running, as it calls
1086 	 * macb_tx_complete(), which in turn may call netif_wake_subqueue().
1087 	 * As explained below, we have to halt the transmission before updating
1088 	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
1089 	 * network engine about the macb/gem being halted.
1090 	 */
1091 	napi_disable(&queue->napi_tx);
1092 	spin_lock_irqsave(&bp->lock, flags);
1093 
1094 	/* Make sure nobody is trying to queue up new packets */
1095 	netif_tx_stop_all_queues(bp->dev);
1096 
1097 	/* Stop transmission now (in case we have just queued new
1098 	 * packets): the macb/gem must be halted before the TBQP
1099 	 * register can be rewritten.
1100 	 */
1101 	if (macb_halt_tx(bp))
1102 		/* Just complain for now; reinitializing the TX path might help */
1103 		netdev_err(bp->dev, "BUG: halt tx timed out\n");
1104 
1105 	/* Walk the frames in the TX queue, including the ones that caused
1106 	 * the error, and free their transmit buffers to the upper layer.
1107 	 */
1108 	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
1109 		u32	ctrl;
1110 
1111 		desc = macb_tx_desc(queue, tail);
1112 		ctrl = desc->ctrl;
1113 		tx_skb = macb_tx_skb(queue, tail);
1114 		skb = tx_skb->skb;
1115 
1116 		if (ctrl & MACB_BIT(TX_USED)) {
1117 			/* skb is set for the last buffer of the frame */
1118 			while (!skb) {
1119 				macb_tx_unmap(bp, tx_skb, 0);
1120 				tail++;
1121 				tx_skb = macb_tx_skb(queue, tail);
1122 				skb = tx_skb->skb;
1123 			}
1124 
1125 			/* ctrl still refers to the first buffer descriptor
1126 			 * since it's the only one written back by the hardware
1127 			 */
1128 			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
1129 				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
1130 					    macb_tx_ring_wrap(bp, tail),
1131 					    skb->data);
1132 				bp->dev->stats.tx_packets++;
1133 				queue->stats.tx_packets++;
1134 				bp->dev->stats.tx_bytes += skb->len;
1135 				queue->stats.tx_bytes += skb->len;
1136 			}
1137 		} else {
1138 			/* "Buffers exhausted mid-frame" errors may only happen
1139 			 * if the driver is buggy, so complain loudly about
1140 			 * those. Statistics are updated by hardware.
1141 			 */
1142 			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
1143 				netdev_err(bp->dev,
1144 					   "BUG: TX buffers exhausted mid-frame\n");
1145 
1146 			desc->ctrl = ctrl | MACB_BIT(TX_USED);
1147 		}
1148 
1149 		macb_tx_unmap(bp, tx_skb, 0);
1150 	}
1151 
1152 	/* Set end of TX queue */
1153 	desc = macb_tx_desc(queue, 0);
1154 	macb_set_addr(bp, desc, 0);
1155 	desc->ctrl = MACB_BIT(TX_USED);
1156 
1157 	/* Make descriptor updates visible to hardware */
1158 	wmb();
1159 
1160 	/* Reinitialize the TX desc queue */
1161 	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1162 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1163 	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1164 		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
1165 #endif
1166 	/* Make TX ring reflect state of hardware */
1167 	queue->tx_head = 0;
1168 	queue->tx_tail = 0;
1169 
1170 	/* Housework before enabling TX IRQ */
1171 	macb_writel(bp, TSR, macb_readl(bp, TSR));
1172 	queue_writel(queue, IER, MACB_TX_INT_FLAGS);
1173 
1174 	/* Now we are ready to start transmission again */
1175 	netif_tx_start_all_queues(bp->dev);
1176 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1177 
1178 	spin_unlock_irqrestore(&bp->lock, flags);
1179 	napi_enable(&queue->napi_tx);
1180 }
1181 
1182 static bool ptp_one_step_sync(struct sk_buff *skb)
1183 {
1184 	struct ptp_header *hdr;
1185 	unsigned int ptp_class;
1186 	u8 msgtype;
1187 
1188 	/* No need to parse packet if PTP TS is not involved */
1189 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
1190 		goto not_oss;
1191 
1192 	/* Identify and return whether PTP one step sync is being processed */
1193 	ptp_class = ptp_classify_raw(skb);
1194 	if (ptp_class == PTP_CLASS_NONE)
1195 		goto not_oss;
1196 
1197 	hdr = ptp_parse_header(skb, ptp_class);
1198 	if (!hdr)
1199 		goto not_oss;
1200 
1201 	if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
1202 		goto not_oss;
1203 
1204 	msgtype = ptp_get_msgtype(hdr, ptp_class);
1205 	if (msgtype == PTP_MSGTYPE_SYNC)
1206 		return true;
1207 
1208 not_oss:
1209 	return false;
1210 }
1211 
1212 static int macb_tx_complete(struct macb_queue *queue, int budget)
1213 {
1214 	struct macb *bp = queue->bp;
1215 	u16 queue_index = queue - bp->queues;
1216 	unsigned int tail;
1217 	unsigned int head;
1218 	int packets = 0;
1219 
1220 	spin_lock(&queue->tx_ptr_lock);
1221 	head = queue->tx_head;
1222 	for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
1223 		struct macb_tx_skb	*tx_skb;
1224 		struct sk_buff		*skb;
1225 		struct macb_dma_desc	*desc;
1226 		u32			ctrl;
1227 
1228 		desc = macb_tx_desc(queue, tail);
1229 
1230 		/* Make hw descriptor updates visible to CPU */
1231 		rmb();
1232 
1233 		ctrl = desc->ctrl;
1234 
1235 		/* TX_USED bit is only set by hardware on the very first buffer
1236 		 * descriptor of the transmitted frame.
1237 		 */
1238 		if (!(ctrl & MACB_BIT(TX_USED)))
1239 			break;
1240 
1241 		/* Process all buffers of the current transmitted frame */
1242 		for (;; tail++) {
1243 			tx_skb = macb_tx_skb(queue, tail);
1244 			skb = tx_skb->skb;
1245 
1246 			/* First, update TX stats if needed */
1247 			if (skb) {
1248 				if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1249 				    !ptp_one_step_sync(skb))
1250 					gem_ptp_do_txstamp(bp, skb, desc);
1251 
1252 				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
1253 					    macb_tx_ring_wrap(bp, tail),
1254 					    skb->data);
1255 				bp->dev->stats.tx_packets++;
1256 				queue->stats.tx_packets++;
1257 				bp->dev->stats.tx_bytes += skb->len;
1258 				queue->stats.tx_bytes += skb->len;
1259 				packets++;
1260 			}
1261 
1262 			/* Now we can safely release resources */
1263 			macb_tx_unmap(bp, tx_skb, budget);
1264 
1265 			/* skb is set only for the last buffer of the frame.
1266 			 * WARNING: at this point skb has been freed by
1267 			 * macb_tx_unmap().
1268 			 */
1269 			if (skb)
1270 				break;
1271 		}
1272 	}
1273 
1274 	queue->tx_tail = tail;
1275 	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
1276 	    CIRC_CNT(queue->tx_head, queue->tx_tail,
1277 		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
1278 		netif_wake_subqueue(bp->dev, queue_index);
1279 	spin_unlock(&queue->tx_ptr_lock);
1280 
1281 	return packets;
1282 }
1283 
1284 static void gem_rx_refill(struct macb_queue *queue)
1285 {
1286 	unsigned int		entry;
1287 	struct sk_buff		*skb;
1288 	dma_addr_t		paddr;
1289 	struct macb *bp = queue->bp;
1290 	struct macb_dma_desc *desc;
1291 
1292 	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
1293 			bp->rx_ring_size) > 0) {
1294 		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
1295 
1296 		/* Make hw descriptor updates visible to CPU */
1297 		rmb();
1298 
1299 		desc = macb_rx_desc(queue, entry);
1300 
1301 		if (!queue->rx_skbuff[entry]) {
1302 			/* allocate sk_buff for this free entry in ring */
1303 			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
1304 			if (unlikely(!skb)) {
1305 				netdev_err(bp->dev,
1306 					   "Unable to allocate sk_buff\n");
1307 				break;
1308 			}
1309 
1310 			/* now fill corresponding descriptor entry */
1311 			paddr = dma_map_single(&bp->pdev->dev, skb->data,
1312 					       bp->rx_buffer_size,
1313 					       DMA_FROM_DEVICE);
1314 			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
1315 				dev_kfree_skb(skb);
1316 				break;
1317 			}
1318 
1319 			queue->rx_skbuff[entry] = skb;
1320 
1321 			if (entry == bp->rx_ring_size - 1)
1322 				paddr |= MACB_BIT(RX_WRAP);
1323 			desc->ctrl = 0;
1324 			/* Setting addr clears RX_USED and allows reception,
1325 			 * make sure ctrl is cleared first to avoid a race.
1326 			 */
1327 			dma_wmb();
1328 			macb_set_addr(bp, desc, paddr);
1329 
1330 			/* properly align Ethernet header */
1331 			skb_reserve(skb, NET_IP_ALIGN);
1332 		} else {
1333 			desc->ctrl = 0;
1334 			dma_wmb();
1335 			desc->addr &= ~MACB_BIT(RX_USED);
1336 		}
1337 		queue->rx_prepared_head++;
1338 	}
1339 
1340 	/* Make descriptor updates visible to hardware */
1341 	wmb();
1342 
1343 	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
1344 			queue, queue->rx_prepared_head, queue->rx_tail);
1345 }
1346 
1347 /* Mark DMA descriptors from begin up to and not including end as unused */
1348 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
1349 				  unsigned int end)
1350 {
1351 	unsigned int frag;
1352 
1353 	for (frag = begin; frag != end; frag++) {
1354 		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
1355 
1356 		desc->addr &= ~MACB_BIT(RX_USED);
1357 	}
1358 
1359 	/* Make descriptor updates visible to hardware */
1360 	wmb();
1361 
1362 	/* When this happens, the hardware stats registers for
1363 	 * whatever caused this are updated, so we don't have to record
1364 	 * anything.
1365 	 */
1366 }
1367 
1368 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
1369 		  int budget)
1370 {
1371 	struct macb *bp = queue->bp;
1372 	unsigned int		len;
1373 	unsigned int		entry;
1374 	struct sk_buff		*skb;
1375 	struct macb_dma_desc	*desc;
1376 	int			count = 0;
1377 
1378 	while (count < budget) {
1379 		u32 ctrl;
1380 		dma_addr_t addr;
1381 		bool rxused;
1382 
1383 		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1384 		desc = macb_rx_desc(queue, entry);
1385 
1386 		/* Make hw descriptor updates visible to CPU */
1387 		rmb();
1388 
1389 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
1390 		addr = macb_get_addr(bp, desc);
1391 
1392 		if (!rxused)
1393 			break;
1394 
1395 		/* Ensure ctrl is at least as up-to-date as rxused */
1396 		dma_rmb();
1397 
1398 		ctrl = desc->ctrl;
1399 
1400 		queue->rx_tail++;
1401 		count++;
1402 
1403 		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
1404 			netdev_err(bp->dev,
1405 				   "not whole frame pointed by descriptor\n");
1406 			bp->dev->stats.rx_dropped++;
1407 			queue->stats.rx_dropped++;
1408 			break;
1409 		}
1410 		skb = queue->rx_skbuff[entry];
1411 		if (unlikely(!skb)) {
1412 			netdev_err(bp->dev,
1413 				   "inconsistent Rx descriptor chain\n");
1414 			bp->dev->stats.rx_dropped++;
1415 			queue->stats.rx_dropped++;
1416 			break;
1417 		}
1418 		/* now everything is ready for receiving the packet */
1419 		queue->rx_skbuff[entry] = NULL;
1420 		len = ctrl & bp->rx_frm_len_mask;
1421 
1422 		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1423 
1424 		skb_put(skb, len);
1425 		dma_unmap_single(&bp->pdev->dev, addr,
1426 				 bp->rx_buffer_size, DMA_FROM_DEVICE);
1427 
1428 		skb->protocol = eth_type_trans(skb, bp->dev);
1429 		skb_checksum_none_assert(skb);
1430 		if (bp->dev->features & NETIF_F_RXCSUM &&
1431 		    !(bp->dev->flags & IFF_PROMISC) &&
1432 		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
1433 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1434 
1435 		bp->dev->stats.rx_packets++;
1436 		queue->stats.rx_packets++;
1437 		bp->dev->stats.rx_bytes += skb->len;
1438 		queue->stats.rx_bytes += skb->len;
1439 
1440 		gem_ptp_do_rxstamp(bp, skb, desc);
1441 
1442 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1443 		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1444 			    skb->len, skb->csum);
1445 		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
1446 			       skb_mac_header(skb), 16, true);
1447 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
1448 			       skb->data, 32, true);
1449 #endif
1450 
1451 		napi_gro_receive(napi, skb);
1452 	}
1453 
1454 	gem_rx_refill(queue);
1455 
1456 	return count;
1457 }
1458 
1459 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
1460 			 unsigned int first_frag, unsigned int last_frag)
1461 {
1462 	unsigned int len;
1463 	unsigned int frag;
1464 	unsigned int offset;
1465 	struct sk_buff *skb;
1466 	struct macb_dma_desc *desc;
1467 	struct macb *bp = queue->bp;
1468 
1469 	desc = macb_rx_desc(queue, last_frag);
1470 	len = desc->ctrl & bp->rx_frm_len_mask;
1471 
1472 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
1473 		macb_rx_ring_wrap(bp, first_frag),
1474 		macb_rx_ring_wrap(bp, last_frag), len);
1475 
1476 	/* The ethernet header starts NET_IP_ALIGN bytes into the
1477 	 * first buffer. Since the header is 14 bytes, this makes the
1478 	 * payload word-aligned.
1479 	 *
1480 	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
1481 	 * the two padding bytes into the skb so that we avoid hitting
1482 	 * the slowpath in memcpy(), and pull them off afterwards.
1483 	 */
1484 	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
1485 	if (!skb) {
1486 		bp->dev->stats.rx_dropped++;
1487 		for (frag = first_frag; ; frag++) {
1488 			desc = macb_rx_desc(queue, frag);
1489 			desc->addr &= ~MACB_BIT(RX_USED);
1490 			if (frag == last_frag)
1491 				break;
1492 		}
1493 
1494 		/* Make descriptor updates visible to hardware */
1495 		wmb();
1496 
1497 		return 1;
1498 	}
1499 
1500 	offset = 0;
1501 	len += NET_IP_ALIGN;
1502 	skb_checksum_none_assert(skb);
1503 	skb_put(skb, len);
1504 
1505 	for (frag = first_frag; ; frag++) {
1506 		unsigned int frag_len = bp->rx_buffer_size;
1507 
1508 		if (offset + frag_len > len) {
1509 			if (unlikely(frag != last_frag)) {
1510 				dev_kfree_skb_any(skb);
1511 				return -1;
1512 			}
1513 			frag_len = len - offset;
1514 		}
1515 		skb_copy_to_linear_data_offset(skb, offset,
1516 					       macb_rx_buffer(queue, frag),
1517 					       frag_len);
1518 		offset += bp->rx_buffer_size;
1519 		desc = macb_rx_desc(queue, frag);
1520 		desc->addr &= ~MACB_BIT(RX_USED);
1521 
1522 		if (frag == last_frag)
1523 			break;
1524 	}
1525 
1526 	/* Make descriptor updates visible to hardware */
1527 	wmb();
1528 
1529 	__skb_pull(skb, NET_IP_ALIGN);
1530 	skb->protocol = eth_type_trans(skb, bp->dev);
1531 
1532 	bp->dev->stats.rx_packets++;
1533 	bp->dev->stats.rx_bytes += skb->len;
1534 	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1535 		    skb->len, skb->csum);
1536 	napi_gro_receive(napi, skb);
1537 
1538 	return 0;
1539 }
1540 
1541 static inline void macb_init_rx_ring(struct macb_queue *queue)
1542 {
1543 	struct macb *bp = queue->bp;
1544 	dma_addr_t addr;
1545 	struct macb_dma_desc *desc = NULL;
1546 	int i;
1547 
1548 	addr = queue->rx_buffers_dma;
1549 	for (i = 0; i < bp->rx_ring_size; i++) {
1550 		desc = macb_rx_desc(queue, i);
1551 		macb_set_addr(bp, desc, addr);
1552 		desc->ctrl = 0;
1553 		addr += bp->rx_buffer_size;
1554 	}
1555 	desc->addr |= MACB_BIT(RX_WRAP);
1556 	queue->rx_tail = 0;
1557 }
1558 
1559 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
1560 		   int budget)
1561 {
1562 	struct macb *bp = queue->bp;
1563 	bool reset_rx_queue = false;
1564 	int received = 0;
1565 	unsigned int tail;
1566 	int first_frag = -1;
1567 
1568 	for (tail = queue->rx_tail; budget > 0; tail++) {
1569 		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
1570 		u32 ctrl;
1571 
1572 		/* Make hw descriptor updates visible to CPU */
1573 		rmb();
1574 
1575 		if (!(desc->addr & MACB_BIT(RX_USED)))
1576 			break;
1577 
1578 		/* Ensure ctrl is at least as up-to-date as addr */
1579 		dma_rmb();
1580 
1581 		ctrl = desc->ctrl;
1582 
1583 		if (ctrl & MACB_BIT(RX_SOF)) {
1584 			if (first_frag != -1)
1585 				discard_partial_frame(queue, first_frag, tail);
1586 			first_frag = tail;
1587 		}
1588 
1589 		if (ctrl & MACB_BIT(RX_EOF)) {
1590 			int dropped;
1591 
1592 			if (unlikely(first_frag == -1)) {
1593 				reset_rx_queue = true;
1594 				continue;
1595 			}
1596 
1597 			dropped = macb_rx_frame(queue, napi, first_frag, tail);
1598 			first_frag = -1;
1599 			if (unlikely(dropped < 0)) {
1600 				reset_rx_queue = true;
1601 				continue;
1602 			}
1603 			if (!dropped) {
1604 				received++;
1605 				budget--;
1606 			}
1607 		}
1608 	}
1609 
1610 	if (unlikely(reset_rx_queue)) {
1611 		unsigned long flags;
1612 		u32 ctrl;
1613 
1614 		netdev_err(bp->dev, "RX queue corruption: reset it\n");
1615 
1616 		spin_lock_irqsave(&bp->lock, flags);
1617 
1618 		ctrl = macb_readl(bp, NCR);
1619 		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1620 
1621 		macb_init_rx_ring(queue);
1622 		queue_writel(queue, RBQP, queue->rx_ring_dma);
1623 
1624 		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1625 
1626 		spin_unlock_irqrestore(&bp->lock, flags);
1627 		return received;
1628 	}
1629 
1630 	if (first_frag != -1)
1631 		queue->rx_tail = first_frag;
1632 	else
1633 		queue->rx_tail = tail;
1634 
1635 	return received;
1636 }
1637 
1638 static bool macb_rx_pending(struct macb_queue *queue)
1639 {
1640 	struct macb *bp = queue->bp;
1641 	unsigned int		entry;
1642 	struct macb_dma_desc	*desc;
1643 
1644 	entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1645 	desc = macb_rx_desc(queue, entry);
1646 
1647 	/* Make hw descriptor updates visible to CPU */
1648 	rmb();
1649 
1650 	return (desc->addr & MACB_BIT(RX_USED)) != 0;
1651 }
1652 
1653 static int macb_rx_poll(struct napi_struct *napi, int budget)
1654 {
1655 	struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx);
1656 	struct macb *bp = queue->bp;
1657 	int work_done;
1658 
1659 	work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
1660 
1661 	netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
1662 		    (unsigned int)(queue - bp->queues), work_done, budget);
1663 
1664 	if (work_done < budget && napi_complete_done(napi, work_done)) {
1665 		queue_writel(queue, IER, bp->rx_intr_mask);
1666 
1667 		/* Packet completions only seem to propagate to raise
1668 		 * interrupts when interrupts are enabled at the time, so if
1669 		 * packets were received while interrupts were disabled,
1670 		 * they will not cause another interrupt to be generated when
1671 		 * interrupts are re-enabled.
1672 		 * Check for this case here to avoid losing a wakeup. This can
1673 		 * potentially race with the interrupt handler doing the same
1674 		 * actions if an interrupt is raised just after enabling them,
1675 		 * but this should be harmless.
1676 		 */
1677 		if (macb_rx_pending(queue)) {
1678 			queue_writel(queue, IDR, bp->rx_intr_mask);
1679 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1680 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
1681 			netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
1682 			napi_schedule(napi);
1683 		}
1684 	}
1685 
1686 	/* TODO: Handle errors */
1687 
1688 	return work_done;
1689 }
1690 
1691 static void macb_tx_restart(struct macb_queue *queue)
1692 {
1693 	struct macb *bp = queue->bp;
1694 	unsigned int head_idx, tbqp;
1695 
1696 	spin_lock(&queue->tx_ptr_lock);
1697 
1698 	if (queue->tx_head == queue->tx_tail)
1699 		goto out_tx_ptr_unlock;
1700 
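	/* Convert the hardware's current descriptor pointer (TBQP) into a
	 * ring index and compare it with the software head: if they already
	 * match, the controller has caught up and no TSTART is needed.
	 */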
1701 	tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
1702 	tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
1703 	head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
1704 
1705 	if (tbqp == head_idx)
1706 		goto out_tx_ptr_unlock;
1707 
1708 	spin_lock_irq(&bp->lock);
1709 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1710 	spin_unlock_irq(&bp->lock);
1711 
1712 out_tx_ptr_unlock:
1713 	spin_unlock(&queue->tx_ptr_lock);
1714 }
1715 
1716 static bool macb_tx_complete_pending(struct macb_queue *queue)
1717 {
1718 	bool retval = false;
1719 
1720 	spin_lock(&queue->tx_ptr_lock);
1721 	if (queue->tx_head != queue->tx_tail) {
1722 		/* Make hw descriptor updates visible to CPU */
1723 		rmb();
1724 
1725 		if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
1726 			retval = true;
1727 	}
1728 	spin_unlock(&queue->tx_ptr_lock);
1729 	return retval;
1730 }
1731 
1732 static int macb_tx_poll(struct napi_struct *napi, int budget)
1733 {
1734 	struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx);
1735 	struct macb *bp = queue->bp;
1736 	int work_done;
1737 
1738 	work_done = macb_tx_complete(queue, budget);
1739 
1740 	rmb(); // ensure txubr_pending is up to date
1741 	if (queue->txubr_pending) {
1742 		queue->txubr_pending = false;
1743 		netdev_vdbg(bp->dev, "poll: tx restart\n");
1744 		macb_tx_restart(queue);
1745 	}
1746 
1747 	netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
1748 		    (unsigned int)(queue - bp->queues), work_done, budget);
1749 
1750 	if (work_done < budget && napi_complete_done(napi, work_done)) {
1751 		queue_writel(queue, IER, MACB_BIT(TCOMP));
1752 
1753 		/* Packet completions only seem to propagate to raise
1754 		 * interrupts when interrupts are enabled at the time, so if
1755 		 * packets were sent while interrupts were disabled,
1756 		 * they will not cause another interrupt to be generated when
1757 		 * interrupts are re-enabled.
1758 		 * Check for this case here to avoid losing a wakeup. This can
1759 		 * potentially race with the interrupt handler doing the same
1760 		 * actions if an interrupt is raised just after enabling them,
1761 		 * but this should be harmless.
1762 		 */
1763 		if (macb_tx_complete_pending(queue)) {
1764 			queue_writel(queue, IDR, MACB_BIT(TCOMP));
1765 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1766 				queue_writel(queue, ISR, MACB_BIT(TCOMP));
1767 			netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
1768 			napi_schedule(napi);
1769 		}
1770 	}
1771 
1772 	return work_done;
1773 }
1774 
1775 static void macb_hresp_error_task(struct tasklet_struct *t)
1776 {
1777 	struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
1778 	struct net_device *dev = bp->dev;
1779 	struct macb_queue *queue;
1780 	unsigned int q;
1781 	u32 ctrl;
1782 
1783 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1784 		queue_writel(queue, IDR, bp->rx_intr_mask |
1785 					 MACB_TX_INT_FLAGS |
1786 					 MACB_BIT(HRESP));
1787 	}
1788 	ctrl = macb_readl(bp, NCR);
1789 	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1790 	macb_writel(bp, NCR, ctrl);
1791 
1792 	netif_tx_stop_all_queues(dev);
1793 	netif_carrier_off(dev);
1794 
1795 	bp->macbgem_ops.mog_init_rings(bp);
1796 
1797 	/* Initialize TX and RX buffers */
1798 	macb_init_buffers(bp);
1799 
1800 	/* Enable interrupts */
1801 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1802 		queue_writel(queue, IER,
1803 			     bp->rx_intr_mask |
1804 			     MACB_TX_INT_FLAGS |
1805 			     MACB_BIT(HRESP));
1806 
1807 	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
1808 	macb_writel(bp, NCR, ctrl);
1809 
1810 	netif_carrier_on(dev);
1811 	netif_tx_start_all_queues(dev);
1812 }
1813 
1814 static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
1815 {
1816 	struct macb_queue *queue = dev_id;
1817 	struct macb *bp = queue->bp;
1818 	u32 status;
1819 
1820 	status = queue_readl(queue, ISR);
1821 
1822 	if (unlikely(!status))
1823 		return IRQ_NONE;
1824 
1825 	spin_lock(&bp->lock);
1826 
1827 	if (status & MACB_BIT(WOL)) {
1828 		queue_writel(queue, IDR, MACB_BIT(WOL));
1829 		macb_writel(bp, WOL, 0);
1830 		netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
1831 			    (unsigned int)(queue - bp->queues),
1832 			    (unsigned long)status);
1833 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1834 			queue_writel(queue, ISR, MACB_BIT(WOL));
1835 		pm_wakeup_event(&bp->pdev->dev, 0);
1836 	}
1837 
1838 	spin_unlock(&bp->lock);
1839 
1840 	return IRQ_HANDLED;
1841 }
1842 
1843 static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
1844 {
1845 	struct macb_queue *queue = dev_id;
1846 	struct macb *bp = queue->bp;
1847 	u32 status;
1848 
1849 	status = queue_readl(queue, ISR);
1850 
1851 	if (unlikely(!status))
1852 		return IRQ_NONE;
1853 
1854 	spin_lock(&bp->lock);
1855 
1856 	if (status & GEM_BIT(WOL)) {
1857 		queue_writel(queue, IDR, GEM_BIT(WOL));
1858 		gem_writel(bp, WOL, 0);
1859 		netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
1860 			    (unsigned int)(queue - bp->queues),
1861 			    (unsigned long)status);
1862 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1863 			queue_writel(queue, ISR, GEM_BIT(WOL));
1864 		pm_wakeup_event(&bp->pdev->dev, 0);
1865 	}
1866 
1867 	spin_unlock(&bp->lock);
1868 
1869 	return IRQ_HANDLED;
1870 }
1871 
1872 static irqreturn_t macb_interrupt(int irq, void *dev_id)
1873 {
1874 	struct macb_queue *queue = dev_id;
1875 	struct macb *bp = queue->bp;
1876 	struct net_device *dev = bp->dev;
1877 	u32 status, ctrl;
1878 
1879 	status = queue_readl(queue, ISR);
1880 
1881 	if (unlikely(!status))
1882 		return IRQ_NONE;
1883 
1884 	spin_lock(&bp->lock);
1885 
1886 	while (status) {
1887 		/* close possible race with dev_close */
1888 		if (unlikely(!netif_running(dev))) {
1889 			queue_writel(queue, IDR, -1);
1890 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1891 				queue_writel(queue, ISR, -1);
1892 			break;
1893 		}
1894 
1895 		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1896 			    (unsigned int)(queue - bp->queues),
1897 			    (unsigned long)status);
1898 
1899 		if (status & bp->rx_intr_mask) {
1900 			/* There's no point taking any more interrupts
1901 			 * until we have processed the buffers. The
1902 			 * scheduling call may fail if the poll routine
1903 			 * is already scheduled, so disable interrupts
1904 			 * now.
1905 			 */
1906 			queue_writel(queue, IDR, bp->rx_intr_mask);
1907 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1908 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
1909 
1910 			if (napi_schedule_prep(&queue->napi_rx)) {
1911 				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1912 				__napi_schedule(&queue->napi_rx);
1913 			}
1914 		}
1915 
1916 		if (status & (MACB_BIT(TCOMP) |
1917 			      MACB_BIT(TXUBR))) {
1918 			queue_writel(queue, IDR, MACB_BIT(TCOMP));
1919 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1920 				queue_writel(queue, ISR, MACB_BIT(TCOMP) |
1921 							 MACB_BIT(TXUBR));
1922 
1923 			if (status & MACB_BIT(TXUBR)) {
1924 				queue->txubr_pending = true;
1925 				wmb(); // ensure softirq can see update
1926 			}
1927 
1928 			if (napi_schedule_prep(&queue->napi_tx)) {
1929 				netdev_vdbg(bp->dev, "scheduling TX softirq\n");
1930 				__napi_schedule(&queue->napi_tx);
1931 			}
1932 		}
1933 
1934 		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1935 			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1936 			schedule_work(&queue->tx_error_task);
1937 
1938 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1939 				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1940 
1941 			break;
1942 		}
1943 
1944 		/* Link change detection isn't possible with RMII, so we'll
1945 		 * add that if/when we get our hands on a full-blown MII PHY.
1946 		 */
1947 
1948 		/* There is a hardware issue under heavy load where DMA can
1949 		 * stop, this causes endless "used buffer descriptor read"
1950 		 * interrupts but it can be cleared by re-enabling RX. See
1951 		 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
1952 		 * section 16.7.4 for details. RXUBR is only enabled for
1953 		 * these two versions.
1954 		 */
1955 		if (status & MACB_BIT(RXUBR)) {
1956 			ctrl = macb_readl(bp, NCR);
1957 			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1958 			wmb();
1959 			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1960 
1961 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1962 				queue_writel(queue, ISR, MACB_BIT(RXUBR));
1963 		}
1964 
1965 		if (status & MACB_BIT(ISR_ROVR)) {
1966 			/* We missed at least one packet */
1967 			if (macb_is_gem(bp))
1968 				bp->hw_stats.gem.rx_overruns++;
1969 			else
1970 				bp->hw_stats.macb.rx_overruns++;
1971 
1972 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1973 				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1974 		}
1975 
1976 		if (status & MACB_BIT(HRESP)) {
1977 			tasklet_schedule(&bp->hresp_err_tasklet);
1978 			netdev_err(dev, "DMA bus error: HRESP not OK\n");
1979 
1980 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1981 				queue_writel(queue, ISR, MACB_BIT(HRESP));
1982 		}
1983 		status = queue_readl(queue, ISR);
1984 	}
1985 
1986 	spin_unlock(&bp->lock);
1987 
1988 	return IRQ_HANDLED;
1989 }
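
/* Illustrative note on the acknowledgement pattern above (inferred from how
 * the flag is used throughout this driver): on controllers with
 * MACB_CAPS_ISR_CLEAR_ON_WRITE the interrupt status register is
 * write-one-to-clear, so every handled condition is acknowledged with an
 * explicit queue_writel(queue, ISR, <bit>); on the other controllers the
 * queue_readl(queue, ISR) at the top of each loop iteration already cleared
 * the latched status.
 */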
1990 
1991 #ifdef CONFIG_NET_POLL_CONTROLLER
1992 /* Polling receive - used by netconsole and other diagnostic tools
1993  * to allow network i/o with interrupts disabled.
1994  */
1995 static void macb_poll_controller(struct net_device *dev)
1996 {
1997 	struct macb *bp = netdev_priv(dev);
1998 	struct macb_queue *queue;
1999 	unsigned long flags;
2000 	unsigned int q;
2001 
2002 	local_irq_save(flags);
2003 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2004 		macb_interrupt(dev->irq, queue);
2005 	local_irq_restore(flags);
2006 }
2007 #endif
2008 
2009 static unsigned int macb_tx_map(struct macb *bp,
2010 				struct macb_queue *queue,
2011 				struct sk_buff *skb,
2012 				unsigned int hdrlen)
2013 {
2014 	dma_addr_t mapping;
2015 	unsigned int len, entry, i, tx_head = queue->tx_head;
2016 	struct macb_tx_skb *tx_skb = NULL;
2017 	struct macb_dma_desc *desc;
2018 	unsigned int offset, size, count = 0;
2019 	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
2020 	unsigned int eof = 1, mss_mfs = 0;
2021 	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
2022 
2023 	/* LSO */
2024 	if (skb_shinfo(skb)->gso_size != 0) {
2025 		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2026 			/* UDP - UFO */
2027 			lso_ctrl = MACB_LSO_UFO_ENABLE;
2028 		else
2029 			/* TCP - TSO */
2030 			lso_ctrl = MACB_LSO_TSO_ENABLE;
2031 	}
2032 
2033 	/* First, map non-paged data */
2034 	len = skb_headlen(skb);
2035 
2036 	/* first buffer length */
2037 	size = hdrlen;
2038 
2039 	offset = 0;
2040 	while (len) {
2041 		entry = macb_tx_ring_wrap(bp, tx_head);
2042 		tx_skb = &queue->tx_skb[entry];
2043 
2044 		mapping = dma_map_single(&bp->pdev->dev,
2045 					 skb->data + offset,
2046 					 size, DMA_TO_DEVICE);
2047 		if (dma_mapping_error(&bp->pdev->dev, mapping))
2048 			goto dma_error;
2049 
2050 		/* Save info to properly release resources */
2051 		tx_skb->skb = NULL;
2052 		tx_skb->mapping = mapping;
2053 		tx_skb->size = size;
2054 		tx_skb->mapped_as_page = false;
2055 
2056 		len -= size;
2057 		offset += size;
2058 		count++;
2059 		tx_head++;
2060 
2061 		size = min(len, bp->max_tx_length);
2062 	}
2063 
2064 	/* Then, map paged data from fragments */
2065 	for (f = 0; f < nr_frags; f++) {
2066 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2067 
2068 		len = skb_frag_size(frag);
2069 		offset = 0;
2070 		while (len) {
2071 			size = min(len, bp->max_tx_length);
2072 			entry = macb_tx_ring_wrap(bp, tx_head);
2073 			tx_skb = &queue->tx_skb[entry];
2074 
2075 			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
2076 						   offset, size, DMA_TO_DEVICE);
2077 			if (dma_mapping_error(&bp->pdev->dev, mapping))
2078 				goto dma_error;
2079 
2080 			/* Save info to properly release resources */
2081 			tx_skb->skb = NULL;
2082 			tx_skb->mapping = mapping;
2083 			tx_skb->size = size;
2084 			tx_skb->mapped_as_page = true;
2085 
2086 			len -= size;
2087 			offset += size;
2088 			count++;
2089 			tx_head++;
2090 		}
2091 	}
2092 
2093 	/* Should never happen */
2094 	if (unlikely(!tx_skb)) {
2095 		netdev_err(bp->dev, "BUG! empty skb!\n");
2096 		return 0;
2097 	}
2098 
2099 	/* This is the last buffer of the frame: save socket buffer */
2100 	tx_skb->skb = skb;
2101 
2102 	/* Update TX ring: write buffer descriptors in reverse order so the
2103 	 * controller never sees a frame's first descriptor before the rest
2104 	 * of the frame's descriptors are valid.
2105 
2106 	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
2107 	 * to set the end of TX queue
2108 	 */
2109 	i = tx_head;
2110 	entry = macb_tx_ring_wrap(bp, i);
2111 	ctrl = MACB_BIT(TX_USED);
2112 	desc = macb_tx_desc(queue, entry);
2113 	desc->ctrl = ctrl;
2114 
2115 	if (lso_ctrl) {
2116 		if (lso_ctrl == MACB_LSO_UFO_ENABLE) {
2117 			/* include header and FCS in value given to h/w */
2118 			mss_mfs = skb_shinfo(skb)->gso_size +
2119 					skb_transport_offset(skb) +
2120 					ETH_FCS_LEN;
2121 		} else { /* TSO */
2122 			mss_mfs = skb_shinfo(skb)->gso_size;
2123 			/* TCP Sequence Number Source Select
2124 			 * can be set only for TSO
2125 			 */
2126 			seq_ctrl = 0;
2127 		}
2128 	}
2129 
2130 	do {
2131 		i--;
2132 		entry = macb_tx_ring_wrap(bp, i);
2133 		tx_skb = &queue->tx_skb[entry];
2134 		desc = macb_tx_desc(queue, entry);
2135 
2136 		ctrl = (u32)tx_skb->size;
2137 		if (eof) {
2138 			ctrl |= MACB_BIT(TX_LAST);
2139 			eof = 0;
2140 		}
2141 		if (unlikely(entry == (bp->tx_ring_size - 1)))
2142 			ctrl |= MACB_BIT(TX_WRAP);
2143 
2144 		/* First descriptor is header descriptor */
2145 		if (i == queue->tx_head) {
2146 			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
2147 			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
2148 			if ((bp->dev->features & NETIF_F_HW_CSUM) &&
2149 			    skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
2150 			    !ptp_one_step_sync(skb))
2151 				ctrl |= MACB_BIT(TX_NOCRC);
2152 		} else {
2153 			/* Only set MSS/MFS on payload descriptors
2154 			 * (second or later descriptor)
2155 			 */
2156 			ctrl |= MACB_BF(MSS_MFS, mss_mfs);
		}
2157 
2158 		/* Set TX buffer descriptor */
2159 		macb_set_addr(bp, desc, tx_skb->mapping);
2160 		/* desc->addr must be visible to hardware before clearing
2161 		 * 'TX_USED' bit in desc->ctrl.
2162 		 */
2163 		wmb();
2164 		desc->ctrl = ctrl;
2165 	} while (i != queue->tx_head);
2166 
2167 	queue->tx_head = tx_head;
2168 
2169 	return count;
2170 
2171 dma_error:
2172 	netdev_err(bp->dev, "TX DMA map failed\n");
2173 
2174 	for (i = queue->tx_head; i != tx_head; i++) {
2175 		tx_skb = macb_tx_skb(queue, i);
2176 
2177 		macb_tx_unmap(bp, tx_skb, 0);
2178 	}
2179 
2180 	return 0;
2181 }
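
/* Illustrative note on the descriptor handshake in macb_tx_map(): a set
 * TX_USED bit marks a descriptor as owned by software. The function first
 * plants TX_USED in the descriptor after the frame (the new ring tail), then
 * walks backwards rewriting ctrl words, and only the final iteration hands
 * the frame's first descriptor to the controller. The controller therefore
 * never starts fetching a frame whose later descriptors are still being
 * written.
 */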
2182 
2183 static netdev_features_t macb_features_check(struct sk_buff *skb,
2184 					     struct net_device *dev,
2185 					     netdev_features_t features)
2186 {
2187 	unsigned int nr_frags, f;
2188 	unsigned int hdrlen;
2189 
2190 	/* Validate LSO compatibility */
2191 
2192 	/* linear skb (only one buffer) or protocol is not UDP: no UFO limits */
2193 	if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
2194 		return features;
2195 
2196 	/* length of header */
2197 	hdrlen = skb_transport_offset(skb);
2198 
2199 	/* For UFO only:
2200 	 * When software supplies two or more payload buffers, all payload
2201 	 * buffers apart from the last must be a multiple of 8 bytes in size.
2202 	 */
2203 	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
2204 		return features & ~MACB_NETIF_LSO;
2205 
2206 	nr_frags = skb_shinfo(skb)->nr_frags;
2207 	/* No need to check last fragment */
2208 	nr_frags--;
2209 	for (f = 0; f < nr_frags; f++) {
2210 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2211 
2212 		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
2213 			return features & ~MACB_NETIF_LSO;
2214 	}
2215 	return features;
2216 }
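
/* Worked example for the check above (illustrative numbers): a UFO skb with
 * skb_transport_offset() == 34 (14-byte Ethernet + 20-byte IPv4 header) and
 * skb_headlen() == 134 leaves 100 bytes of payload in the head. 100 is not a
 * multiple of MACB_TX_LEN_ALIGN (8), so LSO is removed from the feature set
 * and the stack segments the packet in software instead.
 */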
2217 
2218 static inline int macb_clear_csum(struct sk_buff *skb)
2219 {
2220 	/* no change for packets without checksum offloading */
2221 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2222 		return 0;
2223 
2224 	/* make sure we can modify the header */
2225 	if (unlikely(skb_cow_head(skb, 0)))
2226 		return -1;
2227 
2228 	/* initialize checksum field
2229 	 * This is required - at least for Zynq, which otherwise calculates
2230 	 * wrong UDP header checksums for UDP packets with UDP data len <=2
2231 	 */
2232 	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
2233 	return 0;
2234 }
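
/* Example (illustrative): for a UDP packet flagged CHECKSUM_PARTIAL,
 * skb_checksum_start() points at the UDP header and skb->csum_offset is
 * offsetof(struct udphdr, check) == 6, so the store above zeroes the UDP
 * checksum field before the controller computes and inserts the real
 * checksum.
 */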
2235 
2236 static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
2237 {
2238 	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
2239 		      skb_is_nonlinear(*skb);
2240 	int padlen = ETH_ZLEN - (*skb)->len;
2241 	int tailroom = skb_tailroom(*skb);
2242 	struct sk_buff *nskb;
2243 	u32 fcs;
2244 
2245 	if (!(ndev->features & NETIF_F_HW_CSUM) ||
2246 	    (*skb)->ip_summed == CHECKSUM_PARTIAL ||
2247 	    skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
2248 		return 0;
2249 
2250 	if (padlen <= 0) {
2251 		/* FCS could be appended to tailroom. */
2252 		if (tailroom >= ETH_FCS_LEN)
2253 			goto add_fcs;
2254 		/* No room for FCS, need to reallocate skb. */
2255 		padlen = ETH_FCS_LEN;
2257 	} else {
2258 		/* Add room for FCS. */
2259 		padlen += ETH_FCS_LEN;
2260 	}
2261 
2262 	if (cloned || tailroom < padlen) {
2263 		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
2264 		if (!nskb)
2265 			return -ENOMEM;
2266 
2267 		dev_consume_skb_any(*skb);
2268 		*skb = nskb;
2269 	}
2270 
2271 	if (padlen > ETH_FCS_LEN)
2272 		skb_put_zero(*skb, padlen - ETH_FCS_LEN);
2273 
2274 add_fcs:
2275 	/* set FCS to packet */
2276 	fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
2277 	fcs = ~fcs;
2278 
2279 	skb_put_u8(*skb, fcs		& 0xff);
2280 	skb_put_u8(*skb, (fcs >> 8)	& 0xff);
2281 	skb_put_u8(*skb, (fcs >> 16)	& 0xff);
2282 	skb_put_u8(*skb, (fcs >> 24)	& 0xff);
2283 
2284 	return 0;
2285 }
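
/* Sketch of the FCS math above (not driver code): crc32_le() is the
 * bit-reflected CRC-32 used by IEEE 802.3, seeded with ~0 and inverted at
 * the end, and the result goes out least-significant byte first:
 *
 *	u32 fcs = ~crc32_le(~0, data, len);
 *	for (i = 0; i < 4; i++)
 *		data[len + i] = (fcs >> (8 * i)) & 0xff;
 *
 * which is exactly what the four skb_put_u8() calls implement.
 */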
2286 
2287 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
2288 {
2289 	u16 queue_index = skb_get_queue_mapping(skb);
2290 	struct macb *bp = netdev_priv(dev);
2291 	struct macb_queue *queue = &bp->queues[queue_index];
2292 	unsigned int desc_cnt, nr_frags, frag_size, f;
2293 	unsigned int hdrlen;
2294 	bool is_lso;
2295 	netdev_tx_t ret = NETDEV_TX_OK;
2296 
2297 	if (macb_clear_csum(skb)) {
2298 		dev_kfree_skb_any(skb);
2299 		return ret;
2300 	}
2301 
2302 	if (macb_pad_and_fcs(&skb, dev)) {
2303 		dev_kfree_skb_any(skb);
2304 		return ret;
2305 	}
2306 
2307 #ifdef CONFIG_MACB_USE_HWSTAMP
2308 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2309 	    (bp->hw_dma_cap & HW_DMA_CAP_PTP))
2310 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2311 #endif
2312 
2313 	is_lso = (skb_shinfo(skb)->gso_size != 0);
2314 
2315 	if (is_lso) {
2316 		/* length of headers */
2317 		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2318 			/* only queue eth + ip headers separately for UDP */
2319 			hdrlen = skb_transport_offset(skb);
2320 		else
2321 			hdrlen = skb_tcp_all_headers(skb);
2322 		if (skb_headlen(skb) < hdrlen) {
2323 			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
2324 			/* if this is required, would need to copy to single buffer */
2325 			return NETDEV_TX_BUSY;
2326 		}
2327 	} else {
2328 		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
	}
2329 
2330 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
2331 	netdev_vdbg(bp->dev,
2332 		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
2333 		    queue_index, skb->len, skb->head, skb->data,
2334 		    skb_tail_pointer(skb), skb_end_pointer(skb));
2335 	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
2336 		       skb->data, 16, true);
2337 #endif
2338 
2339 	/* Count how many TX buffer descriptors are needed to send this
2340 	 * socket buffer: skb fragments of jumbo frames may need to be
2341 	 * split into many buffer descriptors.
2342 	 */
2343 	if (is_lso && (skb_headlen(skb) > hdrlen))
2344 		/* extra header descriptor if also payload in first buffer */
2345 		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
2346 	else
2347 		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
2348 	nr_frags = skb_shinfo(skb)->nr_frags;
2349 	for (f = 0; f < nr_frags; f++) {
2350 		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
2351 		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
2352 	}
2353 
2354 	spin_lock_bh(&queue->tx_ptr_lock);
2355 
2356 	/* This is a hard error, log it. */
2357 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
2358 		       bp->tx_ring_size) < desc_cnt) {
2359 		netif_stop_subqueue(dev, queue_index);
2360 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
2361 			   queue->tx_head, queue->tx_tail);
2362 		ret = NETDEV_TX_BUSY;
2363 		goto unlock;
2364 	}
2365 
2366 	/* Map socket buffer for DMA transfer */
2367 	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
2368 		dev_kfree_skb_any(skb);
2369 		goto unlock;
2370 	}
2371 
2372 	/* Make newly initialized descriptor visible to hardware */
2373 	wmb();
2374 	skb_tx_timestamp(skb);
2375 
2376 	spin_lock_irq(&bp->lock);
2377 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
2378 	spin_unlock_irq(&bp->lock);
2379 
2380 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
2381 		netif_stop_subqueue(dev, queue_index);
2382 
2383 unlock:
2384 	spin_unlock_bh(&queue->tx_ptr_lock);
2385 
2386 	return ret;
2387 }
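
/* Worked example for the descriptor budget above (illustrative numbers,
 * assuming bp->max_tx_length == 2040, the non-GEM maximum): a non-LSO skb
 * with 3000 bytes of linear data and one 4000-byte fragment needs
 * DIV_ROUND_UP(3000, 2040) + DIV_ROUND_UP(4000, 2040) = 2 + 2 = 4
 * descriptors; if CIRC_SPACE() reports fewer than 4 free slots the subqueue
 * is stopped and NETDEV_TX_BUSY is returned.
 */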
2388 
2389 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
2390 {
2391 	if (!macb_is_gem(bp)) {
2392 		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
2393 	} else {
2394 		bp->rx_buffer_size = size;
2395 
2396 		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
2397 			netdev_dbg(bp->dev,
2398 				   "RX buffer must be multiple of %d bytes, expanding\n",
2399 				   RX_BUFFER_MULTIPLE);
2400 			bp->rx_buffer_size =
2401 				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
2402 		}
2403 	}
2404 
2405 	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
2406 		   bp->dev->mtu, bp->rx_buffer_size);
2407 }
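
/* Example (illustrative): with a 1500-byte MTU, macb_open() requests
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + NET_IP_ALIGN (2 on most
 * architectures) = 1520 bytes; on GEM this is rounded up to the next
 * multiple of RX_BUFFER_MULTIPLE (64), i.e. 1536 bytes per RX buffer.
 */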
2408 
2409 static void gem_free_rx_buffers(struct macb *bp)
2410 {
2411 	struct sk_buff		*skb;
2412 	struct macb_dma_desc	*desc;
2413 	struct macb_queue *queue;
2414 	dma_addr_t		addr;
2415 	unsigned int q;
2416 	int i;
2417 
2418 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2419 		if (!queue->rx_skbuff)
2420 			continue;
2421 
2422 		for (i = 0; i < bp->rx_ring_size; i++) {
2423 			skb = queue->rx_skbuff[i];
2424 
2425 			if (!skb)
2426 				continue;
2427 
2428 			desc = macb_rx_desc(queue, i);
2429 			addr = macb_get_addr(bp, desc);
2430 
2431 			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
2432 					DMA_FROM_DEVICE);
2433 			dev_kfree_skb_any(skb);
2434 			skb = NULL;
2435 		}
2436 
2437 		kfree(queue->rx_skbuff);
2438 		queue->rx_skbuff = NULL;
2439 	}
2440 }
2441 
2442 static void macb_free_rx_buffers(struct macb *bp)
2443 {
2444 	struct macb_queue *queue = &bp->queues[0];
2445 
2446 	if (queue->rx_buffers) {
2447 		dma_free_coherent(&bp->pdev->dev,
2448 				  bp->rx_ring_size * bp->rx_buffer_size,
2449 				  queue->rx_buffers, queue->rx_buffers_dma);
2450 		queue->rx_buffers = NULL;
2451 	}
2452 }
2453 
2454 static void macb_free_consistent(struct macb *bp)
2455 {
2456 	struct macb_queue *queue;
2457 	unsigned int q;
2458 	int size;
2459 
2460 	bp->macbgem_ops.mog_free_rx_buffers(bp);
2461 
2462 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2463 		kfree(queue->tx_skb);
2464 		queue->tx_skb = NULL;
2465 		if (queue->tx_ring) {
2466 			size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2467 			dma_free_coherent(&bp->pdev->dev, size,
2468 					  queue->tx_ring, queue->tx_ring_dma);
2469 			queue->tx_ring = NULL;
2470 		}
2471 		if (queue->rx_ring) {
2472 			size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2473 			dma_free_coherent(&bp->pdev->dev, size,
2474 					  queue->rx_ring, queue->rx_ring_dma);
2475 			queue->rx_ring = NULL;
2476 		}
2477 	}
2478 }
2479 
2480 static int gem_alloc_rx_buffers(struct macb *bp)
2481 {
2482 	struct macb_queue *queue;
2483 	unsigned int q;
2484 	int size;
2485 
2486 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2487 		size = bp->rx_ring_size * sizeof(struct sk_buff *);
2488 		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
2489 		if (!queue->rx_skbuff)
2490 			return -ENOMEM;
2491 
2492 		netdev_dbg(bp->dev,
2493 			   "Allocated %d RX struct sk_buff entries at %p\n",
2494 			   bp->rx_ring_size, queue->rx_skbuff);
2495 	}
2496 	return 0;
2497 }
2498 
2499 static int macb_alloc_rx_buffers(struct macb *bp)
2500 {
2501 	struct macb_queue *queue = &bp->queues[0];
2502 	int size;
2503 
2504 	size = bp->rx_ring_size * bp->rx_buffer_size;
2505 	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2506 					    &queue->rx_buffers_dma, GFP_KERNEL);
2507 	if (!queue->rx_buffers)
2508 		return -ENOMEM;
2509 
2510 	netdev_dbg(bp->dev,
2511 		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
2512 		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
2513 	return 0;
2514 }
2515 
2516 static int macb_alloc_consistent(struct macb *bp)
2517 {
2518 	struct macb_queue *queue;
2519 	unsigned int q;
2520 	int size;
2521 
2522 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2523 		size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2524 		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2525 						    &queue->tx_ring_dma,
2526 						    GFP_KERNEL);
2527 		if (!queue->tx_ring)
2528 			goto out_err;
2529 		netdev_dbg(bp->dev,
2530 			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
2531 			   q, size, (unsigned long)queue->tx_ring_dma,
2532 			   queue->tx_ring);
2533 
2534 		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
2535 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
2536 		if (!queue->tx_skb)
2537 			goto out_err;
2538 
2539 		size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2540 		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2541 						 &queue->rx_ring_dma, GFP_KERNEL);
2542 		if (!queue->rx_ring)
2543 			goto out_err;
2544 		netdev_dbg(bp->dev,
2545 			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
2546 			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
2547 	}
2548 	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2549 		goto out_err;
2550 
2551 	return 0;
2552 
2553 out_err:
2554 	macb_free_consistent(bp);
2555 	return -ENOMEM;
2556 }
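
/* Sizing example (illustrative, assuming the basic two-word 8-byte
 * descriptor layout): with the default 512-entry rings, each TX ring needs
 * 512 * 8 = 4096 bytes plus bp->tx_bd_rd_prefetch padding, and likewise for
 * RX; 64-bit addressing and PTP timestamping each widen the descriptors,
 * which macb_dma_desc_get_size() accounts for.
 */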
2557 
2558 static void gem_init_rings(struct macb *bp)
2559 {
2560 	struct macb_queue *queue;
2561 	struct macb_dma_desc *desc = NULL;
2562 	unsigned int q;
2563 	int i;
2564 
2565 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2566 		for (i = 0; i < bp->tx_ring_size; i++) {
2567 			desc = macb_tx_desc(queue, i);
2568 			macb_set_addr(bp, desc, 0);
2569 			desc->ctrl = MACB_BIT(TX_USED);
2570 		}
2571 		desc->ctrl |= MACB_BIT(TX_WRAP);
2572 		queue->tx_head = 0;
2573 		queue->tx_tail = 0;
2574 
2575 		queue->rx_tail = 0;
2576 		queue->rx_prepared_head = 0;
2577 
2578 		gem_rx_refill(queue);
2579 	}
2581 }
2582 
2583 static void macb_init_rings(struct macb *bp)
2584 {
2585 	int i;
2586 	struct macb_dma_desc *desc = NULL;
2587 
2588 	macb_init_rx_ring(&bp->queues[0]);
2589 
2590 	for (i = 0; i < bp->tx_ring_size; i++) {
2591 		desc = macb_tx_desc(&bp->queues[0], i);
2592 		macb_set_addr(bp, desc, 0);
2593 		desc->ctrl = MACB_BIT(TX_USED);
2594 	}
2595 	bp->queues[0].tx_head = 0;
2596 	bp->queues[0].tx_tail = 0;
2597 	desc->ctrl |= MACB_BIT(TX_WRAP);
2598 }
2599 
2600 static void macb_reset_hw(struct macb *bp)
2601 {
2602 	struct macb_queue *queue;
2603 	unsigned int q;
2604 	u32 ctrl = macb_readl(bp, NCR);
2605 
2606 	/* Disable RX and TX (XXX: Should we halt the transmission
2607 	 * more gracefully?)
2608 	 */
2609 	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
2610 
2611 	/* Clear the stats registers (XXX: Update stats first?) */
2612 	ctrl |= MACB_BIT(CLRSTAT);
2613 
2614 	macb_writel(bp, NCR, ctrl);
2615 
2616 	/* Clear all status flags */
2617 	macb_writel(bp, TSR, -1);
2618 	macb_writel(bp, RSR, -1);
2619 
2620 	/* Disable all interrupts */
2621 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2622 		queue_writel(queue, IDR, -1);
2623 		queue_readl(queue, ISR);
2624 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2625 			queue_writel(queue, ISR, -1);
2626 	}
2627 }
2628 
2629 static u32 gem_mdc_clk_div(struct macb *bp)
2630 {
2631 	u32 config;
2632 	unsigned long pclk_hz = clk_get_rate(bp->pclk);
2633 
2634 	if (pclk_hz <= 20000000)
2635 		config = GEM_BF(CLK, GEM_CLK_DIV8);
2636 	else if (pclk_hz <= 40000000)
2637 		config = GEM_BF(CLK, GEM_CLK_DIV16);
2638 	else if (pclk_hz <= 80000000)
2639 		config = GEM_BF(CLK, GEM_CLK_DIV32);
2640 	else if (pclk_hz <= 120000000)
2641 		config = GEM_BF(CLK, GEM_CLK_DIV48);
2642 	else if (pclk_hz <= 160000000)
2643 		config = GEM_BF(CLK, GEM_CLK_DIV64);
2644 	else
2645 		config = GEM_BF(CLK, GEM_CLK_DIV96);
2646 
2647 	return config;
2648 }
2649 
2650 static u32 macb_mdc_clk_div(struct macb *bp)
2651 {
2652 	u32 config;
2653 	unsigned long pclk_hz;
2654 
2655 	if (macb_is_gem(bp))
2656 		return gem_mdc_clk_div(bp);
2657 
2658 	pclk_hz = clk_get_rate(bp->pclk);
2659 	if (pclk_hz <= 20000000)
2660 		config = MACB_BF(CLK, MACB_CLK_DIV8);
2661 	else if (pclk_hz <= 40000000)
2662 		config = MACB_BF(CLK, MACB_CLK_DIV16);
2663 	else if (pclk_hz <= 80000000)
2664 		config = MACB_BF(CLK, MACB_CLK_DIV32);
2665 	else
2666 		config = MACB_BF(CLK, MACB_CLK_DIV64);
2667 
2668 	return config;
2669 }
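
/* Example (illustrative): with a 125 MHz pclk on a GEM, the ladder above
 * picks GEM_CLK_DIV64 (pclk <= 160 MHz), so MDC runs at 125 MHz / 64 ~=
 * 1.95 MHz, below the 2.5 MHz maximum that IEEE 802.3 clause 22 allows on
 * the MDIO bus.
 */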
2670 
2671 /* Get the DMA bus width field of the network configuration register that we
2672  * should program.  We find the width from decoding the design configuration
2673  * register to find the maximum supported data bus width.
2674  */
2675 static u32 macb_dbw(struct macb *bp)
2676 {
2677 	if (!macb_is_gem(bp))
2678 		return 0;
2679 
2680 	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2681 	case 4:
2682 		return GEM_BF(DBW, GEM_DBW128);
2683 	case 2:
2684 		return GEM_BF(DBW, GEM_DBW64);
2685 	case 1:
2686 	default:
2687 		return GEM_BF(DBW, GEM_DBW32);
2688 	}
2689 }
2690 
2691 /* Configure the DMA engine
2692  * - use the correct receive buffer size
2693  * - set the best burst length for DMA operations
2694  *   (if not supported by the FIFO, it will fall back to the default)
2695  * - set both rx/tx packet buffers to the full memory size
2696  * These are configurable parameters for GEM.
2697  */
2698 static void macb_configure_dma(struct macb *bp)
2699 {
2700 	struct macb_queue *queue;
2701 	u32 buffer_size;
2702 	unsigned int q;
2703 	u32 dmacfg;
2704 
2705 	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
2706 	if (macb_is_gem(bp)) {
2707 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
2708 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2709 			if (q)
2710 				queue_writel(queue, RBQS, buffer_size);
2711 			else
2712 				dmacfg |= GEM_BF(RXBS, buffer_size);
2713 		}
2714 		if (bp->dma_burst_length)
2715 			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
2716 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
2717 		dmacfg &= ~GEM_BIT(ENDIA_PKT);
2718 
2719 		if (bp->native_io)
2720 			dmacfg &= ~GEM_BIT(ENDIA_DESC);
2721 		else
2722 			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
2723 
2724 		if (bp->dev->features & NETIF_F_HW_CSUM)
2725 			dmacfg |= GEM_BIT(TXCOEN);
2726 		else
2727 			dmacfg &= ~GEM_BIT(TXCOEN);
2728 
2729 		dmacfg &= ~GEM_BIT(ADDR64);
2730 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2731 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2732 			dmacfg |= GEM_BIT(ADDR64);
2733 #endif
2734 #ifdef CONFIG_MACB_USE_HWSTAMP
2735 		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2736 			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
2737 #endif
2738 		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2739 			   dmacfg);
2740 		gem_writel(bp, DMACFG, dmacfg);
2741 	}
2742 }
2743 
2744 static void macb_init_hw(struct macb *bp)
2745 {
2746 	u32 config;
2747 
2748 	macb_reset_hw(bp);
2749 	macb_set_hwaddr(bp);
2750 
2751 	config = macb_mdc_clk_div(bp);
2752 	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
2753 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
2754 	if (bp->caps & MACB_CAPS_JUMBO)
2755 		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
2756 	else
2757 		config |= MACB_BIT(BIG);	/* Receive oversized frames */
2758 	if (bp->dev->flags & IFF_PROMISC)
2759 		config |= MACB_BIT(CAF);	/* Copy All Frames */
2760 	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2761 		config |= GEM_BIT(RXCOEN);
2762 	if (!(bp->dev->flags & IFF_BROADCAST))
2763 		config |= MACB_BIT(NBC);	/* No BroadCast */
2764 	config |= macb_dbw(bp);
2765 	macb_writel(bp, NCFGR, config);
2766 	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
2767 		gem_writel(bp, JML, bp->jumbo_max_len);
2768 	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
2769 	if (bp->caps & MACB_CAPS_JUMBO)
2770 		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
2771 
2772 	macb_configure_dma(bp);
2773 }
2774 
2775 /* The hash address register is 64 bits long and takes up two
2776  * locations in the memory map.  The least significant bits are stored
2777  * in EMAC_HSL and the most significant bits in EMAC_HSH.
2778  *
2779  * The unicast hash enable and the multicast hash enable bits in the
2780  * network configuration register enable the reception of hash matched
2781  * frames. The destination address is reduced to a 6 bit index into
2782  * the 64 bit hash register using the following hash function.  The
2783  * hash function is an exclusive or of every sixth bit of the
2784  * destination address.
2785  *
2786  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
2787  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
2788  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
2789  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
2790  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
2791  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
2792  *
2793  * da[0] represents the least significant bit of the first byte
2794  * received, that is, the multicast/unicast indicator, and da[47]
2795  * represents the most significant bit of the last byte received.  If
2796  * the hash index, hi[n], points to a bit that is set in the hash
2797  * register then the frame will be matched according to whether the
2798  * frame is multicast or unicast.  A multicast match will be signalled
2799  * if the multicast hash enable bit is set, da[0] is 1 and the hash
2800  * index points to a bit set in the hash register.  A unicast match
2801  * will be signalled if the unicast hash enable bit is set, da[0] is 0
2802  * and the hash index points to a bit set in the hash register.  To
2803  * receive all multicast frames, the hash register should be set with
2804  * all ones and the multicast hash enable bit should be set in the
2805  * network configuration register.
2806  */
2807 
2808 static inline int hash_bit_value(int bitnr, __u8 *addr)
2809 {
2810 	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2811 		return 1;
2812 	return 0;
2813 }
2814 
2815 /* Return the hash index value for the specified address. */
2816 static int hash_get_index(__u8 *addr)
2817 {
2818 	int i, j, bitval;
2819 	int hash_index = 0;
2820 
2821 	for (j = 0; j < 6; j++) {
2822 		for (i = 0, bitval = 0; i < 8; i++)
2823 			bitval ^= hash_bit_value(i * 6 + j, addr);
2824 
2825 		hash_index |= (bitval << j);
2826 	}
2827 
2828 	return hash_index;
2829 }
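
/* Worked example (illustrative): for the all-ones address ff:ff:ff:ff:ff:ff
 * every da[n] is 1, so each hi[j] XORs eight ones and evaluates to 0: the
 * address hashes to index 0 and would be matched by bit 0 of the hash
 * register. (Broadcast acceptance is normally controlled by the NBC bit in
 * NCFGR rather than the hash filter; the address is used here only to keep
 * the arithmetic easy to follow.)
 */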
2830 
2831 /* Add multicast addresses to the internal multicast-hash table. */
2832 static void macb_sethashtable(struct net_device *dev)
2833 {
2834 	struct netdev_hw_addr *ha;
2835 	unsigned long mc_filter[2];
2836 	unsigned int bitnr;
2837 	struct macb *bp = netdev_priv(dev);
2838 
2839 	mc_filter[0] = 0;
2840 	mc_filter[1] = 0;
2841 
2842 	netdev_for_each_mc_addr(ha, dev) {
2843 		bitnr = hash_get_index(ha->addr);
2844 		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2845 	}
2846 
2847 	macb_or_gem_writel(bp, HRB, mc_filter[0]);
2848 	macb_or_gem_writel(bp, HRT, mc_filter[1]);
2849 }
2850 
2851 /* Enable/Disable promiscuous and multicast modes. */
2852 static void macb_set_rx_mode(struct net_device *dev)
2853 {
2854 	unsigned long cfg;
2855 	struct macb *bp = netdev_priv(dev);
2856 
2857 	cfg = macb_readl(bp, NCFGR);
2858 
2859 	if (dev->flags & IFF_PROMISC) {
2860 		/* Enable promiscuous mode */
2861 		cfg |= MACB_BIT(CAF);
2862 
2863 		/* Disable RX checksum offload */
2864 		if (macb_is_gem(bp))
2865 			cfg &= ~GEM_BIT(RXCOEN);
2866 	} else {
2867 		/* Disable promiscuous mode */
2868 		cfg &= ~MACB_BIT(CAF);
2869 
2870 		/* Enable RX checksum offload only if requested */
2871 		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2872 			cfg |= GEM_BIT(RXCOEN);
2873 	}
2874 
2875 	if (dev->flags & IFF_ALLMULTI) {
2876 		/* Enable all multicast mode */
2877 		macb_or_gem_writel(bp, HRB, -1);
2878 		macb_or_gem_writel(bp, HRT, -1);
2879 		cfg |= MACB_BIT(NCFGR_MTI);
2880 	} else if (!netdev_mc_empty(dev)) {
2881 		/* Enable specific multicasts */
2882 		macb_sethashtable(dev);
2883 		cfg |= MACB_BIT(NCFGR_MTI);
2884 	} else if (dev->flags & (~IFF_ALLMULTI)) {
2885 		/* Disable all multicast mode */
2886 		macb_or_gem_writel(bp, HRB, 0);
2887 		macb_or_gem_writel(bp, HRT, 0);
2888 		cfg &= ~MACB_BIT(NCFGR_MTI);
2889 	}
2890 
2891 	macb_writel(bp, NCFGR, cfg);
2892 }
2893 
2894 static int macb_open(struct net_device *dev)
2895 {
2896 	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2897 	struct macb *bp = netdev_priv(dev);
2898 	struct macb_queue *queue;
2899 	unsigned int q;
2900 	int err;
2901 
2902 	netdev_dbg(bp->dev, "open\n");
2903 
2904 	err = pm_runtime_resume_and_get(&bp->pdev->dev);
2905 	if (err < 0)
2906 		return err;
2907 
2908 	/* RX buffers initialization */
2909 	macb_init_rx_buffer_size(bp, bufsz);
2910 
2911 	err = macb_alloc_consistent(bp);
2912 	if (err) {
2913 		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2914 			   err);
2915 		goto pm_exit;
2916 	}
2917 
2918 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2919 		napi_enable(&queue->napi_rx);
2920 		napi_enable(&queue->napi_tx);
2921 	}
2922 
2923 	macb_init_hw(bp);
2924 
2925 	err = phy_power_on(bp->sgmii_phy);
2926 	if (err)
2927 		goto reset_hw;
2928 
2929 	err = macb_phylink_connect(bp);
2930 	if (err)
2931 		goto phy_off;
2932 
2933 	netif_tx_start_all_queues(dev);
2934 
2935 	if (bp->ptp_info)
2936 		bp->ptp_info->ptp_init(dev);
2937 
2938 	return 0;
2939 
2940 phy_off:
2941 	phy_power_off(bp->sgmii_phy);
2942 
2943 reset_hw:
2944 	macb_reset_hw(bp);
2945 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2946 		napi_disable(&queue->napi_rx);
2947 		napi_disable(&queue->napi_tx);
2948 	}
2949 	macb_free_consistent(bp);
2950 pm_exit:
2951 	pm_runtime_put_sync(&bp->pdev->dev);
2952 	return err;
2953 }
2954 
2955 static int macb_close(struct net_device *dev)
2956 {
2957 	struct macb *bp = netdev_priv(dev);
2958 	struct macb_queue *queue;
2959 	unsigned long flags;
2960 	unsigned int q;
2961 
2962 	netif_tx_stop_all_queues(dev);
2963 
2964 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2965 		napi_disable(&queue->napi_rx);
2966 		napi_disable(&queue->napi_tx);
2967 	}
2968 
2969 	phylink_stop(bp->phylink);
2970 	phylink_disconnect_phy(bp->phylink);
2971 
2972 	phy_power_off(bp->sgmii_phy);
2973 
2974 	spin_lock_irqsave(&bp->lock, flags);
2975 	macb_reset_hw(bp);
2976 	netif_carrier_off(dev);
2977 	spin_unlock_irqrestore(&bp->lock, flags);
2978 
2979 	macb_free_consistent(bp);
2980 
2981 	if (bp->ptp_info)
2982 		bp->ptp_info->ptp_remove(dev);
2983 
2984 	pm_runtime_put(&bp->pdev->dev);
2985 
2986 	return 0;
2987 }
2988 
2989 static int macb_change_mtu(struct net_device *dev, int new_mtu)
2990 {
2991 	if (netif_running(dev))
2992 		return -EBUSY;
2993 
2994 	dev->mtu = new_mtu;
2995 
2996 	return 0;
2997 }
2998 
2999 static int macb_set_mac_addr(struct net_device *dev, void *addr)
3000 {
3001 	int err;
3002 
3003 	err = eth_mac_addr(dev, addr);
3004 	if (err < 0)
3005 		return err;
3006 
3007 	macb_set_hwaddr(netdev_priv(dev));
3008 	return 0;
3009 }
3010 
3011 static void gem_update_stats(struct macb *bp)
3012 {
3013 	struct macb_queue *queue;
3014 	unsigned int i, q, idx;
3015 	unsigned long *stat;
3016 
3017 	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
3018 
3019 	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
3020 		u32 offset = gem_statistics[i].offset;
3021 		u64 val = bp->macb_reg_readl(bp, offset);
3022 
3023 		bp->ethtool_stats[i] += val;
3024 		*p += val;
3025 
3026 		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
3027 			/* Add GEM_OCTTXH, GEM_OCTRXH */
3028 			val = bp->macb_reg_readl(bp, offset + 4);
3029 			bp->ethtool_stats[i] += ((u64)val) << 32;
3030 			*(++p) += val;
3031 		}
3032 	}
3033 
3034 	idx = GEM_STATS_LEN;
3035 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
3036 		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
3037 			bp->ethtool_stats[idx++] = *stat;
3038 }
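
/* Example (illustrative): the TX/RX octet counters are wider than 32 bits
 * and split across two registers, so for GEM_OCTTXL the loop above also
 * reads GEM_OCTTXH at offset + 4 and accumulates
 *
 *	bp->ethtool_stats[i] += lo + ((u64)hi << 32);
 *
 * while the two adjacent u32 fields of struct gem_stats are advanced
 * separately via *p and *(++p).
 */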
3039 
3040 static struct net_device_stats *gem_get_stats(struct macb *bp)
3041 {
3042 	struct gem_stats *hwstat = &bp->hw_stats.gem;
3043 	struct net_device_stats *nstat = &bp->dev->stats;
3044 
3045 	if (!netif_running(bp->dev))
3046 		return nstat;
3047 
3048 	gem_update_stats(bp);
3049 
3050 	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
3051 			    hwstat->rx_alignment_errors +
3052 			    hwstat->rx_resource_errors +
3053 			    hwstat->rx_overruns +
3054 			    hwstat->rx_oversize_frames +
3055 			    hwstat->rx_jabbers +
3056 			    hwstat->rx_undersized_frames +
3057 			    hwstat->rx_length_field_frame_errors);
3058 	nstat->tx_errors = (hwstat->tx_late_collisions +
3059 			    hwstat->tx_excessive_collisions +
3060 			    hwstat->tx_underrun +
3061 			    hwstat->tx_carrier_sense_errors);
3062 	nstat->multicast = hwstat->rx_multicast_frames;
3063 	nstat->collisions = (hwstat->tx_single_collision_frames +
3064 			     hwstat->tx_multiple_collision_frames +
3065 			     hwstat->tx_excessive_collisions);
3066 	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
3067 				   hwstat->rx_jabbers +
3068 				   hwstat->rx_undersized_frames +
3069 				   hwstat->rx_length_field_frame_errors);
3070 	nstat->rx_over_errors = hwstat->rx_resource_errors;
3071 	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
3072 	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
3073 	nstat->rx_fifo_errors = hwstat->rx_overruns;
3074 	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
3075 	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
3076 	nstat->tx_fifo_errors = hwstat->tx_underrun;
3077 
3078 	return nstat;
3079 }
3080 
3081 static void gem_get_ethtool_stats(struct net_device *dev,
3082 				  struct ethtool_stats *stats, u64 *data)
3083 {
3084 	struct macb *bp;
3085 
3086 	bp = netdev_priv(dev);
3087 	gem_update_stats(bp);
3088 	memcpy(data, &bp->ethtool_stats, sizeof(u64)
3089 			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
3090 }
3091 
3092 static int gem_get_sset_count(struct net_device *dev, int sset)
3093 {
3094 	struct macb *bp = netdev_priv(dev);
3095 
3096 	switch (sset) {
3097 	case ETH_SS_STATS:
3098 		return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
3099 	default:
3100 		return -EOPNOTSUPP;
3101 	}
3102 }
3103 
3104 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
3105 {
3106 	char stat_string[ETH_GSTRING_LEN];
3107 	struct macb *bp = netdev_priv(dev);
3108 	struct macb_queue *queue;
3109 	unsigned int i;
3110 	unsigned int q;
3111 
3112 	switch (sset) {
3113 	case ETH_SS_STATS:
3114 		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
3115 			memcpy(p, gem_statistics[i].stat_string,
3116 			       ETH_GSTRING_LEN);
3117 
3118 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3119 			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
3120 				snprintf(stat_string, ETH_GSTRING_LEN, "q%u_%s",
3121 						q, queue_statistics[i].stat_string);
3122 				memcpy(p, stat_string, ETH_GSTRING_LEN);
3123 			}
3124 		}
3125 		break;
3126 	}
3127 }
3128 
3129 static struct net_device_stats *macb_get_stats(struct net_device *dev)
3130 {
3131 	struct macb *bp = netdev_priv(dev);
3132 	struct net_device_stats *nstat = &bp->dev->stats;
3133 	struct macb_stats *hwstat = &bp->hw_stats.macb;
3134 
3135 	if (macb_is_gem(bp))
3136 		return gem_get_stats(bp);
3137 
3138 	/* read stats from hardware */
3139 	macb_update_stats(bp);
3140 
3141 	/* Convert HW stats into netdevice stats */
3142 	nstat->rx_errors = (hwstat->rx_fcs_errors +
3143 			    hwstat->rx_align_errors +
3144 			    hwstat->rx_resource_errors +
3145 			    hwstat->rx_overruns +
3146 			    hwstat->rx_oversize_pkts +
3147 			    hwstat->rx_jabbers +
3148 			    hwstat->rx_undersize_pkts +
3149 			    hwstat->rx_length_mismatch);
3150 	nstat->tx_errors = (hwstat->tx_late_cols +
3151 			    hwstat->tx_excessive_cols +
3152 			    hwstat->tx_underruns +
3153 			    hwstat->tx_carrier_errors +
3154 			    hwstat->sqe_test_errors);
3155 	nstat->collisions = (hwstat->tx_single_cols +
3156 			     hwstat->tx_multiple_cols +
3157 			     hwstat->tx_excessive_cols);
3158 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
3159 				   hwstat->rx_jabbers +
3160 				   hwstat->rx_undersize_pkts +
3161 				   hwstat->rx_length_mismatch);
3162 	nstat->rx_over_errors = hwstat->rx_resource_errors +
3163 				   hwstat->rx_overruns;
3164 	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
3165 	nstat->rx_frame_errors = hwstat->rx_align_errors;
3166 	nstat->rx_fifo_errors = hwstat->rx_overruns;
3167 	/* XXX: What does "missed" mean? */
3168 	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
3169 	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
3170 	nstat->tx_fifo_errors = hwstat->tx_underruns;
3171 	/* Don't know about heartbeat or window errors... */
3172 
3173 	return nstat;
3174 }
3175 
3176 static int macb_get_regs_len(struct net_device *netdev)
3177 {
3178 	return MACB_GREGS_NBR * sizeof(u32);
3179 }
3180 
3181 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3182 			  void *p)
3183 {
3184 	struct macb *bp = netdev_priv(dev);
3185 	unsigned int tail, head;
3186 	u32 *regs_buff = p;
3187 
3188 	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
3189 			| MACB_GREGS_VERSION;
3190 
3191 	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
3192 	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
3193 
3194 	regs_buff[0]  = macb_readl(bp, NCR);
3195 	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
3196 	regs_buff[2]  = macb_readl(bp, NSR);
3197 	regs_buff[3]  = macb_readl(bp, TSR);
3198 	regs_buff[4]  = macb_readl(bp, RBQP);
3199 	regs_buff[5]  = macb_readl(bp, TBQP);
3200 	regs_buff[6]  = macb_readl(bp, RSR);
3201 	regs_buff[7]  = macb_readl(bp, IMR);
3202 
3203 	regs_buff[8]  = tail;
3204 	regs_buff[9]  = head;
3205 	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
3206 	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
3207 
3208 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
3209 		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
3210 	if (macb_is_gem(bp))
3211 		regs_buff[13] = gem_readl(bp, DMACFG);
3212 }
3213 
3214 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3215 {
3216 	struct macb *bp = netdev_priv(netdev);
3217 
3218 	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
3219 		phylink_ethtool_get_wol(bp->phylink, wol);
3220 		wol->supported |= WAKE_MAGIC;
3221 
3222 		if (bp->wol & MACB_WOL_ENABLED)
3223 			wol->wolopts |= WAKE_MAGIC;
3224 	}
3225 }
3226 
3227 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3228 {
3229 	struct macb *bp = netdev_priv(netdev);
3230 	int ret;
3231 
3232 	/* Pass the request to the phylink layer */
3233 	ret = phylink_ethtool_set_wol(bp->phylink, wol);
3234 	/* Don't manage WoL on MAC if handled by the PHY
3235 	 * or if there's a failure in talking to the PHY
3236 	 */
3237 	if (!ret || ret != -EOPNOTSUPP)
3238 		return ret;
3239 
3240 	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
3241 	    (wol->wolopts & ~WAKE_MAGIC))
3242 		return -EOPNOTSUPP;
3243 
3244 	if (wol->wolopts & WAKE_MAGIC)
3245 		bp->wol |= MACB_WOL_ENABLED;
3246 	else
3247 		bp->wol &= ~MACB_WOL_ENABLED;
3248 
3249 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
3250 
3251 	return 0;
3252 }
3253 
3254 static int macb_get_link_ksettings(struct net_device *netdev,
3255 				   struct ethtool_link_ksettings *kset)
3256 {
3257 	struct macb *bp = netdev_priv(netdev);
3258 
3259 	return phylink_ethtool_ksettings_get(bp->phylink, kset);
3260 }
3261 
3262 static int macb_set_link_ksettings(struct net_device *netdev,
3263 				   const struct ethtool_link_ksettings *kset)
3264 {
3265 	struct macb *bp = netdev_priv(netdev);
3266 
3267 	return phylink_ethtool_ksettings_set(bp->phylink, kset);
3268 }
3269 
3270 static void macb_get_ringparam(struct net_device *netdev,
3271 			       struct ethtool_ringparam *ring,
3272 			       struct kernel_ethtool_ringparam *kernel_ring,
3273 			       struct netlink_ext_ack *extack)
3274 {
3275 	struct macb *bp = netdev_priv(netdev);
3276 
3277 	ring->rx_max_pending = MAX_RX_RING_SIZE;
3278 	ring->tx_max_pending = MAX_TX_RING_SIZE;
3279 
3280 	ring->rx_pending = bp->rx_ring_size;
3281 	ring->tx_pending = bp->tx_ring_size;
3282 }
3283 
3284 static int macb_set_ringparam(struct net_device *netdev,
3285 			      struct ethtool_ringparam *ring,
3286 			      struct kernel_ethtool_ringparam *kernel_ring,
3287 			      struct netlink_ext_ack *extack)
3288 {
3289 	struct macb *bp = netdev_priv(netdev);
3290 	u32 new_rx_size, new_tx_size;
3291 	unsigned int reset = 0;
3292 
3293 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
3294 		return -EINVAL;
3295 
3296 	new_rx_size = clamp_t(u32, ring->rx_pending,
3297 			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
3298 	new_rx_size = roundup_pow_of_two(new_rx_size);
3299 
3300 	new_tx_size = clamp_t(u32, ring->tx_pending,
3301 			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
3302 	new_tx_size = roundup_pow_of_two(new_tx_size);
3303 
3304 	if ((new_tx_size == bp->tx_ring_size) &&
3305 	    (new_rx_size == bp->rx_ring_size)) {
3306 		/* nothing to do */
3307 		return 0;
3308 	}
3309 
3310 	if (netif_running(bp->dev)) {
3311 		reset = 1;
3312 		macb_close(bp->dev);
3313 	}
3314 
3315 	bp->rx_ring_size = new_rx_size;
3316 	bp->tx_ring_size = new_tx_size;
3317 
3318 	if (reset)
3319 		macb_open(bp->dev);
3320 
3321 	return 0;
3322 }
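
/* Example (illustrative): `ethtool -G eth0 rx 1000 tx 100` is clamped to
 * the [MIN_*, MAX_*] ring limits and rounded up to a power of two, yielding
 * rx_ring_size = 1024 and tx_ring_size = 128; a running interface is closed
 * and reopened so the rings are reallocated at the new sizes.
 */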
3323 
3324 #ifdef CONFIG_MACB_USE_HWSTAMP
3325 static unsigned int gem_get_tsu_rate(struct macb *bp)
3326 {
3327 	struct clk *tsu_clk;
3328 	unsigned int tsu_rate;
3329 
3330 	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
3331 	if (!IS_ERR(tsu_clk))
3332 		tsu_rate = clk_get_rate(tsu_clk);
3333 	else if (!IS_ERR(bp->pclk)) {
3334 		/* try pclk instead */
3335 		tsu_clk = bp->pclk;
3336 		tsu_rate = clk_get_rate(tsu_clk);
3337 	} else {
3338 		return -ENOTSUPP;
	}
3339 	return tsu_rate;
3340 }
3341 
3342 static s32 gem_get_ptp_max_adj(void)
3343 {
3344 	return 64000000;
3345 }
3346 
3347 static int gem_get_ts_info(struct net_device *dev,
3348 			   struct ethtool_ts_info *info)
3349 {
3350 	struct macb *bp = netdev_priv(dev);
3351 
3352 	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
3353 		ethtool_op_get_ts_info(dev, info);
3354 		return 0;
3355 	}
3356 
3357 	info->so_timestamping =
3358 		SOF_TIMESTAMPING_TX_SOFTWARE |
3359 		SOF_TIMESTAMPING_RX_SOFTWARE |
3360 		SOF_TIMESTAMPING_SOFTWARE |
3361 		SOF_TIMESTAMPING_TX_HARDWARE |
3362 		SOF_TIMESTAMPING_RX_HARDWARE |
3363 		SOF_TIMESTAMPING_RAW_HARDWARE;
3364 	info->tx_types =
3365 		(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
3366 		(1 << HWTSTAMP_TX_OFF) |
3367 		(1 << HWTSTAMP_TX_ON);
3368 	info->rx_filters =
3369 		(1 << HWTSTAMP_FILTER_NONE) |
3370 		(1 << HWTSTAMP_FILTER_ALL);
3371 
3372 	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
3373 
3374 	return 0;
3375 }
3376 
3377 static struct macb_ptp_info gem_ptp_info = {
3378 	.ptp_init	 = gem_ptp_init,
3379 	.ptp_remove	 = gem_ptp_remove,
3380 	.get_ptp_max_adj = gem_get_ptp_max_adj,
3381 	.get_tsu_rate	 = gem_get_tsu_rate,
3382 	.get_ts_info	 = gem_get_ts_info,
3383 	.get_hwtst	 = gem_get_hwtst,
3384 	.set_hwtst	 = gem_set_hwtst,
3385 };
3386 #endif
3387 
3388 static int macb_get_ts_info(struct net_device *netdev,
3389 			    struct ethtool_ts_info *info)
3390 {
3391 	struct macb *bp = netdev_priv(netdev);
3392 
3393 	if (bp->ptp_info)
3394 		return bp->ptp_info->get_ts_info(netdev, info);
3395 
3396 	return ethtool_op_get_ts_info(netdev, info);
3397 }
3398 
3399 static void gem_enable_flow_filters(struct macb *bp, bool enable)
3400 {
3401 	struct net_device *netdev = bp->dev;
3402 	struct ethtool_rx_fs_item *item;
3403 	u32 t2_scr;
3404 	int num_t2_scr;
3405 
3406 	if (!(netdev->features & NETIF_F_NTUPLE))
3407 		return;
3408 
3409 	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
3410 
3411 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3412 		struct ethtool_rx_flow_spec *fs = &item->fs;
3413 		struct ethtool_tcpip4_spec *tp4sp_m;
3414 
3415 		if (fs->location >= num_t2_scr)
3416 			continue;
3417 
3418 		t2_scr = gem_readl_n(bp, SCRT2, fs->location);
3419 
3420 		/* enable/disable screener regs for the flow entry */
3421 		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
3422 
3423 		/* only enable fields with no masking */
3424 		tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3425 
3426 		if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
3427 			t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
3428 		else
3429 			t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
3430 
3431 		if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
3432 			t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
3433 		else
3434 			t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
3435 
3436 		if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
3437 			t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
3438 		else
3439 			t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
3440 
3441 		gem_writel_n(bp, SCRT2, fs->location, t2_scr);
3442 	}
3443 }
3444 
3445 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
3446 {
3447 	struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
3448 	uint16_t index = fs->location;
3449 	u32 w0, w1, t2_scr;
3450 	bool cmp_a = false;
3451 	bool cmp_b = false;
3452 	bool cmp_c = false;
3453 
3454 	if (!macb_is_gem(bp))
3455 		return;
3456 
3457 	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
3458 	tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3459 
3460 	/* ignore field if any masking set */
3461 	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
3462 		/* 1st compare reg - IP source address */
3463 		w0 = 0;
3464 		w1 = 0;
3465 		w0 = tp4sp_v->ip4src;
3466 		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3467 		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3468 		w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
3469 		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
3470 		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
3471 		cmp_a = true;
3472 	}
3473 
3474 	/* ignore field if any masking set */
3475 	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
3476 		/* 2nd compare reg - IP destination address */
3477 		w0 = 0;
3478 		w1 = 0;
3479 		w0 = tp4sp_v->ip4dst;
3480 		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3481 		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3482 		w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
3483 		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
3484 		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
3485 		cmp_b = true;
3486 	}
3487 
3488 	/* ignore both port fields if masking set in both */
3489 	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
3490 		/* 3rd compare reg - source port, destination port */
3491 		w0 = 0;
3492 		w1 = 0;
3493 		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
3494 		if (tp4sp_m->psrc == tp4sp_m->pdst) {
3495 			w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
3496 			w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3497 			w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3498 			w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3499 		} else {
3500 			/* only one port definition */
3501 			w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
3502 			w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
3503 			if (tp4sp_m->psrc == 0xFFFF) { /* src port */
3504 				w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
3505 				w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3506 			} else { /* dst port */
3507 				w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3508 				w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
3509 			}
3510 		}
3511 		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
3512 		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
3513 		cmp_c = true;
3514 	}
3515 
3516 	t2_scr = 0;
3517 	t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
3518 	t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
3519 	if (cmp_a)
3520 		t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
3521 	if (cmp_b)
3522 		t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
3523 	if (cmp_c)
3524 		t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
3525 	gem_writel_n(bp, SCRT2, index, t2_scr);
3526 }
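
/* Usage sketch (illustrative command and values): a rule such as
 *
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 \
 *		action 1 loc 0
 *
 * arrives here with all-ones masks for the specified fields: dst-ip
 * programs compare reg B, dst-port compare reg C, and screener 0 then
 * steers matching frames to RX queue 1.
 */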
3527 
3528 static int gem_add_flow_filter(struct net_device *netdev,
3529 		struct ethtool_rxnfc *cmd)
3530 {
3531 	struct macb *bp = netdev_priv(netdev);
3532 	struct ethtool_rx_flow_spec *fs = &cmd->fs;
3533 	struct ethtool_rx_fs_item *item, *newfs;
3534 	unsigned long flags;
3535 	int ret = -EINVAL;
3536 	bool added = false;
3537 
3538 	newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
3539 	if (!newfs)
3540 		return -ENOMEM;
3541 	memcpy(&newfs->fs, fs, sizeof(newfs->fs));
3542 
3543 	netdev_dbg(netdev,
3544 			"Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3545 			fs->flow_type, (int)fs->ring_cookie, fs->location,
3546 			htonl(fs->h_u.tcp_ip4_spec.ip4src),
3547 			htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3548 			be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
3549 			be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
3550 
3551 	spin_lock_irqsave(&bp->rx_fs_lock, flags);
3552 
3553 	/* find correct place to add in list */
3554 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3555 		if (item->fs.location > newfs->fs.location) {
3556 			list_add_tail(&newfs->list, &item->list);
3557 			added = true;
3558 			break;
3559 		} else if (item->fs.location == fs->location) {
3560 			netdev_err(netdev, "Rule not added: location %d not free!\n",
3561 					fs->location);
3562 			ret = -EBUSY;
3563 			goto err;
3564 		}
3565 	}
3566 	if (!added)
3567 		list_add_tail(&newfs->list, &bp->rx_fs_list.list);
3568 
3569 	gem_prog_cmp_regs(bp, fs);
3570 	bp->rx_fs_list.count++;
3571 	/* enable filtering if NTUPLE on */
3572 	gem_enable_flow_filters(bp, 1);
3573 
3574 	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3575 	return 0;
3576 
3577 err:
3578 	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3579 	kfree(newfs);
3580 	return ret;
3581 }
3582 
3583 static int gem_del_flow_filter(struct net_device *netdev,
3584 		struct ethtool_rxnfc *cmd)
3585 {
3586 	struct macb *bp = netdev_priv(netdev);
3587 	struct ethtool_rx_fs_item *item;
3588 	struct ethtool_rx_flow_spec *fs;
3589 	unsigned long flags;
3590 
3591 	spin_lock_irqsave(&bp->rx_fs_lock, flags);
3592 
3593 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3594 		if (item->fs.location == cmd->fs.location) {
3595 			/* disable screener regs for the flow entry */
3596 			fs = &(item->fs);
3597 			netdev_dbg(netdev,
3598 					"Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3599 					fs->flow_type, (int)fs->ring_cookie, fs->location,
3600 					htonl(fs->h_u.tcp_ip4_spec.ip4src),
3601 					htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3602 					be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
3603 					be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
3604 
3605 			gem_writel_n(bp, SCRT2, fs->location, 0);
3606 
3607 			list_del(&item->list);
3608 			bp->rx_fs_list.count--;
3609 			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3610 			kfree(item);
3611 			return 0;
3612 		}
3613 	}
3614 
3615 	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3616 	return -EINVAL;
3617 }
3618 
3619 static int gem_get_flow_entry(struct net_device *netdev,
3620 		struct ethtool_rxnfc *cmd)
3621 {
3622 	struct macb *bp = netdev_priv(netdev);
3623 	struct ethtool_rx_fs_item *item;
3624 
3625 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3626 		if (item->fs.location == cmd->fs.location) {
3627 			memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
3628 			return 0;
3629 		}
3630 	}
3631 	return -EINVAL;
3632 }
3633 
3634 static int gem_get_all_flow_entries(struct net_device *netdev,
3635 		struct ethtool_rxnfc *cmd, u32 *rule_locs)
3636 {
3637 	struct macb *bp = netdev_priv(netdev);
3638 	struct ethtool_rx_fs_item *item;
3639 	uint32_t cnt = 0;
3640 
3641 	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3642 		if (cnt == cmd->rule_cnt)
3643 			return -EMSGSIZE;
3644 		rule_locs[cnt] = item->fs.location;
3645 		cnt++;
3646 	}
3647 	cmd->data = bp->max_tuples;
3648 	cmd->rule_cnt = cnt;
3649 
3650 	return 0;
3651 }
3652 
3653 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3654 		u32 *rule_locs)
3655 {
3656 	struct macb *bp = netdev_priv(netdev);
3657 	int ret = 0;
3658 
3659 	switch (cmd->cmd) {
3660 	case ETHTOOL_GRXRINGS:
3661 		cmd->data = bp->num_queues;
3662 		break;
3663 	case ETHTOOL_GRXCLSRLCNT:
3664 		cmd->rule_cnt = bp->rx_fs_list.count;
3665 		break;
3666 	case ETHTOOL_GRXCLSRULE:
3667 		ret = gem_get_flow_entry(netdev, cmd);
3668 		break;
3669 	case ETHTOOL_GRXCLSRLALL:
3670 		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
3671 		break;
3672 	default:
3673 		netdev_err(netdev,
3674 			  "Command parameter %d is not supported\n", cmd->cmd);
3675 		ret = -EOPNOTSUPP;
3676 	}
3677 
3678 	return ret;
3679 }
3680 
3681 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3682 {
3683 	struct macb *bp = netdev_priv(netdev);
3684 	int ret;
3685 
3686 	switch (cmd->cmd) {
3687 	case ETHTOOL_SRXCLSRLINS:
3688 		if ((cmd->fs.location >= bp->max_tuples)
3689 				|| (cmd->fs.ring_cookie >= bp->num_queues)) {
3690 			ret = -EINVAL;
3691 			break;
3692 		}
3693 		ret = gem_add_flow_filter(netdev, cmd);
3694 		break;
3695 	case ETHTOOL_SRXCLSRLDEL:
3696 		ret = gem_del_flow_filter(netdev, cmd);
3697 		break;
3698 	default:
3699 		netdev_err(netdev,
3700 			  "Command parameter %d is not supported\n", cmd->cmd);
3701 		ret = -EOPNOTSUPP;
3702 	}
3703 
3704 	return ret;
3705 }
3706 
3707 static const struct ethtool_ops macb_ethtool_ops = {
3708 	.get_regs_len		= macb_get_regs_len,
3709 	.get_regs		= macb_get_regs,
3710 	.get_link		= ethtool_op_get_link,
3711 	.get_ts_info		= ethtool_op_get_ts_info,
3712 	.get_wol		= macb_get_wol,
3713 	.set_wol		= macb_set_wol,
3714 	.get_link_ksettings     = macb_get_link_ksettings,
3715 	.set_link_ksettings     = macb_set_link_ksettings,
3716 	.get_ringparam		= macb_get_ringparam,
3717 	.set_ringparam		= macb_set_ringparam,
3718 };
3719 
3720 static const struct ethtool_ops gem_ethtool_ops = {
3721 	.get_regs_len		= macb_get_regs_len,
3722 	.get_regs		= macb_get_regs,
3723 	.get_wol		= macb_get_wol,
3724 	.set_wol		= macb_set_wol,
3725 	.get_link		= ethtool_op_get_link,
3726 	.get_ts_info		= macb_get_ts_info,
3727 	.get_ethtool_stats	= gem_get_ethtool_stats,
3728 	.get_strings		= gem_get_ethtool_strings,
3729 	.get_sset_count		= gem_get_sset_count,
3730 	.get_link_ksettings     = macb_get_link_ksettings,
3731 	.set_link_ksettings     = macb_set_link_ksettings,
3732 	.get_ringparam		= macb_get_ringparam,
3733 	.set_ringparam		= macb_set_ringparam,
3734 	.get_rxnfc		= gem_get_rxnfc,
3735 	.set_rxnfc		= gem_set_rxnfc,
3736 };
3737 
3738 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3739 {
3740 	struct macb *bp = netdev_priv(dev);
3741 
3742 	if (!netif_running(dev))
3743 		return -EINVAL;
3744 
3745 	if (bp->ptp_info) {
3746 		switch (cmd) {
3747 		case SIOCSHWTSTAMP:
3748 			return bp->ptp_info->set_hwtst(dev, rq, cmd);
3749 		case SIOCGHWTSTAMP:
3750 			return bp->ptp_info->get_hwtst(dev, rq);
3751 		}
3752 	}
3753 
3754 	return phylink_mii_ioctl(bp->phylink, rq, cmd);
3755 }
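
/* Note: the SIOC[SG]HWTSTAMP paths above are normally exercised through
 * user-space tools such as hwstamp_ctl or ptp4l from the linuxptp project,
 * e.g. "hwstamp_ctl -i eth0 -t 1 -r 1" (interface name is an example only).
 */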
3756 
3757 static inline void macb_set_txcsum_feature(struct macb *bp,
3758 					   netdev_features_t features)
3759 {
3760 	u32 val;
3761 
3762 	if (!macb_is_gem(bp))
3763 		return;
3764 
3765 	val = gem_readl(bp, DMACFG);
3766 	if (features & NETIF_F_HW_CSUM)
3767 		val |= GEM_BIT(TXCOEN);
3768 	else
3769 		val &= ~GEM_BIT(TXCOEN);
3770 
3771 	gem_writel(bp, DMACFG, val);
3772 }
3773 
3774 static inline void macb_set_rxcsum_feature(struct macb *bp,
3775 					   netdev_features_t features)
3776 {
3777 	struct net_device *netdev = bp->dev;
3778 	u32 val;
3779 
3780 	if (!macb_is_gem(bp))
3781 		return;
3782 
3783 	val = gem_readl(bp, NCFGR);
3784 	if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
3785 		val |= GEM_BIT(RXCOEN);
3786 	else
3787 		val &= ~GEM_BIT(RXCOEN);
3788 
3789 	gem_writel(bp, NCFGR, val);
3790 }
3791 
3792 static inline void macb_set_rxflow_feature(struct macb *bp,
3793 					   netdev_features_t features)
3794 {
3795 	if (!macb_is_gem(bp))
3796 		return;
3797 
3798 	gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
3799 }
3800 
3801 static int macb_set_features(struct net_device *netdev,
3802 			     netdev_features_t features)
3803 {
3804 	struct macb *bp = netdev_priv(netdev);
3805 	netdev_features_t changed = features ^ netdev->features;
3806 
3807 	/* TX checksum offload */
3808 	if (changed & NETIF_F_HW_CSUM)
3809 		macb_set_txcsum_feature(bp, features);
3810 
3811 	/* RX checksum offload */
3812 	if (changed & NETIF_F_RXCSUM)
3813 		macb_set_rxcsum_feature(bp, features);
3814 
3815 	/* RX Flow Filters */
3816 	if (changed & NETIF_F_NTUPLE)
3817 		macb_set_rxflow_feature(bp, features);
3818 
3819 	return 0;
3820 }
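
/* For illustration: macb_set_features() is reached via .ndo_set_features
 * when user space toggles an offload, e.g. "ethtool -K eth0 ntuple on" for
 * the RX flow filters or "ethtool -K eth0 tx off" for TX checksum offload
 * (interface name is an example only).
 */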
3821 
3822 static void macb_restore_features(struct macb *bp)
3823 {
3824 	struct net_device *netdev = bp->dev;
3825 	netdev_features_t features = netdev->features;
3826 	struct ethtool_rx_fs_item *item;
3827 
3828 	/* TX checksum offload */
3829 	macb_set_txcsum_feature(bp, features);
3830 
3831 	/* RX checksum offload */
3832 	macb_set_rxcsum_feature(bp, features);
3833 
3834 	/* RX Flow Filters */
3835 	list_for_each_entry(item, &bp->rx_fs_list.list, list)
3836 		gem_prog_cmp_regs(bp, &item->fs);
3837 
3838 	macb_set_rxflow_feature(bp, features);
3839 }
3840 
3841 static const struct net_device_ops macb_netdev_ops = {
3842 	.ndo_open		= macb_open,
3843 	.ndo_stop		= macb_close,
3844 	.ndo_start_xmit		= macb_start_xmit,
3845 	.ndo_set_rx_mode	= macb_set_rx_mode,
3846 	.ndo_get_stats		= macb_get_stats,
3847 	.ndo_eth_ioctl		= macb_ioctl,
3848 	.ndo_validate_addr	= eth_validate_addr,
3849 	.ndo_change_mtu		= macb_change_mtu,
3850 	.ndo_set_mac_address	= macb_set_mac_addr,
3851 #ifdef CONFIG_NET_POLL_CONTROLLER
3852 	.ndo_poll_controller	= macb_poll_controller,
3853 #endif
3854 	.ndo_set_features	= macb_set_features,
3855 	.ndo_features_check	= macb_features_check,
3856 };
3857 
3858 /* Configure peripheral capabilities according to device tree
3859  * and integration options used
3860  */
3861 static void macb_configure_caps(struct macb *bp,
3862 				const struct macb_config *dt_conf)
3863 {
3864 	u32 dcfg;
3865 
3866 	if (dt_conf)
3867 		bp->caps = dt_conf->caps;
3868 
3869 	if (hw_is_gem(bp->regs, bp->native_io)) {
3870 		bp->caps |= MACB_CAPS_MACB_IS_GEM;
3871 
3872 		dcfg = gem_readl(bp, DCFG1);
3873 		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
3874 			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3875 		if (GEM_BFEXT(NO_PCS, dcfg) == 0)
3876 			bp->caps |= MACB_CAPS_PCS;
3877 		dcfg = gem_readl(bp, DCFG12);
3878 		if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1)
3879 			bp->caps |= MACB_CAPS_HIGH_SPEED;
3880 		dcfg = gem_readl(bp, DCFG2);
3881 		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
3882 			bp->caps |= MACB_CAPS_FIFO_MODE;
3883 #ifdef CONFIG_MACB_USE_HWSTAMP
3884 		if (gem_has_ptp(bp)) {
3885 			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
3886 				dev_err(&bp->pdev->dev,
3887 					"GEM doesn't support hardware ptp.\n");
3888 			else {
3889 				bp->hw_dma_cap |= HW_DMA_CAP_PTP;
3890 				bp->ptp_info = &gem_ptp_info;
3891 			}
3892 		}
3893 #endif
3894 	}
3895 
3896 	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
3897 }
3898 
3899 static void macb_probe_queues(void __iomem *mem,
3900 			      bool native_io,
3901 			      unsigned int *queue_mask,
3902 			      unsigned int *num_queues)
3903 {
3904 	*queue_mask = 0x1;
3905 	*num_queues = 1;
3906 
3907 	/* Is it MACB or GEM?
3908 	 *
3909 	 * We need to read directly from the hardware here because we are
3910 	 * early in the probe process and the MACB_CAPS_MACB_IS_GEM flag
3911 	 * is not set yet.
3912 	 */
3913 	if (!hw_is_gem(mem, native_io))
3914 		return;
3915 
3916 	/* bit 0 is never set but queue 0 always exists */
3917 	*queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
3918 	*num_queues = hweight32(*queue_mask);
3919 }
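
/* Worked example (register value assumed): if the low byte of DCFG6 reads
 * 0x0e, queues 1-3 are present in addition to the always-present queue 0,
 * so *queue_mask becomes 0x0f and *num_queues becomes 4.
 */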
3920 
3921 static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
3922 			      struct clk *rx_clk, struct clk *tsu_clk)
3923 {
3924 	struct clk_bulk_data clks[] = {
3925 		{ .clk = tsu_clk, },
3926 		{ .clk = rx_clk, },
3927 		{ .clk = pclk, },
3928 		{ .clk = hclk, },
3929 		{ .clk = tx_clk },
3930 	};
3931 
3932 	clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
3933 }
3934 
3935 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
3936 			 struct clk **hclk, struct clk **tx_clk,
3937 			 struct clk **rx_clk, struct clk **tsu_clk)
3938 {
3939 	struct macb_platform_data *pdata;
3940 	int err;
3941 
3942 	pdata = dev_get_platdata(&pdev->dev);
3943 	if (pdata) {
3944 		*pclk = pdata->pclk;
3945 		*hclk = pdata->hclk;
3946 	} else {
3947 		*pclk = devm_clk_get(&pdev->dev, "pclk");
3948 		*hclk = devm_clk_get(&pdev->dev, "hclk");
3949 	}
3950 
3951 	if (IS_ERR_OR_NULL(*pclk))
3952 		return dev_err_probe(&pdev->dev,
3953 				     IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV,
3954 				     "failed to get pclk\n");
3955 
3956 	if (IS_ERR_OR_NULL(*hclk))
3957 		return dev_err_probe(&pdev->dev,
3958 				     IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV,
3959 				     "failed to get hclk\n");
3960 
3961 	*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
3962 	if (IS_ERR(*tx_clk))
3963 		return PTR_ERR(*tx_clk);
3964 
3965 	*rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
3966 	if (IS_ERR(*rx_clk))
3967 		return PTR_ERR(*rx_clk);
3968 
3969 	*tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
3970 	if (IS_ERR(*tsu_clk))
3971 		return PTR_ERR(*tsu_clk);
3972 
3973 	err = clk_prepare_enable(*pclk);
3974 	if (err) {
3975 		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3976 		return err;
3977 	}
3978 
3979 	err = clk_prepare_enable(*hclk);
3980 	if (err) {
3981 		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
3982 		goto err_disable_pclk;
3983 	}
3984 
3985 	err = clk_prepare_enable(*tx_clk);
3986 	if (err) {
3987 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
3988 		goto err_disable_hclk;
3989 	}
3990 
3991 	err = clk_prepare_enable(*rx_clk);
3992 	if (err) {
3993 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
3994 		goto err_disable_txclk;
3995 	}
3996 
3997 	err = clk_prepare_enable(*tsu_clk);
3998 	if (err) {
3999 		dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
4000 		goto err_disable_rxclk;
4001 	}
4002 
4003 	return 0;
4004 
4005 err_disable_rxclk:
4006 	clk_disable_unprepare(*rx_clk);
4007 
4008 err_disable_txclk:
4009 	clk_disable_unprepare(*tx_clk);
4010 
4011 err_disable_hclk:
4012 	clk_disable_unprepare(*hclk);
4013 
4014 err_disable_pclk:
4015 	clk_disable_unprepare(*pclk);
4016 
4017 	return err;
4018 }
4019 
4020 static int macb_init(struct platform_device *pdev)
4021 {
4022 	struct net_device *dev = platform_get_drvdata(pdev);
4023 	unsigned int hw_q, q;
4024 	struct macb *bp = netdev_priv(dev);
4025 	struct macb_queue *queue;
4026 	int err;
4027 	u32 val, reg;
4028 
4029 	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
4030 	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
4031 
4032 	/* Set the queue register mapping once and for all: queue0 has a
4033 	 * special register layout, but we don't want to test the queue index
4034 	 * and then compute the corresponding register offset at run time.
4035 	 */
4036 	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
4037 		if (!(bp->queue_mask & (1 << hw_q)))
4038 			continue;
4039 
4040 		queue = &bp->queues[q];
4041 		queue->bp = bp;
4042 		spin_lock_init(&queue->tx_ptr_lock);
4043 		netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
4044 		netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
4045 		if (hw_q) {
4046 			queue->ISR  = GEM_ISR(hw_q - 1);
4047 			queue->IER  = GEM_IER(hw_q - 1);
4048 			queue->IDR  = GEM_IDR(hw_q - 1);
4049 			queue->IMR  = GEM_IMR(hw_q - 1);
4050 			queue->TBQP = GEM_TBQP(hw_q - 1);
4051 			queue->RBQP = GEM_RBQP(hw_q - 1);
4052 			queue->RBQS = GEM_RBQS(hw_q - 1);
4053 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4054 			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
4055 				queue->TBQPH = GEM_TBQPH(hw_q - 1);
4056 				queue->RBQPH = GEM_RBQPH(hw_q - 1);
4057 			}
4058 #endif
4059 		} else {
4060 			/* queue0 uses legacy registers */
4061 			queue->ISR  = MACB_ISR;
4062 			queue->IER  = MACB_IER;
4063 			queue->IDR  = MACB_IDR;
4064 			queue->IMR  = MACB_IMR;
4065 			queue->TBQP = MACB_TBQP;
4066 			queue->RBQP = MACB_RBQP;
4067 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4068 			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
4069 				queue->TBQPH = MACB_TBQPH;
4070 				queue->RBQPH = MACB_RBQPH;
4071 			}
4072 #endif
4073 		}
4074 
4075 		/* Get the IRQ: here we use the Linux queue index, not the
4076 		 * hardware queue index. The queue IRQ definitions in the
4077 		 * device tree must not contain the optional gaps that may
4078 		 * exist in the hardware queue mask.
4079 		 */
4080 		queue->irq = platform_get_irq(pdev, q);
4081 		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
4082 				       IRQF_SHARED, dev->name, queue);
4083 		if (err) {
4084 			dev_err(&pdev->dev,
4085 				"Unable to request IRQ %d (error %d)\n",
4086 				queue->irq, err);
4087 			return err;
4088 		}
4089 
4090 		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
4091 		q++;
4092 	}
4093 
4094 	dev->netdev_ops = &macb_netdev_ops;
4095 
4096 	/* set up the appropriate routines according to the adapter type */
4097 	if (macb_is_gem(bp)) {
4098 		bp->max_tx_length = GEM_MAX_TX_LEN;
4099 		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
4100 		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
4101 		bp->macbgem_ops.mog_init_rings = gem_init_rings;
4102 		bp->macbgem_ops.mog_rx = gem_rx;
4103 		dev->ethtool_ops = &gem_ethtool_ops;
4104 	} else {
4105 		bp->max_tx_length = MACB_MAX_TX_LEN;
4106 		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
4107 		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
4108 		bp->macbgem_ops.mog_init_rings = macb_init_rings;
4109 		bp->macbgem_ops.mog_rx = macb_rx;
4110 		dev->ethtool_ops = &macb_ethtool_ops;
4111 	}
4112 
4113 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4114 
4115 	/* Set features */
4116 	dev->hw_features = NETIF_F_SG;
4117 
4118 	/* Check LSO capability */
4119 	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
4120 		dev->hw_features |= MACB_NETIF_LSO;
4121 
4122 	/* Checksum offload is only available on gem with packet buffer */
4123 	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
4124 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
4125 	if (bp->caps & MACB_CAPS_SG_DISABLED)
4126 		dev->hw_features &= ~NETIF_F_SG;
4127 	dev->features = dev->hw_features;
4128 
4129 	/* Check RX Flow Filters support.
4130 	 * The maximum number of Rx flows is set by the availability of
4131 	 * screeners & compare regs: each 4-tuple definition requires
4132 	 * 1 T2 screener reg + 3 compare regs.
4133 	 */
4133 	reg = gem_readl(bp, DCFG8);
4134 	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
4135 			GEM_BFEXT(T2SCR, reg));
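	/* Worked example (DCFG8 values assumed): with 12 compare regs and
	 * 8 type-2 screeners, max_tuples = min(12 / 3, 8) = 4 flow filters.
	 */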
4136 	INIT_LIST_HEAD(&bp->rx_fs_list.list);
4137 	if (bp->max_tuples > 0) {
4138 		/* also needs one ethtype match to check IPv4 */
4139 		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
4140 			/* program this reg now */
4141 			reg = 0;
4142 			reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
4143 			gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
4144 			/* Filtering is supported in hardware, but leave it disabled in the kernel for now */
4145 			dev->hw_features |= NETIF_F_NTUPLE;
4146 			/* init Rx flow definitions */
4147 			bp->rx_fs_list.count = 0;
4148 			spin_lock_init(&bp->rx_fs_lock);
4149 		} else
4150 			bp->max_tuples = 0;
4151 	}
4152 
4153 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
4154 		val = 0;
4155 		if (phy_interface_mode_is_rgmii(bp->phy_interface))
4156 			val = bp->usrio->rgmii;
4157 		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
4158 			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4159 			val = bp->usrio->rmii;
4160 		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4161 			val = bp->usrio->mii;
4162 
4163 		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
4164 			val |= bp->usrio->refclk;
4165 
4166 		macb_or_gem_writel(bp, USRIO, val);
4167 	}
4168 
4169 	/* Set MII management clock divider */
4170 	val = macb_mdc_clk_div(bp);
4171 	val |= macb_dbw(bp);
4172 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
4173 		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
4174 	macb_writel(bp, NCFGR, val);
4175 
4176 	return 0;
4177 }
4178 
4179 static const struct macb_usrio_config macb_default_usrio = {
4180 	.mii = MACB_BIT(MII),
4181 	.rmii = MACB_BIT(RMII),
4182 	.rgmii = GEM_BIT(RGMII),
4183 	.refclk = MACB_BIT(CLKEN),
4184 };
4185 
4186 #if defined(CONFIG_OF)
4187 /* 1518 rounded up to 0x600 (1536) */
4188 #define AT91ETHER_MAX_RBUFF_SZ	0x600
4189 /* max number of receive buffers */
4190 #define AT91ETHER_MAX_RX_DESCR	9
4191 
4192 static struct sifive_fu540_macb_mgmt *mgmt;
4193 
4194 static int at91ether_alloc_coherent(struct macb *lp)
4195 {
4196 	struct macb_queue *q = &lp->queues[0];
4197 
4198 	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
4199 					 (AT91ETHER_MAX_RX_DESCR *
4200 					  macb_dma_desc_get_size(lp)),
4201 					 &q->rx_ring_dma, GFP_KERNEL);
4202 	if (!q->rx_ring)
4203 		return -ENOMEM;
4204 
4205 	q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
4206 					    AT91ETHER_MAX_RX_DESCR *
4207 					    AT91ETHER_MAX_RBUFF_SZ,
4208 					    &q->rx_buffers_dma, GFP_KERNEL);
4209 	if (!q->rx_buffers) {
4210 		dma_free_coherent(&lp->pdev->dev,
4211 				  AT91ETHER_MAX_RX_DESCR *
4212 				  macb_dma_desc_get_size(lp),
4213 				  q->rx_ring, q->rx_ring_dma);
4214 		q->rx_ring = NULL;
4215 		return -ENOMEM;
4216 	}
4217 
4218 	return 0;
4219 }
4220 
4221 static void at91ether_free_coherent(struct macb *lp)
4222 {
4223 	struct macb_queue *q = &lp->queues[0];
4224 
4225 	if (q->rx_ring) {
4226 		dma_free_coherent(&lp->pdev->dev,
4227 				  AT91ETHER_MAX_RX_DESCR *
4228 				  macb_dma_desc_get_size(lp),
4229 				  q->rx_ring, q->rx_ring_dma);
4230 		q->rx_ring = NULL;
4231 	}
4232 
4233 	if (q->rx_buffers) {
4234 		dma_free_coherent(&lp->pdev->dev,
4235 				  AT91ETHER_MAX_RX_DESCR *
4236 				  AT91ETHER_MAX_RBUFF_SZ,
4237 				  q->rx_buffers, q->rx_buffers_dma);
4238 		q->rx_buffers = NULL;
4239 	}
4240 }
4241 
4242 /* Initialize and start the Receiver and Transmit subsystems */
4243 static int at91ether_start(struct macb *lp)
4244 {
4245 	struct macb_queue *q = &lp->queues[0];
4246 	struct macb_dma_desc *desc;
4247 	dma_addr_t addr;
4248 	u32 ctl;
4249 	int i, ret;
4250 
4251 	ret = at91ether_alloc_coherent(lp);
4252 	if (ret)
4253 		return ret;
4254 
4255 	addr = q->rx_buffers_dma;
4256 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
4257 		desc = macb_rx_desc(q, i);
4258 		macb_set_addr(lp, desc, addr);
4259 		desc->ctrl = 0;
4260 		addr += AT91ETHER_MAX_RBUFF_SZ;
4261 	}
4262 
4263 	/* Set the Wrap bit on the last descriptor */
4264 	desc->addr |= MACB_BIT(RX_WRAP);
4265 
4266 	/* Reset buffer index */
4267 	q->rx_tail = 0;
4268 
4269 	/* Program address of descriptor list in Rx Buffer Queue register */
4270 	macb_writel(lp, RBQP, q->rx_ring_dma);
4271 
4272 	/* Enable Receive and Transmit */
4273 	ctl = macb_readl(lp, NCR);
4274 	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
4275 
4276 	/* Enable MAC interrupts */
4277 	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
4278 			     MACB_BIT(RXUBR)	|
4279 			     MACB_BIT(ISR_TUND)	|
4280 			     MACB_BIT(ISR_RLE)	|
4281 			     MACB_BIT(TCOMP)	|
4282 			     MACB_BIT(ISR_ROVR)	|
4283 			     MACB_BIT(HRESP));
4284 
4285 	return 0;
4286 }
4287 
4288 static void at91ether_stop(struct macb *lp)
4289 {
4290 	u32 ctl;
4291 
4292 	/* Disable MAC interrupts */
4293 	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
4294 			     MACB_BIT(RXUBR)	|
4295 			     MACB_BIT(ISR_TUND)	|
4296 			     MACB_BIT(ISR_RLE)	|
4297 			     MACB_BIT(TCOMP)	|
4298 			     MACB_BIT(ISR_ROVR) |
4299 			     MACB_BIT(HRESP));
4300 
4301 	/* Disable Receiver and Transmitter */
4302 	ctl = macb_readl(lp, NCR);
4303 	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
4304 
4305 	/* Free resources. */
4306 	at91ether_free_coherent(lp);
4307 }
4308 
4309 /* Open the ethernet interface */
4310 static int at91ether_open(struct net_device *dev)
4311 {
4312 	struct macb *lp = netdev_priv(dev);
4313 	u32 ctl;
4314 	int ret;
4315 
4316 	ret = pm_runtime_resume_and_get(&lp->pdev->dev);
4317 	if (ret < 0)
4318 		return ret;
4319 
4320 	/* Clear internal statistics */
4321 	ctl = macb_readl(lp, NCR);
4322 	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
4323 
4324 	macb_set_hwaddr(lp);
4325 
4326 	ret = at91ether_start(lp);
4327 	if (ret)
4328 		goto pm_exit;
4329 
4330 	ret = macb_phylink_connect(lp);
4331 	if (ret)
4332 		goto stop;
4333 
4334 	netif_start_queue(dev);
4335 
4336 	return 0;
4337 
4338 stop:
4339 	at91ether_stop(lp);
4340 pm_exit:
4341 	pm_runtime_put_sync(&lp->pdev->dev);
4342 	return ret;
4343 }
4344 
4345 /* Close the interface */
4346 static int at91ether_close(struct net_device *dev)
4347 {
4348 	struct macb *lp = netdev_priv(dev);
4349 
4350 	netif_stop_queue(dev);
4351 
4352 	phylink_stop(lp->phylink);
4353 	phylink_disconnect_phy(lp->phylink);
4354 
4355 	at91ether_stop(lp);
4356 
4357 	return pm_runtime_put(&lp->pdev->dev);
4358 }
4359 
4360 /* Transmit packet */
4361 static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
4362 					struct net_device *dev)
4363 {
4364 	struct macb *lp = netdev_priv(dev);
4365 
4366 	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
4367 		int desc = 0;
4368 
4369 		netif_stop_queue(dev);
4370 
4371 		/* Store packet information (to free when Tx completed) */
4372 		lp->rm9200_txq[desc].skb = skb;
4373 		lp->rm9200_txq[desc].size = skb->len;
4374 		lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
4375 							      skb->len, DMA_TO_DEVICE);
4376 		if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
4377 			dev_kfree_skb_any(skb);
4378 			dev->stats.tx_dropped++;
4379 			netdev_err(dev, "%s: DMA mapping error\n", __func__);
4380 			return NETDEV_TX_OK;
4381 		}
4382 
4383 		/* Set address of the data in the Transmit Address register */
4384 		macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
4385 		/* Set length of the packet in the Transmit Control register */
4386 		macb_writel(lp, TCR, skb->len);
4387 
4388 	} else {
4389 		netdev_err(dev, "%s called, but device is busy!\n", __func__);
4390 		return NETDEV_TX_BUSY;
4391 	}
4392 
4393 	return NETDEV_TX_OK;
4394 }
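
/* Design note: unlike the descriptor-ring TX path in macb_start_xmit(),
 * the RM9200 EMAC transmits a single frame at a time: the queue is
 * stopped, the buffer address is written to TAR and its length to TCR,
 * and the queue is only woken again from at91ether_interrupt() once
 * TCOMP is seen.
 */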
4395 
4396 /* Extract received frames from the buffer descriptors and send them to the
4397  * upper layers. (Called from interrupt context.)
4398  */
4399 static void at91ether_rx(struct net_device *dev)
4400 {
4401 	struct macb *lp = netdev_priv(dev);
4402 	struct macb_queue *q = &lp->queues[0];
4403 	struct macb_dma_desc *desc;
4404 	unsigned char *p_recv;
4405 	struct sk_buff *skb;
4406 	unsigned int pktlen;
4407 
4408 	desc = macb_rx_desc(q, q->rx_tail);
4409 	while (desc->addr & MACB_BIT(RX_USED)) {
4410 		p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
4411 		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
4412 		skb = netdev_alloc_skb(dev, pktlen + 2);
4413 		if (skb) {
4414 			skb_reserve(skb, 2);
4415 			skb_put_data(skb, p_recv, pktlen);
4416 
4417 			skb->protocol = eth_type_trans(skb, dev);
4418 			dev->stats.rx_packets++;
4419 			dev->stats.rx_bytes += pktlen;
4420 			netif_rx(skb);
4421 		} else {
4422 			dev->stats.rx_dropped++;
4423 		}
4424 
4425 		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
4426 			dev->stats.multicast++;
4427 
4428 		/* reset ownership bit */
4429 		desc->addr &= ~MACB_BIT(RX_USED);
4430 
4431 		/* wrap after last buffer */
4432 		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
4433 			q->rx_tail = 0;
4434 		else
4435 			q->rx_tail++;
4436 
4437 		desc = macb_rx_desc(q, q->rx_tail);
4438 	}
4439 }
4440 
4441 /* MAC interrupt handler */
4442 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
4443 {
4444 	struct net_device *dev = dev_id;
4445 	struct macb *lp = netdev_priv(dev);
4446 	u32 intstatus, ctl;
4447 	unsigned int desc;
4448 
4449 	/* MAC Interrupt Status register indicates what interrupts are pending.
4450 	 * It is automatically cleared once read.
4451 	 */
4452 	intstatus = macb_readl(lp, ISR);
4453 
4454 	/* Receive complete */
4455 	if (intstatus & MACB_BIT(RCOMP))
4456 		at91ether_rx(dev);
4457 
4458 	/* Transmit complete */
4459 	if (intstatus & MACB_BIT(TCOMP)) {
4460 		/* The TCOMP bit is set even if the transmission failed */
4461 		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
4462 			dev->stats.tx_errors++;
4463 
4464 		desc = 0;
4465 		if (lp->rm9200_txq[desc].skb) {
4466 			dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
4467 			lp->rm9200_txq[desc].skb = NULL;
4468 			dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
4469 					 lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
4470 			dev->stats.tx_packets++;
4471 			dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
4472 		}
4473 		netif_wake_queue(dev);
4474 	}
4475 
4476 	/* Work-around for EMAC Errata section 41.3.1 */
4477 	if (intstatus & MACB_BIT(RXUBR)) {
4478 		ctl = macb_readl(lp, NCR);
4479 		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
4480 		wmb();
4481 		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
4482 	}
4483 
4484 	if (intstatus & MACB_BIT(ISR_ROVR))
4485 		netdev_err(dev, "ROVR error\n");
4486 
4487 	return IRQ_HANDLED;
4488 }
4489 
4490 #ifdef CONFIG_NET_POLL_CONTROLLER
4491 static void at91ether_poll_controller(struct net_device *dev)
4492 {
4493 	unsigned long flags;
4494 
4495 	local_irq_save(flags);
4496 	at91ether_interrupt(dev->irq, dev);
4497 	local_irq_restore(flags);
4498 }
4499 #endif
4500 
4501 static const struct net_device_ops at91ether_netdev_ops = {
4502 	.ndo_open		= at91ether_open,
4503 	.ndo_stop		= at91ether_close,
4504 	.ndo_start_xmit		= at91ether_start_xmit,
4505 	.ndo_get_stats		= macb_get_stats,
4506 	.ndo_set_rx_mode	= macb_set_rx_mode,
4507 	.ndo_set_mac_address	= eth_mac_addr,
4508 	.ndo_eth_ioctl		= macb_ioctl,
4509 	.ndo_validate_addr	= eth_validate_addr,
4510 #ifdef CONFIG_NET_POLL_CONTROLLER
4511 	.ndo_poll_controller	= at91ether_poll_controller,
4512 #endif
4513 };
4514 
4515 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
4516 			      struct clk **hclk, struct clk **tx_clk,
4517 			      struct clk **rx_clk, struct clk **tsu_clk)
4518 {
4519 	int err;
4520 
4521 	*hclk = NULL;
4522 	*tx_clk = NULL;
4523 	*rx_clk = NULL;
4524 	*tsu_clk = NULL;
4525 
4526 	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
4527 	if (IS_ERR(*pclk))
4528 		return PTR_ERR(*pclk);
4529 
4530 	err = clk_prepare_enable(*pclk);
4531 	if (err) {
4532 		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
4533 		return err;
4534 	}
4535 
4536 	return 0;
4537 }
4538 
4539 static int at91ether_init(struct platform_device *pdev)
4540 {
4541 	struct net_device *dev = platform_get_drvdata(pdev);
4542 	struct macb *bp = netdev_priv(dev);
4543 	int err;
4544 
4545 	bp->queues[0].bp = bp;
4546 
4547 	dev->netdev_ops = &at91ether_netdev_ops;
4548 	dev->ethtool_ops = &macb_ethtool_ops;
4549 
4550 	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
4551 			       0, dev->name, dev);
4552 	if (err)
4553 		return err;
4554 
4555 	macb_writel(bp, NCR, 0);
4556 
4557 	macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
4558 
4559 	return 0;
4560 }
4561 
4562 static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
4563 					       unsigned long parent_rate)
4564 {
4565 	return mgmt->rate;
4566 }
4567 
4568 static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
4569 				     unsigned long *parent_rate)
4570 {
4571 	if (WARN_ON(rate < 2500000))
4572 		return 2500000;
4573 	else if (rate == 2500000)
4574 		return 2500000;
4575 	else if (WARN_ON(rate < 13750000))
4576 		return 2500000;
4577 	else if (WARN_ON(rate < 25000000))
4578 		return 25000000;
4579 	else if (rate == 25000000)
4580 		return 25000000;
4581 	else if (WARN_ON(rate < 75000000))
4582 		return 25000000;
4583 	else if (WARN_ON(rate < 125000000))
4584 		return 125000000;
4585 	else if (rate == 125000000)
4586 		return 125000000;
4587 
4588 	WARN_ON(rate > 125000000);
4589 
4590 	return 125000000;
4591 }
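
/* The GEMGXL TX clock only supports the three RGMII rates: 2.5 MHz, 25 MHz
 * and 125 MHz for 10/100/1000 Mbps links. The ladder above snaps a
 * requested rate to the nearest supported one, with the cut-off points at
 * the midpoints 13.75 MHz ((2.5 + 25) / 2) and 75 MHz ((25 + 125) / 2);
 * the WARN_ONs flag requests that are not exactly one of the supported
 * rates. fu540_macb_tx_set_rate() below then drives the management
 * register, which evidently acts as a single switch: 0 selects the
 * 125 MHz (gigabit) TX clock, 1 selects the lower-speed clock.
 */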
4592 
4593 static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
4594 				  unsigned long parent_rate)
4595 {
4596 	rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
4597 	if (rate != 125000000)
4598 		iowrite32(1, mgmt->reg);
4599 	else
4600 		iowrite32(0, mgmt->reg);
4601 	mgmt->rate = rate;
4602 
4603 	return 0;
4604 }
4605 
4606 static const struct clk_ops fu540_c000_ops = {
4607 	.recalc_rate = fu540_macb_tx_recalc_rate,
4608 	.round_rate = fu540_macb_tx_round_rate,
4609 	.set_rate = fu540_macb_tx_set_rate,
4610 };
4611 
4612 static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
4613 			       struct clk **hclk, struct clk **tx_clk,
4614 			       struct clk **rx_clk, struct clk **tsu_clk)
4615 {
4616 	struct clk_init_data init;
4617 	int err = 0;
4618 
4619 	err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
4620 	if (err)
4621 		return err;
4622 
4623 	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
4624 	if (!mgmt) {
4625 		err = -ENOMEM;
4626 		goto err_disable_clks;
4627 	}
4628 
4629 	init.name = "sifive-gemgxl-mgmt";
4630 	init.ops = &fu540_c000_ops;
4631 	init.flags = 0;
4632 	init.num_parents = 0;
4633 
4634 	mgmt->rate = 0;
4635 	mgmt->hw.init = &init;
4636 
4637 	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
4638 	if (IS_ERR(*tx_clk)) {
4639 		err = PTR_ERR(*tx_clk);
4640 		goto err_disable_clks;
4641 	}
4642 
4643 	err = clk_prepare_enable(*tx_clk);
4644 	if (err) {
4645 		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
4646 		*tx_clk = NULL;
4647 		goto err_disable_clks;
4648 	} else {
4649 		dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
4650 	}
4651 
4652 	return 0;
4653 
4654 err_disable_clks:
4655 	macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk);
4656 
4657 	return err;
4658 }
4659 
4660 static int fu540_c000_init(struct platform_device *pdev)
4661 {
4662 	mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
4663 	if (IS_ERR(mgmt->reg))
4664 		return PTR_ERR(mgmt->reg);
4665 
4666 	return macb_init(pdev);
4667 }
4668 
4669 static int init_reset_optional(struct platform_device *pdev)
4670 {
4671 	struct net_device *dev = platform_get_drvdata(pdev);
4672 	struct macb *bp = netdev_priv(dev);
4673 	int ret;
4674 
4675 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4676 		/* Ensure PHY device used in SGMII mode is ready */
4677 		bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
4678 
4679 		if (IS_ERR(bp->sgmii_phy))
4680 			return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
4681 					     "failed to get SGMII PHY\n");
4682 
4683 		ret = phy_init(bp->sgmii_phy);
4684 		if (ret)
4685 			return dev_err_probe(&pdev->dev, ret,
4686 					     "failed to init SGMII PHY\n");
4687 	}
4688 
4689 	ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
4690 	if (!ret) {
4691 		u32 pm_info[2];
4692 
4693 		ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
4694 						 pm_info, ARRAY_SIZE(pm_info));
4695 		if (ret) {
4696 			dev_err(&pdev->dev, "Failed to read power management information\n");
4697 			goto err_out_phy_exit;
4698 		}
4699 		ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
4700 		if (ret)
4701 			goto err_out_phy_exit;
4702 
4703 		ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
4704 		if (ret)
4705 			goto err_out_phy_exit;
4706 	}
4707 
4708 	/* Fully reset controller at hardware level if mapped in device tree */
4709 	ret = device_reset_optional(&pdev->dev);
4710 	if (ret) {
4711 		phy_exit(bp->sgmii_phy);
4712 		return dev_err_probe(&pdev->dev, ret, "failed to reset controller\n");
4713 	}
4714 
4715 	ret = macb_init(pdev);
4716 
4717 err_out_phy_exit:
4718 	if (ret)
4719 		phy_exit(bp->sgmii_phy);
4720 
4721 	return ret;
4722 }
4723 
4724 static const struct macb_usrio_config sama7g5_usrio = {
4725 	.mii = 0,
4726 	.rmii = 1,
4727 	.rgmii = 2,
4728 	.refclk = BIT(2),
4729 	.hdfctlen = BIT(6),
4730 };
4731 
4732 static const struct macb_config fu540_c000_config = {
4733 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
4734 		MACB_CAPS_GEM_HAS_PTP,
4735 	.dma_burst_length = 16,
4736 	.clk_init = fu540_c000_clk_init,
4737 	.init = fu540_c000_init,
4738 	.jumbo_max_len = 10240,
4739 	.usrio = &macb_default_usrio,
4740 };
4741 
4742 static const struct macb_config at91sam9260_config = {
4743 	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4744 	.clk_init = macb_clk_init,
4745 	.init = macb_init,
4746 	.usrio = &macb_default_usrio,
4747 };
4748 
4749 static const struct macb_config sama5d3macb_config = {
4750 	.caps = MACB_CAPS_SG_DISABLED |
4751 		MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4752 	.clk_init = macb_clk_init,
4753 	.init = macb_init,
4754 	.usrio = &macb_default_usrio,
4755 };
4756 
4757 static const struct macb_config pc302gem_config = {
4758 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
4759 	.dma_burst_length = 16,
4760 	.clk_init = macb_clk_init,
4761 	.init = macb_init,
4762 	.usrio = &macb_default_usrio,
4763 };
4764 
4765 static const struct macb_config sama5d2_config = {
4766 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4767 	.dma_burst_length = 16,
4768 	.clk_init = macb_clk_init,
4769 	.init = macb_init,
4770 	.usrio = &macb_default_usrio,
4771 };
4772 
4773 static const struct macb_config sama5d29_config = {
4774 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP,
4775 	.dma_burst_length = 16,
4776 	.clk_init = macb_clk_init,
4777 	.init = macb_init,
4778 	.usrio = &macb_default_usrio,
4779 };
4780 
4781 static const struct macb_config sama5d3_config = {
4782 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4783 		MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
4784 	.dma_burst_length = 16,
4785 	.clk_init = macb_clk_init,
4786 	.init = macb_init,
4787 	.jumbo_max_len = 10240,
4788 	.usrio = &macb_default_usrio,
4789 };
4790 
4791 static const struct macb_config sama5d4_config = {
4792 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4793 	.dma_burst_length = 4,
4794 	.clk_init = macb_clk_init,
4795 	.init = macb_init,
4796 	.usrio = &macb_default_usrio,
4797 };
4798 
4799 static const struct macb_config emac_config = {
4800 	.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
4801 	.clk_init = at91ether_clk_init,
4802 	.init = at91ether_init,
4803 	.usrio = &macb_default_usrio,
4804 };
4805 
4806 static const struct macb_config np4_config = {
4807 	.caps = MACB_CAPS_USRIO_DISABLED,
4808 	.clk_init = macb_clk_init,
4809 	.init = macb_init,
4810 	.usrio = &macb_default_usrio,
4811 };
4812 
4813 static const struct macb_config zynqmp_config = {
4814 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4815 		MACB_CAPS_JUMBO |
4816 		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
4817 	.dma_burst_length = 16,
4818 	.clk_init = macb_clk_init,
4819 	.init = init_reset_optional,
4820 	.jumbo_max_len = 10240,
4821 	.usrio = &macb_default_usrio,
4822 };
4823 
4824 static const struct macb_config zynq_config = {
4825 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
4826 		MACB_CAPS_NEEDS_RSTONUBR,
4827 	.dma_burst_length = 16,
4828 	.clk_init = macb_clk_init,
4829 	.init = macb_init,
4830 	.usrio = &macb_default_usrio,
4831 };
4832 
4833 static const struct macb_config mpfs_config = {
4834 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4835 		MACB_CAPS_JUMBO |
4836 		MACB_CAPS_GEM_HAS_PTP,
4837 	.dma_burst_length = 16,
4838 	.clk_init = macb_clk_init,
4839 	.init = init_reset_optional,
4840 	.usrio = &macb_default_usrio,
4841 	.jumbo_max_len = 10240,
4842 };
4843 
4844 static const struct macb_config sama7g5_gem_config = {
4845 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
4846 		MACB_CAPS_MIIONRGMII,
4847 	.dma_burst_length = 16,
4848 	.clk_init = macb_clk_init,
4849 	.init = macb_init,
4850 	.usrio = &sama7g5_usrio,
4851 };
4852 
4853 static const struct macb_config sama7g5_emac_config = {
4854 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
4855 		MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII,
4856 	.dma_burst_length = 16,
4857 	.clk_init = macb_clk_init,
4858 	.init = macb_init,
4859 	.usrio = &sama7g5_usrio,
4860 };
4861 
4862 static const struct macb_config versal_config = {
4863 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
4864 		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK,
4865 	.dma_burst_length = 16,
4866 	.clk_init = macb_clk_init,
4867 	.init = init_reset_optional,
4868 	.jumbo_max_len = 10240,
4869 	.usrio = &macb_default_usrio,
4870 };
4871 
4872 static const struct of_device_id macb_dt_ids[] = {
4873 	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
4874 	{ .compatible = "cdns,macb" },
4875 	{ .compatible = "cdns,np4-macb", .data = &np4_config },
4876 	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
4877 	{ .compatible = "cdns,gem", .data = &pc302gem_config },
4878 	{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
4879 	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
4880 	{ .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
4881 	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
4882 	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
4883 	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
4884 	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
4885 	{ .compatible = "cdns,emac", .data = &emac_config },
4886 	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, /* deprecated */
4887 	{ .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
4888 	{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
4889 	{ .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
4890 	{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
4891 	{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
4892 	{ .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
4893 	{ .compatible = "xlnx,zynq-gem", .data = &zynq_config },
4894 	{ .compatible = "xlnx,versal-gem", .data = &versal_config},
4895 	{ /* sentinel */ }
4896 };
4897 MODULE_DEVICE_TABLE(of, macb_dt_ids);
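
/* Illustrative device tree binding (node name and unit address are
 * examples only): a node such as
 *
 *   ethernet@ff0b0000 {
 *           compatible = "xlnx,zynqmp-gem";
 *           ...
 *   };
 *
 * is matched against the table above by macb_probe(), which then picks up
 * the per-SoC clk_init/init callbacks and capability flags from the
 * associated struct macb_config.
 */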
4898 #endif /* CONFIG_OF */
4899 
4900 static const struct macb_config default_gem_config = {
4901 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4902 		MACB_CAPS_JUMBO |
4903 		MACB_CAPS_GEM_HAS_PTP,
4904 	.dma_burst_length = 16,
4905 	.clk_init = macb_clk_init,
4906 	.init = macb_init,
4907 	.usrio = &macb_default_usrio,
4908 	.jumbo_max_len = 10240,
4909 };
4910 
4911 static int macb_probe(struct platform_device *pdev)
4912 {
4913 	const struct macb_config *macb_config = &default_gem_config;
4914 	int (*clk_init)(struct platform_device *, struct clk **,
4915 			struct clk **, struct clk **,  struct clk **,
4916 			struct clk **) = macb_config->clk_init;
4917 	int (*init)(struct platform_device *) = macb_config->init;
4918 	struct device_node *np = pdev->dev.of_node;
4919 	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
4920 	struct clk *tsu_clk = NULL;
4921 	unsigned int queue_mask, num_queues;
4922 	bool native_io;
4923 	phy_interface_t interface;
4924 	struct net_device *dev;
4925 	struct resource *regs;
4926 	void __iomem *mem;
4927 	struct macb *bp;
4928 	int err, val;
4929 
4930 	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
4931 	if (IS_ERR(mem))
4932 		return PTR_ERR(mem);
4933 
4934 	if (np) {
4935 		const struct of_device_id *match;
4936 
4937 		match = of_match_node(macb_dt_ids, np);
4938 		if (match && match->data) {
4939 			macb_config = match->data;
4940 			clk_init = macb_config->clk_init;
4941 			init = macb_config->init;
4942 		}
4943 	}
4944 
4945 	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
4946 	if (err)
4947 		return err;
4948 
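	/* Runtime PM idiom: mark the device active and hold a usage
	 * reference before enabling runtime PM so the hardware cannot be
	 * runtime-suspended in the middle of probe; the reference is
	 * dropped at the end of probe via pm_runtime_put_autosuspend().
	 */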
4949 	pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
4950 	pm_runtime_use_autosuspend(&pdev->dev);
4951 	pm_runtime_get_noresume(&pdev->dev);
4952 	pm_runtime_set_active(&pdev->dev);
4953 	pm_runtime_enable(&pdev->dev);
4954 	native_io = hw_is_native_io(mem);
4955 
4956 	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
4957 	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
4958 	if (!dev) {
4959 		err = -ENOMEM;
4960 		goto err_disable_clocks;
4961 	}
4962 
4963 	dev->base_addr = regs->start;
4964 
4965 	SET_NETDEV_DEV(dev, &pdev->dev);
4966 
4967 	bp = netdev_priv(dev);
4968 	bp->pdev = pdev;
4969 	bp->dev = dev;
4970 	bp->regs = mem;
4971 	bp->native_io = native_io;
4972 	if (native_io) {
4973 		bp->macb_reg_readl = hw_readl_native;
4974 		bp->macb_reg_writel = hw_writel_native;
4975 	} else {
4976 		bp->macb_reg_readl = hw_readl;
4977 		bp->macb_reg_writel = hw_writel;
4978 	}
4979 	bp->num_queues = num_queues;
4980 	bp->queue_mask = queue_mask;
4981 	if (macb_config)
4982 		bp->dma_burst_length = macb_config->dma_burst_length;
4983 	bp->pclk = pclk;
4984 	bp->hclk = hclk;
4985 	bp->tx_clk = tx_clk;
4986 	bp->rx_clk = rx_clk;
4987 	bp->tsu_clk = tsu_clk;
4988 	if (macb_config)
4989 		bp->jumbo_max_len = macb_config->jumbo_max_len;
4990 
4991 	bp->wol = 0;
4992 	if (of_get_property(np, "magic-packet", NULL))
4993 		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
4994 	device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
4995 
4996 	bp->usrio = macb_config->usrio;
4997 
4998 	spin_lock_init(&bp->lock);
4999 
5000 	/* setup capabilities */
5001 	macb_configure_caps(bp, macb_config);
5002 
5003 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
5004 	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
5005 		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
5006 		bp->hw_dma_cap |= HW_DMA_CAP_64B;
5007 	}
5008 #endif
5009 	platform_set_drvdata(pdev, dev);
5010 
5011 	dev->irq = platform_get_irq(pdev, 0);
5012 	if (dev->irq < 0) {
5013 		err = dev->irq;
5014 		goto err_out_free_netdev;
5015 	}
5016 
5017 	/* MTU range: 68 - 1500 or 10240 */
5018 	dev->min_mtu = GEM_MTU_MIN_SIZE;
5019 	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
5020 		dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
5021 	else
5022 		dev->max_mtu = ETH_DATA_LEN;
5023 
5024 	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
5025 		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
5026 		if (val)
5027 			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
5028 						macb_dma_desc_get_size(bp);
5029 
5030 		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
5031 		if (val)
5032 			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
5033 						macb_dma_desc_get_size(bp);
5034 	}
5035 
5036 	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
5037 	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
5038 		bp->rx_intr_mask |= MACB_BIT(RXUBR);
5039 
5040 	err = of_get_ethdev_address(np, bp->dev);
5041 	if (err == -EPROBE_DEFER)
5042 		goto err_out_free_netdev;
5043 	else if (err)
5044 		macb_get_hwaddr(bp);
5045 
5046 	err = of_get_phy_mode(np, &interface);
5047 	if (err)
5048 		/* not found in DT, MII by default */
5049 		bp->phy_interface = PHY_INTERFACE_MODE_MII;
5050 	else
5051 		bp->phy_interface = interface;
5052 
5053 	/* IP specific init */
5054 	err = init(pdev);
5055 	if (err)
5056 		goto err_out_free_netdev;
5057 
5058 	err = macb_mii_init(bp);
5059 	if (err)
5060 		goto err_out_phy_exit;
5061 
5062 	netif_carrier_off(dev);
5063 
5064 	err = register_netdev(dev);
5065 	if (err) {
5066 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
5067 		goto err_out_unregister_mdio;
5068 	}
5069 
5070 	tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
5071 
5072 	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
5073 		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
5074 		    dev->base_addr, dev->irq, dev->dev_addr);
5075 
5076 	pm_runtime_mark_last_busy(&bp->pdev->dev);
5077 	pm_runtime_put_autosuspend(&bp->pdev->dev);
5078 
5079 	return 0;
5080 
5081 err_out_unregister_mdio:
5082 	mdiobus_unregister(bp->mii_bus);
5083 	mdiobus_free(bp->mii_bus);
5084 
5085 err_out_phy_exit:
5086 	phy_exit(bp->sgmii_phy);
5087 
5088 err_out_free_netdev:
5089 	free_netdev(dev);
5090 
5091 err_disable_clocks:
5092 	macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk);
5093 	pm_runtime_disable(&pdev->dev);
5094 	pm_runtime_set_suspended(&pdev->dev);
5095 	pm_runtime_dont_use_autosuspend(&pdev->dev);
5096 
5097 	return err;
5098 }
5099 
5100 static int macb_remove(struct platform_device *pdev)
5101 {
5102 	struct net_device *dev;
5103 	struct macb *bp;
5104 
5105 	dev = platform_get_drvdata(pdev);
5106 
5107 	if (dev) {
5108 		bp = netdev_priv(dev);
5109 		phy_exit(bp->sgmii_phy);
5110 		mdiobus_unregister(bp->mii_bus);
5111 		mdiobus_free(bp->mii_bus);
5112 
5113 		unregister_netdev(dev);
5114 		tasklet_kill(&bp->hresp_err_tasklet);
5115 		pm_runtime_disable(&pdev->dev);
5116 		pm_runtime_dont_use_autosuspend(&pdev->dev);
5117 		if (!pm_runtime_suspended(&pdev->dev)) {
5118 			macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
5119 					  bp->rx_clk, bp->tsu_clk);
5120 			pm_runtime_set_suspended(&pdev->dev);
5121 		}
5122 		phylink_destroy(bp->phylink);
5123 		free_netdev(dev);
5124 	}
5125 
5126 	return 0;
5127 }
5128 
5129 static int __maybe_unused macb_suspend(struct device *dev)
5130 {
5131 	struct net_device *netdev = dev_get_drvdata(dev);
5132 	struct macb *bp = netdev_priv(netdev);
5133 	struct macb_queue *queue;
5134 	unsigned long flags;
5135 	unsigned int q;
5136 	int err;
5137 
5138 	if (!netif_running(netdev))
5139 		return 0;
5140 
5141 	if (bp->wol & MACB_WOL_ENABLED) {
5142 		spin_lock_irqsave(&bp->lock, flags);
5143 		/* Flush all status bits */
5144 		macb_writel(bp, TSR, -1);
5145 		macb_writel(bp, RSR, -1);
5146 		for (q = 0, queue = bp->queues; q < bp->num_queues;
5147 		     ++q, ++queue) {
5148 			/* Disable all interrupts */
5149 			queue_writel(queue, IDR, -1);
5150 			queue_readl(queue, ISR);
5151 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
5152 				queue_writel(queue, ISR, -1);
5153 		}
5154 		/* Change interrupt handler and
5155 		 * Enable WoL IRQ on queue 0
5156 		 */
5157 		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
5158 		if (macb_is_gem(bp)) {
5159 			err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
5160 					       IRQF_SHARED, netdev->name, bp->queues);
5161 			if (err) {
5162 				dev_err(dev,
5163 					"Unable to request IRQ %d (error %d)\n",
5164 					bp->queues[0].irq, err);
5165 				spin_unlock_irqrestore(&bp->lock, flags);
5166 				return err;
5167 			}
5168 			queue_writel(bp->queues, IER, GEM_BIT(WOL));
5169 			gem_writel(bp, WOL, MACB_BIT(MAG));
5170 		} else {
5171 			err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
5172 					       IRQF_SHARED, netdev->name, bp->queues);
5173 			if (err) {
5174 				dev_err(dev,
5175 					"Unable to request IRQ %d (error %d)\n",
5176 					bp->queues[0].irq, err);
5177 				spin_unlock_irqrestore(&bp->lock, flags);
5178 				return err;
5179 			}
5180 			queue_writel(bp->queues, IER, MACB_BIT(WOL));
5181 			macb_writel(bp, WOL, MACB_BIT(MAG));
5182 		}
5183 		spin_unlock_irqrestore(&bp->lock, flags);
5184 
5185 		enable_irq_wake(bp->queues[0].irq);
5186 	}
5187 
5188 	netif_device_detach(netdev);
5189 	for (q = 0, queue = bp->queues; q < bp->num_queues;
5190 	     ++q, ++queue) {
5191 		napi_disable(&queue->napi_rx);
5192 		napi_disable(&queue->napi_tx);
5193 	}
5194 
5195 	if (!(bp->wol & MACB_WOL_ENABLED)) {
5196 		rtnl_lock();
5197 		phylink_stop(bp->phylink);
5198 		phy_exit(bp->sgmii_phy);
5199 		rtnl_unlock();
5200 		spin_lock_irqsave(&bp->lock, flags);
5201 		macb_reset_hw(bp);
5202 		spin_unlock_irqrestore(&bp->lock, flags);
5203 	}
5204 
5205 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
5206 		bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
5207 
5208 	if (netdev->hw_features & NETIF_F_NTUPLE)
5209 		bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
5210 
5211 	if (bp->ptp_info)
5212 		bp->ptp_info->ptp_remove(netdev);
5213 	if (!device_may_wakeup(dev))
5214 		pm_runtime_force_suspend(dev);
5215 
5216 	return 0;
5217 }
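
/* Design note: when Wake-on-LAN is armed, macb_suspend() swaps the normal
 * interrupt handler on queue 0 for a minimal WoL handler so that only the
 * magic-packet event is serviced while asleep; macb_resume() undoes the
 * swap before restarting phylink and the rest of the hardware state.
 */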
5218 
5219 static int __maybe_unused macb_resume(struct device *dev)
5220 {
5221 	struct net_device *netdev = dev_get_drvdata(dev);
5222 	struct macb *bp = netdev_priv(netdev);
5223 	struct macb_queue *queue;
5224 	unsigned long flags;
5225 	unsigned int q;
5226 	int err;
5227 
5228 	if (!netif_running(netdev))
5229 		return 0;
5230 
5231 	if (!device_may_wakeup(dev))
5232 		pm_runtime_force_resume(dev);
5233 
5234 	if (bp->wol & MACB_WOL_ENABLED) {
5235 		spin_lock_irqsave(&bp->lock, flags);
5236 		/* Disable WoL */
5237 		if (macb_is_gem(bp)) {
5238 			queue_writel(bp->queues, IDR, GEM_BIT(WOL));
5239 			gem_writel(bp, WOL, 0);
5240 		} else {
5241 			queue_writel(bp->queues, IDR, MACB_BIT(WOL));
5242 			macb_writel(bp, WOL, 0);
5243 		}
5244 		/* Clear ISR on queue 0 */
5245 		queue_readl(bp->queues, ISR);
5246 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
5247 			queue_writel(bp->queues, ISR, -1);
5248 		/* Replace interrupt handler on queue 0 */
5249 		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
5250 		err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
5251 				       IRQF_SHARED, netdev->name, bp->queues);
5252 		if (err) {
5253 			dev_err(dev,
5254 				"Unable to request IRQ %d (error %d)\n",
5255 				bp->queues[0].irq, err);
5256 			spin_unlock_irqrestore(&bp->lock, flags);
5257 			return err;
5258 		}
5259 		spin_unlock_irqrestore(&bp->lock, flags);
5260 
5261 		disable_irq_wake(bp->queues[0].irq);
5262 
5263 		/* Now make sure we disable phy before moving
5264 		 * to common restore path
5265 		 */
5266 		rtnl_lock();
5267 		phylink_stop(bp->phylink);
5268 		rtnl_unlock();
5269 	}
5270 
5271 	for (q = 0, queue = bp->queues; q < bp->num_queues;
5272 	     ++q, ++queue) {
5273 		napi_enable(&queue->napi_rx);
5274 		napi_enable(&queue->napi_tx);
5275 	}
5276 
5277 	if (netdev->hw_features & NETIF_F_NTUPLE)
5278 		gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
5279 
5280 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
5281 		macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
5282 
5283 	macb_writel(bp, NCR, MACB_BIT(MPE));
5284 	macb_init_hw(bp);
5285 	macb_set_rx_mode(netdev);
5286 	macb_restore_features(bp);
5287 	rtnl_lock();
5288 	if (!device_may_wakeup(&bp->dev->dev))
5289 		phy_init(bp->sgmii_phy);
5290 
5291 	phylink_start(bp->phylink);
5292 	rtnl_unlock();
5293 
5294 	netif_device_attach(netdev);
5295 	if (bp->ptp_info)
5296 		bp->ptp_info->ptp_init(netdev);
5297 
5298 	return 0;
5299 }
5300 
5301 static int __maybe_unused macb_runtime_suspend(struct device *dev)
5302 {
5303 	struct net_device *netdev = dev_get_drvdata(dev);
5304 	struct macb *bp = netdev_priv(netdev);
5305 
5306 	if (!(device_may_wakeup(dev)))
5307 		macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
5308 	else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
5309 		macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
5310 
5311 	return 0;
5312 }
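
/* Clock policy: if the device is not a wakeup source, all clocks are shut
 * down on runtime suspend; if it is, only the TSU clock is dropped, and
 * only on hardware that does not need it while suspended
 * (!MACB_CAPS_NEED_TSUCLK). macb_runtime_resume() mirrors this.
 */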
5313 
5314 static int __maybe_unused macb_runtime_resume(struct device *dev)
5315 {
5316 	struct net_device *netdev = dev_get_drvdata(dev);
5317 	struct macb *bp = netdev_priv(netdev);
5318 
5319 	if (!(device_may_wakeup(dev))) {
5320 		clk_prepare_enable(bp->pclk);
5321 		clk_prepare_enable(bp->hclk);
5322 		clk_prepare_enable(bp->tx_clk);
5323 		clk_prepare_enable(bp->rx_clk);
5324 		clk_prepare_enable(bp->tsu_clk);
5325 	} else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
5326 		clk_prepare_enable(bp->tsu_clk);
5327 	}
5328 
5329 	return 0;
5330 }
5331 
5332 static const struct dev_pm_ops macb_pm_ops = {
5333 	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
5334 	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
5335 };
5336 
5337 static struct platform_driver macb_driver = {
5338 	.probe		= macb_probe,
5339 	.remove		= macb_remove,
5340 	.driver		= {
5341 		.name		= "macb",
5342 		.of_match_table	= of_match_ptr(macb_dt_ids),
5343 		.pm	= &macb_pm_ops,
5344 	},
5345 };
5346 
5347 module_platform_driver(macb_driver);
5348 
5349 MODULE_LICENSE("GPL");
5350 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
5351 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
5352 MODULE_ALIAS("platform:macb");
5353