xref: /linux/drivers/net/ethernet/spacemit/k1_emac.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SpacemiT K1 Ethernet driver
4  *
5  * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
6  * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
7  */
8 
9 #include <linux/bitfield.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/ethtool.h>
15 #include <linux/if_vlan.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/iopoll.h>
19 #include <linux/kernel.h>
20 #include <linux/mfd/syscon.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/phy.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/pm.h>
30 #include <linux/regmap.h>
31 #include <linux/reset.h>
32 #include <linux/rtnetlink.h>
33 #include <linux/timer.h>
34 #include <linux/types.h>
35 
36 #include "k1_emac.h"
37 
#define DRIVER_NAME "k1_emac"

/* Default RX buffer holds one standard 1500-byte MTU frame plus overhead */
#define EMAC_DEFAULT_BUFSIZE		1536
#define EMAC_RX_BUF_2K			2048
/* Largest buffer size encodable in the RX descriptor's buffer-size-1 field */
#define EMAC_RX_BUF_MAX			FIELD_MAX(RX_DESC_1_BUFFER_SIZE_1_MASK)

/* Tuning parameters from SpacemiT */
#define EMAC_TX_FRAMES			64
#define EMAC_TX_COAL_TIMEOUT		40000	/* usecs (see usecs_to_jiffies use) */
#define EMAC_RX_FRAMES			64
#define EMAC_RX_COAL_TIMEOUT		(600 * 312)	/* HW timeout counter units */

#define DEFAULT_TX_ALMOST_FULL		0x1f8
#define DEFAULT_TX_THRESHOLD		1518
#define DEFAULT_RX_THRESHOLD		12
#define DEFAULT_TX_RING_NUM		1024
#define DEFAULT_RX_RING_NUM		1024
#define DEFAULT_DMA_BURST		MREGBIT_BURST_16WORD
/* Multicast hash filter width in bits (4 x 16-bit registers) */
#define HASH_TABLE_SIZE			64
57 
/* One DMA-mapped buffer attached to a TX descriptor slot */
struct desc_buf {
	u64 dma_addr;		/* 0 when the slot holds no mapping */
	void *buff_addr;
	u16 dma_len;
	u8 map_as_page;		/* mapped via dma_map_page() vs dma_map_single() */
};

/* Software state for one TX descriptor; a desc can carry two fragments */
struct emac_tx_desc_buffer {
	struct sk_buff *skb;	/* set only on the last descriptor of a chain */
	struct desc_buf buf[2];
};

/* Software state for one RX descriptor (always exactly one buffer) */
struct emac_rx_desc_buffer {
	struct sk_buff *skb;
	u64 dma_addr;
	void *buff_addr;
	u16 dma_len;
	u8 map_as_page;
};
77 
/**
 * struct emac_desc_ring - Software-side information for one descriptor ring
 * Same structure used for both RX and TX
 * @desc_addr: Virtual address to the descriptor ring memory
 * @desc_dma_addr: DMA address of the descriptor ring
 * @total_size: Size of ring in bytes
 * @total_cnt: Number of descriptors
 * @head: Next descriptor to associate a buffer with
 * @tail: Next descriptor to check status bit
 * @rx_desc_buf: Array of descriptors for RX
 * @tx_desc_buf: Array of descriptors for TX, with max of two buffers each
 *
 * The buffer arrays live in an anonymous union since a given ring is only
 * ever used for one direction.
 */
struct emac_desc_ring {
	void *desc_addr;
	dma_addr_t desc_dma_addr;
	u32 total_size;
	u32 total_cnt;
	u32 head;
	u32 tail;
	union {
		struct emac_rx_desc_buffer *rx_desc_buf;
		struct emac_tx_desc_buffer *tx_desc_buf;
	};
};
102 
/* Per-device driver state */
struct emac_priv {
	void __iomem *iobase;		/* MMIO register base */
	u32 dma_buf_sz;			/* Current RX buffer size in bytes */
	struct emac_desc_ring tx_ring;
	struct emac_desc_ring rx_ring;

	struct net_device *ndev;
	struct napi_struct napi;
	struct platform_device *pdev;
	struct clk *bus_clk;
	struct clk *ref_clk;
	struct regmap *regmap_apmu;	/* APMU syscon (PHY interface / AXI bits) */
	u32 regmap_apmu_offset;		/* Offset of our block within the syscon */
	int irq;

	phy_interface_t phy_interface;

	/* Running 64-bit totals, plus snapshots saved at last interface down */
	union emac_hw_tx_stats tx_stats, tx_stats_off;
	union emac_hw_rx_stats rx_stats, rx_stats_off;

	u32 tx_count_frames;		/* Frames sent since last TX interrupt */
	u32 tx_coal_frames;		/* Raise a TX interrupt every N frames */
	u32 tx_coal_timeout;		/* TX coalescing fallback timeout, usecs */
	struct work_struct tx_timeout_task;

	struct timer_list txtimer;	/* TX coalescing fallback timer */
	struct timer_list stats_timer;	/* Periodic stats refresh (overflow catch) */

	u32 tx_delay;
	u32 rx_delay;

	/* Softirq-safe, hold while touching hardware statistics */
	spinlock_t stats_lock;
};
137 
/* Write a 32-bit EMAC register */
static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->iobase + reg);
}
142 
/* Read a 32-bit EMAC register */
static u32 emac_rd(struct emac_priv *priv, u32 reg)
{
	return readl(priv->iobase + reg);
}
147 
148 static int emac_phy_interface_config(struct emac_priv *priv)
149 {
150 	u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
151 
152 	if (phy_interface_mode_is_rgmii(priv->phy_interface))
153 		val |= PHY_INTF_RGMII;
154 
155 	regmap_update_bits(priv->regmap_apmu,
156 			   priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
157 			   mask, val);
158 
159 	return 0;
160 }
161 
162 /*
163  * Where the hardware expects a MAC address, it is laid out in this high, med,
164  * low order in three consecutive registers and in this format.
165  */
166 
167 static void emac_set_mac_addr_reg(struct emac_priv *priv,
168 				  const unsigned char *addr,
169 				  u32 reg)
170 {
171 	emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
172 	emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
173 	emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
174 }
175 
/* Program the station address into unicast filter slot 1 */
static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
{
	emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
}
180 
/* Quiesce the controller: mask interrupts, then stop MAC and DMA engines */
static void emac_reset_hw(struct emac_priv *priv)
{
	/* Disable all interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Disable DMA */
	emac_wr(priv, DMA_CONTROL, 0x0);
}
194 
/*
 * Bring the controller to a known initial state: address filtering, FIFO
 * thresholds, frame-size limits, RX interrupt mitigation and DMA
 * configuration. TX and RX units are left disabled.
 */
static void emac_init_hw(struct emac_priv *priv)
{
	u32 rxirq = 0, dma = 0, frame_sz;

	/* NOTE(review): presumably forces a single AXI transaction ID — confirm */
	regmap_set_bits(priv->regmap_apmu,
			priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
			AXI_SINGLE_ID);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Enable MAC address 1 filtering */
	emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);

	/* Zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);

	/* Configure thresholds */
	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
		DEFAULT_TX_THRESHOLD);
	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);

	/* Set maximum frame size and jabber size based on configured MTU,
	 * accounting for Ethernet header, double VLAN tags, and FCS.
	 */
	frame_sz = priv->ndev->mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;

	emac_wr(priv, MAC_MAXIMUM_FRAME_SIZE, frame_sz);
	emac_wr(priv, MAC_TRANSMIT_JABBER_SIZE, frame_sz);
	emac_wr(priv, MAC_RECEIVE_JABBER_SIZE, frame_sz);

	/* RX IRQ mitigation: interrupt per EMAC_RX_FRAMES frames or on timeout */
	rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
			   EMAC_RX_FRAMES);
	rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
			    EMAC_RX_COAL_TIMEOUT);
	rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
	emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);

	/* Disable and set DMA config */
	emac_wr(priv, DMA_CONTROL, 0x0);

	/* Soft-reset the DMA engine and let it settle before configuring */
	emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
	usleep_range(9000, 10000);
	emac_wr(priv, DMA_CONFIGURATION, 0x0);
	usleep_range(9000, 10000);

	dma |= MREGBIT_STRICT_BURST;
	dma |= MREGBIT_DMA_64BIT_MODE;
	dma |= DEFAULT_DMA_BURST;

	emac_wr(priv, DMA_CONFIGURATION, dma);
}
253 
/* Poke the TX poll-demand register so the DMA re-reads the descriptor ring */
static void emac_dma_start_transmit(struct emac_priv *priv)
{
	/* The actual value written does not matter */
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
}
259 
260 static void emac_enable_interrupt(struct emac_priv *priv)
261 {
262 	u32 val;
263 
264 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
265 	val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
266 	val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
267 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
268 }
269 
270 static void emac_disable_interrupt(struct emac_priv *priv)
271 {
272 	u32 val;
273 
274 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
275 	val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
276 	val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
277 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
278 }
279 
280 static u32 emac_tx_avail(struct emac_priv *priv)
281 {
282 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
283 	u32 avail;
284 
285 	if (tx_ring->tail > tx_ring->head)
286 		avail = tx_ring->tail - tx_ring->head - 1;
287 	else
288 		avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
289 
290 	return avail;
291 }
292 
/* Re-arm the TX coalescing fallback timer (tx_coal_timeout is in usecs) */
static void emac_tx_coal_timer_resched(struct emac_priv *priv)
{
	mod_timer(&priv->txtimer,
		  jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
}
298 
/* Coalescing timer expiry: run NAPI so completed TX descriptors get reaped */
static void emac_tx_coal_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, txtimer);

	napi_schedule(&priv->napi);
}
305 
306 static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
307 {
308 	priv->tx_count_frames += pkt_num;
309 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
310 		emac_tx_coal_timer_resched(priv);
311 		return false;
312 	}
313 
314 	priv->tx_count_frames = 0;
315 	return true;
316 }
317 
318 static void emac_free_tx_buf(struct emac_priv *priv, int i)
319 {
320 	struct emac_tx_desc_buffer *tx_buf;
321 	struct emac_desc_ring *tx_ring;
322 	struct desc_buf *buf;
323 	int j;
324 
325 	tx_ring = &priv->tx_ring;
326 	tx_buf = &tx_ring->tx_desc_buf[i];
327 
328 	for (j = 0; j < 2; j++) {
329 		buf = &tx_buf->buf[j];
330 		if (!buf->dma_addr)
331 			continue;
332 
333 		if (buf->map_as_page)
334 			dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
335 				       buf->dma_len, DMA_TO_DEVICE);
336 		else
337 			dma_unmap_single(&priv->pdev->dev,
338 					 buf->dma_addr, buf->dma_len,
339 					 DMA_TO_DEVICE);
340 
341 		buf->dma_addr = 0;
342 		buf->map_as_page = false;
343 		buf->buff_addr = NULL;
344 	}
345 
346 	if (tx_buf->skb) {
347 		dev_kfree_skb_any(tx_buf->skb);
348 		tx_buf->skb = NULL;
349 	}
350 }
351 
352 static void emac_clean_tx_desc_ring(struct emac_priv *priv)
353 {
354 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
355 	u32 i;
356 
357 	for (i = 0; i < tx_ring->total_cnt; i++)
358 		emac_free_tx_buf(priv, i);
359 
360 	tx_ring->head = 0;
361 	tx_ring->tail = 0;
362 }
363 
364 static void emac_clean_rx_desc_ring(struct emac_priv *priv)
365 {
366 	struct emac_rx_desc_buffer *rx_buf;
367 	struct emac_desc_ring *rx_ring;
368 	u32 i;
369 
370 	rx_ring = &priv->rx_ring;
371 
372 	for (i = 0; i < rx_ring->total_cnt; i++) {
373 		rx_buf = &rx_ring->rx_desc_buf[i];
374 
375 		if (!rx_buf->skb)
376 			continue;
377 
378 		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
379 				 rx_buf->dma_len, DMA_FROM_DEVICE);
380 
381 		dev_kfree_skb(rx_buf->skb);
382 		rx_buf->skb = NULL;
383 	}
384 
385 	rx_ring->tail = 0;
386 	rx_ring->head = 0;
387 }
388 
/*
 * Allocate the TX software-state array and the DMA-coherent descriptor ring.
 * Returns 0 or -ENOMEM; the software array is freed again if the ring
 * allocation fails.
 */
static int emac_alloc_tx_resources(struct emac_priv *priv)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct platform_device *pdev = priv->pdev;

	tx_ring->tx_desc_buf = kzalloc_objs(*tx_ring->tx_desc_buf,
					    tx_ring->total_cnt);

	if (!tx_ring->tx_desc_buf)
		return -ENOMEM;

	/* Round the descriptor ring allocation up to whole pages */
	tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
	tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);

	tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
						&tx_ring->desc_dma_addr,
						GFP_KERNEL);
	if (!tx_ring->desc_addr) {
		kfree(tx_ring->tx_desc_buf);
		return -ENOMEM;
	}

	tx_ring->head = 0;
	tx_ring->tail = 0;

	return 0;
}
416 
/*
 * Allocate the RX software-state array and the DMA-coherent descriptor ring.
 * Mirrors emac_alloc_tx_resources(); returns 0 or -ENOMEM.
 */
static int emac_alloc_rx_resources(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct platform_device *pdev = priv->pdev;

	rx_ring->rx_desc_buf = kzalloc_objs(*rx_ring->rx_desc_buf,
					    rx_ring->total_cnt);
	if (!rx_ring->rx_desc_buf)
		return -ENOMEM;

	/* Round the descriptor ring allocation up to whole pages */
	rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);

	rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);

	rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
						&rx_ring->desc_dma_addr,
						GFP_KERNEL);
	if (!rx_ring->desc_addr) {
		kfree(rx_ring->rx_desc_buf);
		return -ENOMEM;
	}

	rx_ring->head = 0;
	rx_ring->tail = 0;

	return 0;
}
444 
445 static void emac_free_tx_resources(struct emac_priv *priv)
446 {
447 	struct emac_desc_ring *tr = &priv->tx_ring;
448 	struct device *dev = &priv->pdev->dev;
449 
450 	emac_clean_tx_desc_ring(priv);
451 
452 	kfree(tr->tx_desc_buf);
453 	tr->tx_desc_buf = NULL;
454 
455 	dma_free_coherent(dev, tr->total_size, tr->desc_addr,
456 			  tr->desc_dma_addr);
457 	tr->desc_addr = NULL;
458 }
459 
460 static void emac_free_rx_resources(struct emac_priv *priv)
461 {
462 	struct emac_desc_ring *rr = &priv->rx_ring;
463 	struct device *dev = &priv->pdev->dev;
464 
465 	emac_clean_rx_desc_ring(priv);
466 
467 	kfree(rr->rx_desc_buf);
468 	rr->rx_desc_buf = NULL;
469 
470 	dma_free_coherent(dev, rr->total_size, rr->desc_addr,
471 			  rr->desc_dma_addr);
472 	rr->desc_addr = NULL;
473 }
474 
/*
 * Reap TX descriptors that the DMA engine has finished with, walking from
 * tail towards head. Runs from NAPI context; always returns 0.
 */
static int emac_tx_clean_desc(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	struct emac_desc_ring *tx_ring;
	struct emac_desc *tx_desc;
	u32 i;

	/* Serialize against the xmit path, which also moves ring indices */
	netif_tx_lock(ndev);

	tx_ring = &priv->tx_ring;

	i = tx_ring->tail;

	while (i != tx_ring->head) {
		tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];

		/* Stop checking if desc still own by DMA */
		if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
			break;

		emac_free_tx_buf(priv, i);
		memset(tx_desc, 0, sizeof(struct emac_desc));

		if (++i == tx_ring->total_cnt)
			i = 0;
	}

	tx_ring->tail = i;

	/* Restart a stopped queue once a quarter of the ring is free again */
	if (unlikely(netif_queue_stopped(ndev) &&
		     emac_tx_avail(priv) > tx_ring->total_cnt / 4))
		netif_wake_queue(ndev);

	netif_tx_unlock(ndev);

	return 0;
}
512 
/*
 * Validate the hardware status of a received frame. Returns true if the
 * frame may be passed up the stack; otherwise logs the (first matching)
 * error reason at ratelimited debug level and returns false.
 */
static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
{
	const char *msg;
	u32 len;

	len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);

	if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
		msg = "Not last descriptor"; /* This would be a bug */
	else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
		msg = "Runt frame";
	else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
		msg = "Frame CRC error";
	else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
		msg = "Frame exceeds max length";
	else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
		msg = "Frame jabber error";
	else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
		msg = "Frame length error";
	else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
		msg = "Frame length unacceptable";
	else
		return true; /* All good */

	dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);

	return false;
}
541 
/*
 * Refill RX descriptors with fresh skbs, starting at head and stopping at
 * the first slot that still has one (or on allocation/mapping failure).
 * Each refilled descriptor is handed back to hardware by setting its OWN
 * bit only after all other fields are visible.
 */
static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct emac_desc rx_desc, *rx_desc_addr;
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct sk_buff *skb;
	u32 i;

	i = rx_ring->head;
	rx_buf = &rx_ring->rx_desc_buf[i];

	while (!rx_buf->skb) {
		skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
		if (!skb)
			break;

		skb->dev = ndev;

		rx_buf->skb = skb;
		rx_buf->dma_len = priv->dma_buf_sz;
		rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
						  priv->dma_buf_sz,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
			dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
			goto err_free_skb;
		}

		rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];

		memset(&rx_desc, 0, sizeof(rx_desc));

		rx_desc.buffer_addr_1 = rx_buf->dma_addr;
		rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
					   rx_buf->dma_len);

		/* Mark the ring boundary on the last descriptor */
		if (++i == rx_ring->total_cnt) {
			rx_desc.desc1 |= RX_DESC_1_END_RING;
			i = 0;
		}

		/* Publish all descriptor fields before granting DMA ownership */
		*rx_desc_addr = rx_desc;
		dma_wmb();
		WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);

		rx_buf = &rx_ring->rx_desc_buf[i];
	}

	rx_ring->head = i;
	return;

err_free_skb:
	dev_kfree_skb_any(skb);
	rx_buf->skb = NULL;
}
598 
/*
 * Receive up to @budget packets from the RX ring, passing good frames to
 * NAPI GRO and dropping bad ones. Returns the number of packets consumed
 * and refills the ring before returning.
 */
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct emac_desc_ring *rx_ring;
	struct sk_buff *skb = NULL;
	struct emac_desc *rx_desc;
	u32 got = 0, skb_len, i;

	rx_ring = &priv->rx_ring;

	i = rx_ring->tail;

	while (budget--) {
		rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Stop checking if rx_desc still owned by DMA */
		if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
			break;

		/* Order the OWN-bit read before reading the descriptor body */
		dma_rmb();

		rx_buf = &rx_ring->rx_desc_buf[i];

		if (!rx_buf->skb)
			break;

		got++;

		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				 rx_buf->dma_len, DMA_FROM_DEVICE);

		if (likely(emac_rx_frame_good(priv, rx_desc))) {
			skb = rx_buf->skb;

			/* Hardware length includes the FCS; strip it */
			skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
					    rx_desc->desc0);
			skb_len -= ETH_FCS_LEN;

			skb_put(skb, skb_len);
			skb->dev = ndev;
			ndev->hard_header_len = ETH_HLEN;

			skb->protocol = eth_type_trans(skb, ndev);

			/* No hardware checksum offload on this MAC */
			skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&priv->napi, skb);

			memset(rx_desc, 0, sizeof(struct emac_desc));
			rx_buf->skb = NULL;
		} else {
			dev_kfree_skb_irq(rx_buf->skb);
			rx_buf->skb = NULL;
		}

		if (++i == rx_ring->total_cnt)
			i = 0;
	}

	rx_ring->tail = i;

	/* Replenish the descriptors we just consumed */
	emac_alloc_rx_desc_buffers(priv);

	return got;
}
666 
/* NAPI poll: reap TX completions, then receive up to @budget packets */
static int emac_rx_poll(struct napi_struct *napi, int budget)
{
	struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
	int work_done;

	emac_tx_clean_desc(priv);

	work_done = emac_rx_clean_desc(priv, budget);
	/* Re-enable interrupts only once all pending RX work is done */
	if (work_done < budget && napi_complete_done(napi, work_done))
		emac_enable_interrupt(priv);

	return work_done;
}
680 
681 /*
682  * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
683  *
684  * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
685  * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
686  */
687 
/*
 * DMA-map fragment @frag_idx of @skb (0 = the linear head, i.e. skb->data)
 * and record the mapping in both the hardware descriptor @tx_desc and the
 * software state @tx_buf. Even fragments use buffer 1 of the descriptor,
 * odd fragments use buffer 2. Returns 0 on success or a DMA mapping error.
 */
static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
			    struct emac_tx_desc_buffer *tx_buf,
			    struct sk_buff *skb, u32 frag_idx)
{
	bool map_as_page, buf_idx;
	const skb_frag_t *frag;
	phys_addr_t addr;
	u32 len;
	int ret;

	buf_idx = frag_idx % 2;

	if (frag_idx == 0) {
		/* Non-fragmented part */
		len = skb_headlen(skb);
		addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		map_as_page = false;
	} else {
		/* Fragment */
		frag = &skb_shinfo(skb)->frags[frag_idx - 1];
		len = skb_frag_size(frag);
		addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		map_as_page = true;
	}

	ret = dma_mapping_error(dev, addr);
	if (ret)
		return ret;

	/* Track the mapping so emac_free_tx_buf() can undo it later */
	tx_buf->buf[buf_idx].dma_addr = addr;
	tx_buf->buf[buf_idx].dma_len = len;
	tx_buf->buf[buf_idx].map_as_page = map_as_page;

	if (buf_idx == 0) {
		tx_desc->buffer_addr_1 = addr;
		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
	} else {
		tx_desc->buffer_addr_2 = addr;
		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
	}

	return 0;
}
731 
732 static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
733 {
734 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
735 	struct emac_desc tx_desc, *tx_desc_addr;
736 	struct device *dev = &priv->pdev->dev;
737 	struct emac_tx_desc_buffer *tx_buf;
738 	u32 head, old_head, frag_num, f;
739 	bool buf_idx;
740 
741 	frag_num = skb_shinfo(skb)->nr_frags;
742 	head = tx_ring->head;
743 	old_head = head;
744 
745 	for (f = 0; f < frag_num + 1; f++) {
746 		buf_idx = f % 2;
747 
748 		/*
749 		 * If using buffer 1, initialize a new desc. Otherwise, use
750 		 * buffer 2 of previous fragment's desc.
751 		 */
752 		if (!buf_idx) {
753 			tx_buf = &tx_ring->tx_desc_buf[head];
754 			tx_desc_addr =
755 				&((struct emac_desc *)tx_ring->desc_addr)[head];
756 			memset(&tx_desc, 0, sizeof(tx_desc));
757 
758 			/*
759 			 * Give ownership for all but first desc initially. For
760 			 * first desc, give at the end so DMA cannot start
761 			 * reading uninitialized descs.
762 			 */
763 			if (head != old_head)
764 				tx_desc.desc0 |= TX_DESC_0_OWN;
765 
766 			if (++head == tx_ring->total_cnt) {
767 				/* Just used last desc in ring */
768 				tx_desc.desc1 |= TX_DESC_1_END_RING;
769 				head = 0;
770 			}
771 		}
772 
773 		if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
774 			dev_err_ratelimited(&priv->ndev->dev,
775 					    "Map TX frag %d failed\n", f);
776 			goto err_free_skb;
777 		}
778 
779 		if (f == 0)
780 			tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;
781 
782 		if (f == frag_num) {
783 			tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
784 			tx_buf->skb = skb;
785 			if (emac_tx_should_interrupt(priv, frag_num + 1))
786 				tx_desc.desc1 |=
787 					TX_DESC_1_INTERRUPT_ON_COMPLETION;
788 		}
789 
790 		*tx_desc_addr = tx_desc;
791 	}
792 
793 	/* All descriptors are ready, give ownership for first desc */
794 	tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
795 	dma_wmb();
796 	WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);
797 
798 	emac_dma_start_transmit(priv);
799 
800 	tx_ring->head = head;
801 
802 	return;
803 
804 err_free_skb:
805 	dev_dstats_tx_dropped(priv->ndev);
806 	dev_kfree_skb_any(skb);
807 }
808 
/* ndo_start_xmit: queue one skb for transmission */
static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct device *dev = &priv->pdev->dev;

	/* Conservatively require one free descriptor per fragment plus head */
	if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(ndev)) {
			netif_stop_queue(ndev);
			dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
		}
		return NETDEV_TX_BUSY;
	}

	emac_tx_mem_map(priv, skb);

	/* Make sure there is space in the ring for the next TX. */
	if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}
831 
832 static int emac_set_mac_address(struct net_device *ndev, void *addr)
833 {
834 	struct emac_priv *priv = netdev_priv(ndev);
835 	int ret = eth_mac_addr(ndev, addr);
836 
837 	if (ret)
838 		return ret;
839 
840 	/* If running, set now; if not running it will be set in emac_up. */
841 	if (netif_running(ndev))
842 		emac_set_mac_addr(priv, ndev->dev_addr);
843 
844 	return 0;
845 }
846 
/* Clear all 64 bits of the multicast hash filter */
static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
{
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
}
854 
855 /*
856  * The upper 6 bits of the Ethernet CRC of the MAC address is used as the hash
857  * when matching multicast addresses.
858  */
859 static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
860 {
861 	u32 crc32 = ether_crc(ETH_ALEN, addr);
862 
863 	return crc32 >> 26;
864 }
865 
866 /* Configure Multicast and Promiscuous modes */
static void emac_set_rx_mode(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	u32 mc_filter[4] = { 0 };
	u32 hash, reg, bit, val;

	val = emac_rd(priv, MAC_ADDRESS_CONTROL);

	/* Start from non-promiscuous; re-set below if requested */
	val &= ~MREGBIT_PROMISCUOUS_MODE;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promisc mode */
		val |= MREGBIT_PROMISCUOUS_MODE;
	} else if ((ndev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
		/* Accept all multicast: each hash register is 16 bits wide,
		 * so 0xffff sets every bit of the 64-bit filter.
		 */
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
	} else if (!netdev_mc_empty(ndev)) {
		emac_mac_multicast_filter_clear(priv);
		netdev_for_each_mc_addr(ha, ndev) {
			/*
			 * The hash table is an array of 4 16-bit registers. It
			 * is treated like an array of 64 bits (bits[hash]).
			 */
			hash = emac_ether_addr_hash(ha->addr);
			reg = hash / 16;
			bit = hash % 16;
			mc_filter[reg] |= BIT(bit);
		}
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
	}

	emac_wr(priv, MAC_ADDRESS_CONTROL, val);
}
908 
909 static int emac_change_mtu(struct net_device *ndev, int mtu)
910 {
911 	struct emac_priv *priv = netdev_priv(ndev);
912 	u32 frame_len;
913 
914 	if (netif_running(ndev)) {
915 		netdev_err(ndev, "must be stopped to change MTU\n");
916 		return -EBUSY;
917 	}
918 
919 	frame_len = mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
920 
921 	if (frame_len <= EMAC_DEFAULT_BUFSIZE)
922 		priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
923 	else if (frame_len <= EMAC_RX_BUF_2K)
924 		priv->dma_buf_sz = EMAC_RX_BUF_2K;
925 	else
926 		priv->dma_buf_sz = EMAC_RX_BUF_MAX;
927 
928 	ndev->mtu = mtu;
929 
930 	return 0;
931 }
932 
/* ndo_tx_timeout: defer recovery to process context via the workqueue */
static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct emac_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
939 
/*
 * Read a PHY register over MDIO. Returns the 16-bit register value, or a
 * negative errno if the transaction does not complete within the poll
 * timeout.
 */
static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct emac_priv *priv = bus->priv;
	u32 cmd = 0, val;
	int ret;

	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
	cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;

	emac_wr(priv, MAC_MDIO_DATA, 0x0);
	emac_wr(priv, MAC_MDIO_CONTROL, cmd);

	/* Hardware clears the start bit once the transaction completes */
	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

	if (ret)
		return ret;

	val = emac_rd(priv, MAC_MDIO_DATA);
	return FIELD_GET(MREGBIT_MDIO_DATA, val);
}
962 
963 static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
964 			  u16 value)
965 {
966 	struct emac_priv *priv = bus->priv;
967 	u32 cmd = 0, val;
968 	int ret;
969 
970 	emac_wr(priv, MAC_MDIO_DATA, value);
971 
972 	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
973 	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
974 	cmd |= MREGBIT_START_MDIO_TRANS;
975 
976 	emac_wr(priv, MAC_MDIO_CONTROL, cmd);
977 
978 	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
979 				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
980 
981 	return ret;
982 }
983 
/* Register the MDIO bus, attached to the optional "mdio-bus" DT child node */
static int emac_mdio_init(struct emac_priv *priv)
{
	struct device *dev = &priv->pdev->dev;
	struct device_node *mii_np;
	struct mii_bus *mii;
	int ret;

	mii = devm_mdiobus_alloc(dev);
	if (!mii)
		return -ENOMEM;

	mii->priv = priv;
	mii->name = "k1_emac_mii";
	mii->read = emac_mii_read;
	mii->write = emac_mii_write;
	mii->parent = dev;
	/* Mask all addresses from auto-probing; PHYs come from the DT node */
	mii->phy_mask = ~0;
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);

	mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");

	ret = devm_of_mdiobus_register(dev, mii, mii_np);
	if (ret)
		dev_err_probe(dev, ret, "Failed to register mdio bus\n");

	of_node_put(mii_np);
	return ret;
}
1012 
1013 /*
1014  * Even though this MAC supports gigabit operation, it only provides 32-bit
1015  * statistics counters. The most overflow-prone counters are the "bytes" ones,
1016  * which at gigabit overflow about twice a minute.
1017  *
1018  * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
1019  * every time statistics seem to go backwards. Also, update periodically to
1020  * catch overflows when we are not otherwise checking the statistics often
1021  * enough.
1022  */
1023 
1024 #define EMAC_STATS_TIMER_PERIOD		20
1025 
/*
 * Read one 32-bit hardware statistics counter, assembled from the high and
 * low data registers (the low register contributes its low 16 bits).
 * Returns 0 on success or a negative errno on poll timeout.
 */
static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
			      u32 control_reg, u32 high_reg, u32 low_reg)
{
	u32 val, high, low;
	int ret;

	/* The "read" bit is the same for TX and RX */

	val = MREGBIT_START_TX_COUNTER_READ | cnt;
	emac_wr(priv, control_reg, val);
	/* NOTE(review): read-back presumably posts the write before polling — confirm */
	val = emac_rd(priv, control_reg);

	ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
					!(val & MREGBIT_START_TX_COUNTER_READ),
					100, 10000);

	if (ret) {
		/*
		 * This could be caused by the PHY stopping its refclk even when
		 * the link is up, for power saving. See also comments in
		 * emac_stats_update().
		 */
		dev_err_ratelimited(&priv->ndev->dev,
				    "Read stat timeout. PHY clock stopped?\n");
		return ret;
	}

	high = emac_rd(priv, high_reg);
	low = emac_rd(priv, low_reg);
	*res = high << 16 | lower_16_bits(low);

	return 0;
}
1059 
/* Read TX statistics counter @cnt into @res */
static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
				  MAC_TX_STATCTR_DATA_HIGH,
				  MAC_TX_STATCTR_DATA_LOW);
}
1066 
/* Read RX statistics counter @cnt into @res */
static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
				  MAC_RX_STATCTR_DATA_HIGH,
				  MAC_RX_STATCTR_DATA_LOW);
}
1073 
1074 static void emac_update_counter(u64 *counter, u32 new_low)
1075 {
1076 	u32 old_low = lower_32_bits(*counter);
1077 	u64 high = upper_32_bits(*counter);
1078 
1079 	if (old_low > new_low) {
1080 		/* Overflowed, increment high 32 bits */
1081 		high++;
1082 	}
1083 
1084 	*counter = (high << 32) | new_low;
1085 }
1086 
/*
 * Refresh the 64-bit software statistics from the 32-bit hardware counters
 * and re-arm the periodic refresh timer. Caller must hold priv->stats_lock.
 */
static void emac_stats_update(struct emac_priv *priv)
{
	u64 *tx_stats_off = priv->tx_stats_off.array;
	u64 *rx_stats_off = priv->rx_stats_off.array;
	u64 *tx_stats = priv->tx_stats.array;
	u64 *rx_stats = priv->rx_stats.array;
	u32 i, res, offset;

	assert_spin_locked(&priv->stats_lock);

	/*
	 * We can't read statistics if the interface is not up. Also, some PHYs
	 * stop their reference clocks for link down power saving, which also
	 * causes reading statistics to time out. Don't update and don't
	 * reschedule in these cases.
	 */
	if (!netif_running(priv->ndev) ||
	    !netif_carrier_ok(priv->ndev) ||
	    !netif_device_present(priv->ndev)) {
		return;
	}

	for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
		/*
		 * If reading stats times out anyway, the stat registers will be
		 * stuck, and we can't really recover from that.
		 *
		 * Reading statistics also can't return an error, so just return
		 * without updating and without rescheduling.
		 */
		if (emac_tx_read_stat_cnt(priv, i, &res))
			return;

		/*
		 * Re-initializing while bringing interface up resets counters
		 * to zero, so to provide continuity, we add the values saved
		 * last time we did emac_down() to the new hardware-provided
		 * value.
		 */
		offset = lower_32_bits(tx_stats_off[i]);
		emac_update_counter(&tx_stats[i], res + offset);
	}

	/* Similar remarks as TX stats */
	for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
		if (emac_rx_read_stat_cnt(priv, i, &res))
			return;
		offset = lower_32_bits(rx_stats_off[i]);
		emac_update_counter(&rx_stats[i], res + offset);
	}

	/* Re-arm so overflows are caught even when nobody asks for stats */
	mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
}
1140 
/* Periodic timer callback: refresh stats under the stats lock */
static void emac_stats_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, stats_timer);

	spin_lock(&priv->stats_lock);

	emac_stats_update(priv);

	spin_unlock(&priv->stats_lock);
}
1151 
/*
 * Packet-size buckets for the RMON RX histogram; order must match the
 * rx_*_pkts counters copied into rmon_stats->hist[] in
 * emac_get_rmon_stats().
 */
static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
	{   64,   64 },
	{   65,  127 },
	{  128,  255 },
	{  256,  511 },
	{  512, 1023 },
	{ 1024, 1518 },
	{ 1519, 4096 },
	{ /* sentinel */ },
};
1162 
/* Like dev_fetch_dstats(), but we only use tx_drops */
static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
{
	const struct pcpu_dstats *stats;
	u64 tx_drops, total = 0;
	unsigned int start;
	int cpu;

	/* Sum per-CPU tx_drops; the u64_stats seqcount makes each read atomic */
	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(priv->ndev->dstats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tx_drops = u64_stats_read(&stats->tx_drops);
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		total += tx_drops;
	}

	return total;
}
1183 
/*
 * .ndo_get_stats64: build rtnl_link_stats64 from the hardware-derived
 * counters (refreshed under stats_lock) plus the software tx_dropped
 * counter kept in per-CPU dstats.
 */
static void emac_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *storage)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_tx_stats *tx_stats;
	union emac_hw_rx_stats *rx_stats;

	tx_stats = &priv->tx_stats;
	rx_stats = &priv->rx_stats;

	/* This is the only software counter */
	storage->tx_dropped = emac_get_stat_tx_drops(priv);

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	storage->tx_packets = tx_stats->stats.tx_ok_pkts;
	storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
	storage->tx_errors = tx_stats->stats.tx_err_pkts;

	storage->rx_packets = rx_stats->stats.rx_ok_pkts;
	storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
	storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
	storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
	storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
	storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;

	/* All collision flavours are folded into one aggregate */
	storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
	storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
	storage->collisions += tx_stats->stats.tx_excessclsn_pkts;

	storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
	storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1221 
/*
 * ethtool .get_rmon_stats: report RX size-histogram and malformed-frame
 * counters. Histogram buckets correspond to emac_rmon_hist_ranges[].
 */
static void emac_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_rx_stats *rx_stats;

	rx_stats = &priv->rx_stats;

	*ranges = emac_rmon_hist_ranges;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
	rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
	rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
	rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;

	/* Only RX has histogram stats */

	rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
	rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
	rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
	rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
	rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
	rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
	rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1254 
/*
 * ethtool .get_eth_mac_stats: report IEEE 802.3 MAC counters
 * (multicast/broadcast frames and collision statistics).
 */
static void emac_get_eth_mac_stats(struct net_device *dev,
				   struct ethtool_eth_mac_stats *mac_stats)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_tx_stats *tx_stats;
	union emac_hw_rx_stats *rx_stats;

	tx_stats = &priv->tx_stats;
	rx_stats = &priv->rx_stats;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
	mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;

	mac_stats->MulticastFramesReceivedOK =
		rx_stats->stats.rx_multicast_pkts;
	mac_stats->BroadcastFramesReceivedOK =
		rx_stats->stats.rx_broadcast_pkts;

	mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
	mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
	mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
	mac_stats->FramesAbortedDueToXSColls =
		tx_stats->stats.tx_excessclsn_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1285 
/* ethtool .get_pause_stats: report TX/RX pause frame counters */
static void emac_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *pause_stats)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_tx_stats *tx_stats;
	union emac_hw_rx_stats *rx_stats;

	tx_stats = &priv->tx_stats;
	rx_stats = &priv->rx_stats;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
	pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1305 
/* Other statistics that are not derivable from standard statistics */

/* Map a stats union member to its u64 index within the union's array view */
#define EMAC_ETHTOOL_STAT(type, name) \
	{ offsetof(type, stats.name) / sizeof(u64), #name }

/* Extra RX counters exposed via ethtool -S */
static const struct emac_ethtool_stats {
	size_t offset;			/* index into the u64 stats array */
	char str[ETH_GSTRING_LEN];	/* name reported to ethtool */
} emac_ethtool_rx_stats[] = {
	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
};
1318 
1319 static int emac_get_sset_count(struct net_device *dev, int sset)
1320 {
1321 	switch (sset) {
1322 	case ETH_SS_STATS:
1323 		return ARRAY_SIZE(emac_ethtool_rx_stats);
1324 	default:
1325 		return -EOPNOTSUPP;
1326 	}
1327 }
1328 
1329 static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1330 {
1331 	int i;
1332 
1333 	switch (stringset) {
1334 	case ETH_SS_STATS:
1335 		for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
1336 			memcpy(data, emac_ethtool_rx_stats[i].str,
1337 			       ETH_GSTRING_LEN);
1338 			data += ETH_GSTRING_LEN;
1339 		}
1340 		break;
1341 	}
1342 }
1343 
/*
 * ethtool .get_ethtool_stats: emit the extra RX counters, indexing the
 * stats union as a flat u64 array via the precomputed offsets in
 * emac_ethtool_rx_stats[].
 */
static void emac_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct emac_priv *priv = netdev_priv(dev);
	u64 *rx_stats = (u64 *)&priv->rx_stats;
	int i;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
		data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];

	spin_unlock_bh(&priv->stats_lock);
}
1360 
/* ethtool .get_regs_len: size of the DMA + MAC register dump in bytes */
static int emac_ethtool_get_regs_len(struct net_device *dev)
{
	return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
}
1365 
/*
 * ethtool .get_regs: dump the DMA register bank followed by the MAC
 * register bank into @space (layout matches emac_ethtool_get_regs_len()).
 */
static void emac_ethtool_get_regs(struct net_device *dev,
				  struct ethtool_regs *regs, void *space)
{
	struct emac_priv *priv = netdev_priv(dev);
	u32 *reg_space = space;
	int i;

	regs->version = 1;

	for (i = 0; i < EMAC_DMA_REG_CNT; i++)
		reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);

	for (i = 0; i < EMAC_MAC_REG_CNT; i++)
		reg_space[i + EMAC_DMA_REG_CNT] =
			emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
}
1382 
/* ethtool .get_drvinfo: report driver name and private stats count */
static void emac_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
}
1389 
/*
 * Deferred TX-timeout recovery: reset the MAC by closing and reopening
 * the interface under RTNL. Scheduled from the .ndo_tx_timeout handler.
 */
static void emac_tx_timeout_task(struct work_struct *work)
{
	struct net_device *ndev;
	struct emac_priv *priv;

	priv = container_of(work, struct emac_priv, tx_timeout_task);
	ndev = priv->ndev;

	rtnl_lock();

	/* No need to reset if already down */
	if (!netif_running(ndev)) {
		rtnl_unlock();
		return;
	}

	netdev_err(ndev, "MAC reset due to TX timeout\n");

	netif_trans_update(ndev); /* prevent tx timeout */
	/* Best-effort restart; dev_open() failure leaves the device down */
	dev_close(ndev);
	dev_open(ndev, NULL);

	rtnl_unlock();
}
1414 
/* One-time software state initialization, called from probe */
static void emac_sw_init(struct emac_priv *priv)
{
	priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;

	priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
	priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;

	spin_lock_init(&priv->stats_lock);

	INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);

	/* TX interrupt coalescing defaults (tuning values from SpacemiT) */
	priv->tx_coal_frames = EMAC_TX_FRAMES;
	priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;

	timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
	timer_setup(&priv->stats_timer, emac_stats_timer, 0);
}
1432 
1433 static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
1434 {
1435 	struct net_device *ndev = (struct net_device *)dev_id;
1436 	struct emac_priv *priv = netdev_priv(ndev);
1437 	bool should_schedule = false;
1438 	u32 clr = 0;
1439 	u32 status;
1440 
1441 	status = emac_rd(priv, DMA_STATUS_IRQ);
1442 
1443 	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
1444 		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
1445 		should_schedule = true;
1446 	}
1447 
1448 	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
1449 		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
1450 
1451 	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
1452 		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
1453 
1454 	if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
1455 		clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
1456 		should_schedule = true;
1457 	}
1458 
1459 	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
1460 		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
1461 
1462 	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
1463 		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
1464 
1465 	if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
1466 		clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
1467 
1468 	if (should_schedule) {
1469 		if (napi_schedule_prep(&priv->napi)) {
1470 			emac_disable_interrupt(priv);
1471 			__napi_schedule_irqoff(&priv->napi);
1472 		}
1473 	}
1474 
1475 	emac_wr(priv, DMA_STATUS_IRQ, clr);
1476 
1477 	return IRQ_HANDLED;
1478 }
1479 
/*
 * Program the TX descriptor ring base, enable the MAC transmitter with
 * auto-retry, and start the TX DMA engine.
 */
static void emac_configure_tx(struct emac_priv *priv)
{
	u32 val;

	/* Set base address */
	val = (u32)priv->tx_ring.desc_dma_addr;
	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);

	/* Set TX inter-frame gap value, enable transmit */
	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
	val &= ~MREGBIT_IFG_LEN;
	val |= MREGBIT_TRANSMIT_ENABLE;
	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);

	/* Disable auto-poll; TX DMA is kicked explicitly by the driver */
	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);

	/* Start TX DMA */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}
1502 
/*
 * Program the RX descriptor ring base, enable the MAC receiver in
 * store-and-forward mode, and start the RX DMA engine.
 */
static void emac_configure_rx(struct emac_priv *priv)
{
	u32 val;

	/* Set base address */
	val = (u32)priv->rx_ring.desc_dma_addr;
	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);

	/* Enable receive */
	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
	val |= MREGBIT_RECEIVE_ENABLE;
	val |= MREGBIT_STORE_FORWARD;
	emac_wr(priv, MAC_RECEIVE_CONTROL, val);

	/* Start RX DMA */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_RECEIVE_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}
1522 
/*
 * phylib link-change callback: mirror the negotiated duplex and speed
 * into MAC_GLOBAL_CONTROL and restart the stats timer once the link is
 * up (stats reads time out while the PHY refclk may be stopped).
 */
static void emac_adjust_link(struct net_device *dev)
{
	struct emac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 ctrl;

	if (phydev->link) {
		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);

		/* Update duplex and speed from PHY */

		FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
			     phydev->duplex == DUPLEX_FULL);

		ctrl &= ~MREGBIT_SPEED;

		switch (phydev->speed) {
		case SPEED_1000:
			ctrl |= MREGBIT_SPEED_1000M;
			break;
		case SPEED_100:
			ctrl |= MREGBIT_SPEED_100M;
			break;
		case SPEED_10:
			ctrl |= MREGBIT_SPEED_10M;
			break;
		default:
			netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
			phydev->speed = SPEED_UNKNOWN;
			break;
		}

		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);

		/*
		 * Reschedule stats updates now that link is up. See comments in
		 * emac_stats_update().
		 */
		mod_timer(&priv->stats_timer, jiffies);
	}

	phy_print_status(phydev);
}
1566 
/*
 * Program the RGMII TX/RX delay lines in the APMU syscon. The mask
 * always covers the enable/step/code fields, so for non-RGMII modes
 * (val stays 0) both delay lines are disabled.
 */
static void emac_update_delay_line(struct emac_priv *priv)
{
	u32 mask = 0, val = 0;

	mask |= EMAC_RX_DLINE_EN;
	mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
	mask |= EMAC_TX_DLINE_EN;
	mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;

	if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
		val |= EMAC_RX_DLINE_EN;
		val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
				  EMAC_DLINE_STEP_15P6);
		val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);

		val |= EMAC_TX_DLINE_EN;
		val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
				  EMAC_DLINE_STEP_15P6);
		val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
	}

	regmap_update_bits(priv->regmap_apmu,
			   priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
			   mask, val);
}
1592 
/*
 * Resolve the PHY from the device tree (phy-handle or fixed-link),
 * validate the phy-mode, configure the MAC-side interface, and connect
 * phylib with emac_adjust_link() as the link handler.
 *
 * Returns 0 on success or a negative errno.
 */
static int emac_phy_connect(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	struct phy_device *phydev;
	struct device_node *np;
	int ret;

	ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
	if (ret) {
		netdev_err(ndev, "No phy-mode found");
		return ret;
	}

	/* Hardware supports RMII and the RGMII variants only */
	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		break;
	default:
		netdev_err(ndev, "Unsupported PHY interface %s",
			   phy_modes(priv->phy_interface));
		return -EINVAL;
	}

	np = of_parse_phandle(dev->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(dev->of_node))
		np = of_node_get(dev->of_node);

	if (!np) {
		netdev_err(ndev, "No PHY specified");
		return -ENODEV;
	}

	ret = emac_phy_interface_config(priv);
	if (ret)
		goto err_node_put;

	phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
				priv->phy_interface);
	if (!phydev) {
		netdev_err(ndev, "Could not attach to PHY\n");
		ret = -ENODEV;
		goto err_node_put;
	}

	phydev->mac_managed_pm = true;

	emac_update_delay_line(priv);

	phy_attached_info(phydev);

	/* Success path falls through: the node reference is dropped either way */
err_node_put:
	of_node_put(np);
	return ret;
}
1651 
/*
 * Bring the interface up: connect the PHY, initialize the MAC, program
 * the TX/RX rings, request the (shared) IRQ, enable DMA interrupts and
 * NAPI, and start the stats timer.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are released.
 */
static int emac_up(struct emac_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;
	int ret;

	pm_runtime_get_sync(&pdev->dev);

	ret = emac_phy_connect(ndev);
	if (ret) {
		dev_err(&pdev->dev, "emac_phy_connect failed\n");
		goto err_pm_put;
	}

	emac_init_hw(priv);

	emac_set_mac_addr(priv, ndev->dev_addr);
	emac_configure_tx(priv);
	emac_configure_rx(priv);

	emac_alloc_rx_desc_buffers(priv);

	phy_start(ndev->phydev);

	ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_reset_disconnect_phy;
	}

	/* Don't enable MAC interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);

	/* Enable DMA interrupts */
	emac_wr(priv, DMA_INTERRUPT_ENABLE,
		MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
			MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
			MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
			MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
			MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	/* Kick an immediate stats refresh; it reschedules itself */
	mod_timer(&priv->stats_timer, jiffies);

	return 0;

err_reset_disconnect_phy:
	emac_reset_hw(priv);
	phy_disconnect(ndev->phydev);

err_pm_put:
	pm_runtime_put_sync(&pdev->dev);
	return ret;
}
1710 
/*
 * Tear the interface down in the reverse order of emac_up(): stop the
 * queue, disconnect the PHY, mask interrupts, free the IRQ, stop NAPI
 * and the timers, reset the MAC, then snapshot the statistics so they
 * stay continuous across the next emac_up().
 *
 * Always returns 0.
 */
static int emac_down(struct emac_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;

	netif_stop_queue(ndev);

	phy_disconnect(ndev->phydev);

	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	free_irq(priv->irq, ndev);

	napi_disable(&priv->napi);

	timer_delete_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_delete_sync(&priv->stats_timer);

	emac_reset_hw(priv);

	/* Update and save current stats, see emac_stats_update() for usage */

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	priv->tx_stats_off = priv->tx_stats;
	priv->rx_stats_off = priv->rx_stats;

	spin_unlock_bh(&priv->stats_lock);

	pm_runtime_put_sync(&pdev->dev);
	return 0;
}
1748 
/*
 * .ndo_open: allocate TX/RX descriptor resources and bring the
 * interface up; on failure, resources allocated so far are released.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	int ret;

	ret = emac_alloc_tx_resources(priv);
	if (ret) {
		dev_err(dev, "Cannot allocate TX resources\n");
		return ret;
	}

	ret = emac_alloc_rx_resources(priv);
	if (ret) {
		dev_err(dev, "Cannot allocate RX resources\n");
		goto err_free_tx;
	}

	ret = emac_up(priv);
	if (ret) {
		dev_err(dev, "Error when bringing interface up\n");
		goto err_free_rx;
	}
	return 0;

err_free_rx:
	emac_free_rx_resources(priv);
err_free_tx:
	emac_free_tx_resources(priv);

	return ret;
}
1782 
/*
 * .ndo_stop: bring the interface down and release the TX/RX descriptor
 * resources allocated in emac_open().
 */
static int emac_stop(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);

	emac_down(priv);
	emac_free_tx_resources(priv);
	emac_free_rx_resources(priv);

	return 0;
}
1794 
/* ethtool operations; link settings are delegated to phylib helpers */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_drvinfo		= emac_get_drvinfo,
	.get_link		= ethtool_op_get_link,

	.get_regs		= emac_ethtool_get_regs,
	.get_regs_len		= emac_ethtool_get_regs_len,

	.get_rmon_stats		= emac_get_rmon_stats,
	.get_pause_stats	= emac_get_pause_stats,
	.get_eth_mac_stats	= emac_get_eth_mac_stats,

	.get_sset_count		= emac_get_sset_count,
	.get_strings		= emac_get_strings,
	.get_ethtool_stats	= emac_get_ethtool_stats,
};
1813 
/* Network device operations */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open               = emac_open,
	.ndo_stop               = emac_stop,
	.ndo_start_xmit         = emac_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address    = emac_set_mac_address,
	.ndo_eth_ioctl          = phy_do_ioctl_running,
	.ndo_change_mtu         = emac_change_mtu,
	.ndo_tx_timeout         = emac_tx_timeout,
	.ndo_set_rx_mode        = emac_set_rx_mode,
	.ndo_get_stats64	= emac_get_stats64,
};
1826 
1827 /* Currently we always use 15.6 ps/step for the delay line */
1828 
/* Convert a delay in picoseconds to delay-line units (15.6 ps per step) */
static u32 delay_ps_to_unit(u32 ps)
{
	u32 tenths_of_ps = ps * 10;

	return DIV_ROUND_CLOSEST(tenths_of_ps, 156);
}
1833 
/* Convert delay-line units (15.6 ps per step) back to picoseconds */
static u32 delay_unit_to_ps(u32 unit)
{
	u32 tenths_of_ps = unit * 156;

	return DIV_ROUND_CLOSEST(tenths_of_ps, 10);
}
1838 
/* Largest code the TX/RX delay-line CODE field can hold */
#define EMAC_MAX_DELAY_UNIT	FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)

/* Minus one just to be safe from rounding errors */
#define EMAC_MAX_DELAY_PS	(delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
1843 
1844 static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
1845 {
1846 	struct device_node *np = pdev->dev.of_node;
1847 	struct device *dev = &pdev->dev;
1848 	u8 mac_addr[ETH_ALEN] = { 0 };
1849 	int ret;
1850 
1851 	priv->iobase = devm_platform_ioremap_resource(pdev, 0);
1852 	if (IS_ERR(priv->iobase))
1853 		return dev_err_probe(dev, PTR_ERR(priv->iobase),
1854 				     "ioremap failed\n");
1855 
1856 	priv->regmap_apmu =
1857 		syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
1858 						     &priv->regmap_apmu_offset);
1859 
1860 	if (IS_ERR(priv->regmap_apmu))
1861 		return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
1862 				     "failed to get syscon\n");
1863 
1864 	priv->irq = platform_get_irq(pdev, 0);
1865 	if (priv->irq < 0)
1866 		return priv->irq;
1867 
1868 	ret = of_get_mac_address(np, mac_addr);
1869 	if (ret) {
1870 		if (ret == -EPROBE_DEFER)
1871 			return dev_err_probe(dev, ret,
1872 					     "Can't get MAC address\n");
1873 
1874 		dev_info(&pdev->dev, "Using random MAC address\n");
1875 		eth_hw_addr_random(priv->ndev);
1876 	} else {
1877 		eth_hw_addr_set(priv->ndev, mac_addr);
1878 	}
1879 
1880 	priv->tx_delay = 0;
1881 	priv->rx_delay = 0;
1882 
1883 	of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
1884 	of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
1885 
1886 	if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
1887 		dev_err(&pdev->dev,
1888 			"tx-internal-delay-ps too large: max %d, got %d",
1889 			EMAC_MAX_DELAY_PS, priv->tx_delay);
1890 		return -EINVAL;
1891 	}
1892 
1893 	if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
1894 		dev_err(&pdev->dev,
1895 			"rx-internal-delay-ps too large: max %d, got %d",
1896 			EMAC_MAX_DELAY_PS, priv->rx_delay);
1897 		return -EINVAL;
1898 	}
1899 
1900 	priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
1901 	priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
1902 
1903 	return 0;
1904 }
1905 
/* devm cleanup action: undo of_phy_register_fixed_link() from probe */
static void emac_phy_deregister_fixed_link(void *data)
{
	struct device_node *of_node = data;

	of_phy_deregister_fixed_link(of_node);
}
1912 
1913 static int emac_probe(struct platform_device *pdev)
1914 {
1915 	struct device *dev = &pdev->dev;
1916 	struct reset_control *reset;
1917 	struct net_device *ndev;
1918 	struct emac_priv *priv;
1919 	int ret;
1920 
1921 	ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
1922 	if (!ndev)
1923 		return -ENOMEM;
1924 
1925 	ndev->hw_features = NETIF_F_SG;
1926 	ndev->features |= ndev->hw_features;
1927 
1928 	ndev->max_mtu = EMAC_RX_BUF_MAX - (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN);
1929 	ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
1930 
1931 	priv = netdev_priv(ndev);
1932 	priv->ndev = ndev;
1933 	priv->pdev = pdev;
1934 	platform_set_drvdata(pdev, priv);
1935 
1936 	ret = emac_config_dt(pdev, priv);
1937 	if (ret < 0)
1938 		return dev_err_probe(dev, ret, "Configuration failed\n");
1939 
1940 	ndev->watchdog_timeo = 5 * HZ;
1941 	ndev->base_addr = (unsigned long)priv->iobase;
1942 	ndev->irq = priv->irq;
1943 
1944 	ndev->ethtool_ops = &emac_ethtool_ops;
1945 	ndev->netdev_ops = &emac_netdev_ops;
1946 
1947 	devm_pm_runtime_enable(&pdev->dev);
1948 
1949 	priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
1950 	if (IS_ERR(priv->bus_clk))
1951 		return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
1952 				     "Failed to get clock\n");
1953 
1954 	reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
1955 								     NULL);
1956 	if (IS_ERR(reset))
1957 		return dev_err_probe(dev, PTR_ERR(reset),
1958 				     "Failed to get reset\n");
1959 
1960 	if (of_phy_is_fixed_link(dev->of_node)) {
1961 		ret = of_phy_register_fixed_link(dev->of_node);
1962 		if (ret)
1963 			return dev_err_probe(dev, ret,
1964 					     "Failed to register fixed-link\n");
1965 
1966 		ret = devm_add_action_or_reset(dev,
1967 					       emac_phy_deregister_fixed_link,
1968 					       dev->of_node);
1969 
1970 		if (ret) {
1971 			dev_err(dev, "devm_add_action_or_reset failed\n");
1972 			return ret;
1973 		}
1974 	}
1975 
1976 	emac_sw_init(priv);
1977 
1978 	ret = emac_mdio_init(priv);
1979 	if (ret)
1980 		goto err_timer_delete;
1981 
1982 	SET_NETDEV_DEV(ndev, &pdev->dev);
1983 
1984 	ret = devm_register_netdev(dev, ndev);
1985 	if (ret) {
1986 		dev_err(dev, "devm_register_netdev failed\n");
1987 		goto err_timer_delete;
1988 	}
1989 
1990 	netif_napi_add(ndev, &priv->napi, emac_rx_poll);
1991 	netif_carrier_off(ndev);
1992 
1993 	return 0;
1994 
1995 err_timer_delete:
1996 	timer_delete_sync(&priv->txtimer);
1997 	timer_delete_sync(&priv->stats_timer);
1998 
1999 	return ret;
2000 }
2001 
/*
 * Remove: shut down timers and deferred work, then reset the MAC.
 * The netdev and most resources are devm-managed and freed afterwards.
 */
static void emac_remove(struct platform_device *pdev)
{
	struct emac_priv *priv = platform_get_drvdata(pdev);

	timer_shutdown_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_shutdown_sync(&priv->stats_timer);

	emac_reset_hw(priv);
}
2013 
/*
 * System resume: re-enable the bus clock and, if the interface was
 * running before suspend, bring it back up and restart the stats timer.
 *
 * NOTE(review): emac_open() is invoked here directly, outside RTNL —
 * confirm this ordering is safe against concurrent ndo_open/ndo_stop.
 */
static int emac_resume(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable bus clock: %d\n", ret);
		return ret;
	}

	if (!netif_running(ndev))
		return 0;

	ret = emac_open(ndev);
	if (ret) {
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	netif_device_attach(ndev);

	mod_timer(&priv->stats_timer, jiffies);

	return 0;
}
2041 
/*
 * System suspend: stop the interface if it is running, then gate the
 * bus clock and detach the device. Counterpart of emac_resume().
 */
static int emac_suspend(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!ndev || !netif_running(ndev)) {
		clk_disable_unprepare(priv->bus_clk);
		return 0;
	}

	emac_stop(ndev);

	clk_disable_unprepare(priv->bus_clk);
	netif_device_detach(ndev);
	return 0;
}
2058 
/* System sleep PM callbacks (no-ops when CONFIG_PM_SLEEP is disabled) */
static const struct dev_pm_ops emac_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
};
2062 
/* Device-tree match table */
static const struct of_device_id emac_of_match[] = {
	{ .compatible = "spacemit,k1-emac" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, emac_of_match);
2068 
/* Platform driver registration and module metadata */
static struct platform_driver emac_driver = {
	.probe = emac_probe,
	.remove = emac_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(emac_of_match),
		.pm = &emac_pm_ops,
	},
};
module_platform_driver(emac_driver);

MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
MODULE_LICENSE("GPL");
2083