xref: /linux/drivers/net/ethernet/spacemit/k1_emac.c (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SpacemiT K1 Ethernet driver
4  *
5  * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
6  * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
7  */
8 
9 #include <linux/bitfield.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/ethtool.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/iopoll.h>
18 #include <linux/kernel.h>
19 #include <linux/mfd/syscon.h>
20 #include <linux/module.h>
21 #include <linux/of.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_mdio.h>
24 #include <linux/of_net.h>
25 #include <linux/phy.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/pm.h>
29 #include <linux/regmap.h>
30 #include <linux/reset.h>
31 #include <linux/rtnetlink.h>
32 #include <linux/timer.h>
33 #include <linux/types.h>
34 
35 #include "k1_emac.h"
36 
#define DRIVER_NAME "k1_emac"

/* RX buffer size buckets; emac_change_mtu() picks the smallest that fits */
#define EMAC_DEFAULT_BUFSIZE		1536
#define EMAC_RX_BUF_2K			2048
#define EMAC_RX_BUF_4K			4096

/* Tuning parameters from SpacemiT */
#define EMAC_TX_FRAMES			64
#define EMAC_TX_COAL_TIMEOUT		40000
#define EMAC_RX_FRAMES			64
#define EMAC_RX_COAL_TIMEOUT		(600 * 312)

/* FIFO/threshold and ring-size defaults (vendor-recommended values) */
#define DEFAULT_TX_ALMOST_FULL		0x1f8
#define DEFAULT_TX_THRESHOLD		1518
#define DEFAULT_RX_THRESHOLD		12
#define DEFAULT_TX_RING_NUM		1024
#define DEFAULT_RX_RING_NUM		1024
#define DEFAULT_DMA_BURST		MREGBIT_BURST_16WORD
#define HASH_TABLE_SIZE			64
56 
/* One DMA-mapped buffer attached to a TX descriptor */
struct desc_buf {
	u64 dma_addr;		/* DMA address handed to the hardware */
	void *buff_addr;	/* CPU virtual address of the buffer */
	u16 dma_len;		/* Mapped length in bytes */
	u8 map_as_page;		/* Nonzero: unmap with dma_unmap_page() */
};
63 
/* Software state for one TX descriptor: owning skb plus up to two buffers */
struct emac_tx_desc_buffer {
	struct sk_buff *skb;	/* Set only on the last descriptor of an skb */
	struct desc_buf buf[2];
};
68 
/* Software state for one RX descriptor and its single receive buffer */
struct emac_rx_desc_buffer {
	struct sk_buff *skb;	/* Buffer awaiting (or holding) a frame */
	u64 dma_addr;		/* DMA address handed to the hardware */
	void *buff_addr;	/* CPU virtual address of the buffer */
	u16 dma_len;		/* Mapped length in bytes */
	u8 map_as_page;		/* Unused for RX in this driver path */
};
76 
/**
 * struct emac_desc_ring - Software-side information for one descriptor ring
 * Same structure used for both RX and TX
 * @desc_addr: Virtual address to the descriptor ring memory
 * @desc_dma_addr: DMA address of the descriptor ring
 * @total_size: Size of ring in bytes (page-aligned at allocation time)
 * @total_cnt: Number of descriptors
 * @head: Next descriptor to associate a buffer with
 * @tail: Next descriptor to check status bit
 * @rx_desc_buf: Array of buffer-tracking entries for RX
 * @tx_desc_buf: Array of buffer-tracking entries for TX, with max of two
 *               buffers each
 */
struct emac_desc_ring {
	void *desc_addr;
	dma_addr_t desc_dma_addr;
	u32 total_size;
	u32 total_cnt;
	u32 head;
	u32 tail;
	union {
		struct emac_rx_desc_buffer *rx_desc_buf;
		struct emac_tx_desc_buffer *tx_desc_buf;
	};
};
101 
/* Driver-private state; one instance per EMAC device */
struct emac_priv {
	void __iomem *iobase;		/* Mapped MAC/DMA register space */
	u32 dma_buf_sz;			/* Current RX buffer size */
	struct emac_desc_ring tx_ring;
	struct emac_desc_ring rx_ring;

	struct net_device *ndev;
	struct napi_struct napi;
	struct platform_device *pdev;
	struct clk *bus_clk;
	struct clk *ref_clk;
	struct regmap *regmap_apmu;	/* System controller (APMU) regmap */
	u32 regmap_apmu_offset;		/* Offset of EMAC regs inside APMU */
	int irq;

	phy_interface_t phy_interface;

	/*
	 * Accumulated 64-bit counters plus the snapshots saved when the
	 * interface last went down (see emac_stats_update()).
	 */
	union emac_hw_tx_stats tx_stats, tx_stats_off;
	union emac_hw_rx_stats rx_stats, rx_stats_off;

	/* TX interrupt coalescing: frame counter, threshold and timeout */
	u32 tx_count_frames;
	u32 tx_coal_frames;
	u32 tx_coal_timeout;
	struct work_struct tx_timeout_task;	/* Deferred TX-timeout recovery */

	struct timer_list txtimer;	/* TX coalescing timer */
	struct timer_list stats_timer;	/* Periodic statistics refresh */

	/* NOTE(review): presumably RGMII clock delays from DT — confirm */
	u32 tx_delay;
	u32 rx_delay;

	/* Softirq-safe, hold while touching hardware statistics */
	spinlock_t stats_lock;
};
136 
/* Write a 32-bit value to a MAC/DMA register */
static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->iobase + reg);
}
141 
/* Read a 32-bit MAC/DMA register */
static u32 emac_rd(struct emac_priv *priv, u32 reg)
{
	return readl(priv->iobase + reg);
}
146 
147 static int emac_phy_interface_config(struct emac_priv *priv)
148 {
149 	u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
150 
151 	if (phy_interface_mode_is_rgmii(priv->phy_interface))
152 		val |= PHY_INTF_RGMII;
153 
154 	regmap_update_bits(priv->regmap_apmu,
155 			   priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
156 			   mask, val);
157 
158 	return 0;
159 }
160 
161 /*
162  * Where the hardware expects a MAC address, it is laid out in this high, med,
163  * low order in three consecutive registers and in this format.
164  */
165 
166 static void emac_set_mac_addr_reg(struct emac_priv *priv,
167 				  const unsigned char *addr,
168 				  u32 reg)
169 {
170 	emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
171 	emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
172 	emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
173 }
174 
/* Program the station address used for unicast filtering (MAC address 1) */
static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
{
	emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
}
179 
/* Quiesce the MAC: mask all interrupts, then stop MAC TX/RX and the DMA */
static void emac_reset_hw(struct emac_priv *priv)
{
	/* Disable all interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Disable DMA */
	emac_wr(priv, DMA_CONTROL, 0x0);
}
193 
/*
 * Put the MAC into a known initial state: address filtering, FIFO
 * thresholds, RX interrupt mitigation, and a freshly-reset DMA engine.
 * TX/RX units are left disabled; callers enable them separately.
 */
static void emac_init_hw(struct emac_priv *priv)
{
	u32 rxirq = 0, dma = 0;

	/* Use a single AXI ID for all EMAC DMA transactions */
	regmap_set_bits(priv->regmap_apmu,
			priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
			AXI_SINGLE_ID);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Enable MAC address 1 filtering */
	emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);

	/* Zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);

	/* Configure thresholds */
	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
		DEFAULT_TX_THRESHOLD);
	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);

	/* RX IRQ mitigation */
	rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
			   EMAC_RX_FRAMES);
	rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
			    EMAC_RX_COAL_TIMEOUT);
	rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
	emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);

	/* Disable and set DMA config */
	emac_wr(priv, DMA_CONTROL, 0x0);

	/*
	 * Software-reset the DMA engine. The ~10ms settle delays are
	 * inherited from the vendor driver — no documented requirement.
	 */
	emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
	usleep_range(9000, 10000);
	emac_wr(priv, DMA_CONFIGURATION, 0x0);
	usleep_range(9000, 10000);

	dma |= MREGBIT_STRICT_BURST;
	dma |= MREGBIT_DMA_64BIT_MODE;
	dma |= DEFAULT_DMA_BURST;

	emac_wr(priv, DMA_CONFIGURATION, dma);
}
243 
/* Poke the DMA engine so it (re)scans the TX descriptor ring */
static void emac_dma_start_transmit(struct emac_priv *priv)
{
	/* The actual value written does not matter */
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
}
249 
250 static void emac_enable_interrupt(struct emac_priv *priv)
251 {
252 	u32 val;
253 
254 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
255 	val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
256 	val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
257 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
258 }
259 
260 static void emac_disable_interrupt(struct emac_priv *priv)
261 {
262 	u32 val;
263 
264 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
265 	val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
266 	val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
267 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
268 }
269 
270 static u32 emac_tx_avail(struct emac_priv *priv)
271 {
272 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
273 	u32 avail;
274 
275 	if (tx_ring->tail > tx_ring->head)
276 		avail = tx_ring->tail - tx_ring->head - 1;
277 	else
278 		avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
279 
280 	return avail;
281 }
282 
/* (Re)arm the TX coalescing timer tx_coal_timeout microseconds from now */
static void emac_tx_coal_timer_resched(struct emac_priv *priv)
{
	mod_timer(&priv->txtimer,
		  jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
}
288 
/* TX coalescing timeout: run NAPI so completed TX descriptors get reclaimed */
static void emac_tx_coal_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, txtimer);

	napi_schedule(&priv->napi);
}
295 
296 static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
297 {
298 	priv->tx_count_frames += pkt_num;
299 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
300 		emac_tx_coal_timer_resched(priv);
301 		return false;
302 	}
303 
304 	priv->tx_count_frames = 0;
305 	return true;
306 }
307 
308 static void emac_free_tx_buf(struct emac_priv *priv, int i)
309 {
310 	struct emac_tx_desc_buffer *tx_buf;
311 	struct emac_desc_ring *tx_ring;
312 	struct desc_buf *buf;
313 	int j;
314 
315 	tx_ring = &priv->tx_ring;
316 	tx_buf = &tx_ring->tx_desc_buf[i];
317 
318 	for (j = 0; j < 2; j++) {
319 		buf = &tx_buf->buf[j];
320 		if (!buf->dma_addr)
321 			continue;
322 
323 		if (buf->map_as_page)
324 			dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
325 				       buf->dma_len, DMA_TO_DEVICE);
326 		else
327 			dma_unmap_single(&priv->pdev->dev,
328 					 buf->dma_addr, buf->dma_len,
329 					 DMA_TO_DEVICE);
330 
331 		buf->dma_addr = 0;
332 		buf->map_as_page = false;
333 		buf->buff_addr = NULL;
334 	}
335 
336 	if (tx_buf->skb) {
337 		dev_kfree_skb_any(tx_buf->skb);
338 		tx_buf->skb = NULL;
339 	}
340 }
341 
342 static void emac_clean_tx_desc_ring(struct emac_priv *priv)
343 {
344 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
345 	u32 i;
346 
347 	for (i = 0; i < tx_ring->total_cnt; i++)
348 		emac_free_tx_buf(priv, i);
349 
350 	tx_ring->head = 0;
351 	tx_ring->tail = 0;
352 }
353 
354 static void emac_clean_rx_desc_ring(struct emac_priv *priv)
355 {
356 	struct emac_rx_desc_buffer *rx_buf;
357 	struct emac_desc_ring *rx_ring;
358 	u32 i;
359 
360 	rx_ring = &priv->rx_ring;
361 
362 	for (i = 0; i < rx_ring->total_cnt; i++) {
363 		rx_buf = &rx_ring->rx_desc_buf[i];
364 
365 		if (!rx_buf->skb)
366 			continue;
367 
368 		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
369 				 rx_buf->dma_len, DMA_FROM_DEVICE);
370 
371 		dev_kfree_skb(rx_buf->skb);
372 		rx_buf->skb = NULL;
373 	}
374 
375 	rx_ring->tail = 0;
376 	rx_ring->head = 0;
377 }
378 
379 static int emac_alloc_tx_resources(struct emac_priv *priv)
380 {
381 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
382 	struct platform_device *pdev = priv->pdev;
383 
384 	tx_ring->tx_desc_buf = kcalloc(tx_ring->total_cnt,
385 				       sizeof(*tx_ring->tx_desc_buf),
386 				       GFP_KERNEL);
387 
388 	if (!tx_ring->tx_desc_buf)
389 		return -ENOMEM;
390 
391 	tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
392 	tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);
393 
394 	tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
395 						&tx_ring->desc_dma_addr,
396 						GFP_KERNEL);
397 	if (!tx_ring->desc_addr) {
398 		kfree(tx_ring->tx_desc_buf);
399 		return -ENOMEM;
400 	}
401 
402 	tx_ring->head = 0;
403 	tx_ring->tail = 0;
404 
405 	return 0;
406 }
407 
408 static int emac_alloc_rx_resources(struct emac_priv *priv)
409 {
410 	struct emac_desc_ring *rx_ring = &priv->rx_ring;
411 	struct platform_device *pdev = priv->pdev;
412 
413 	rx_ring->rx_desc_buf = kcalloc(rx_ring->total_cnt,
414 				       sizeof(*rx_ring->rx_desc_buf),
415 				       GFP_KERNEL);
416 	if (!rx_ring->rx_desc_buf)
417 		return -ENOMEM;
418 
419 	rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);
420 
421 	rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);
422 
423 	rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
424 						&rx_ring->desc_dma_addr,
425 						GFP_KERNEL);
426 	if (!rx_ring->desc_addr) {
427 		kfree(rx_ring->rx_desc_buf);
428 		return -ENOMEM;
429 	}
430 
431 	rx_ring->head = 0;
432 	rx_ring->tail = 0;
433 
434 	return 0;
435 }
436 
437 static void emac_free_tx_resources(struct emac_priv *priv)
438 {
439 	struct emac_desc_ring *tr = &priv->tx_ring;
440 	struct device *dev = &priv->pdev->dev;
441 
442 	emac_clean_tx_desc_ring(priv);
443 
444 	kfree(tr->tx_desc_buf);
445 	tr->tx_desc_buf = NULL;
446 
447 	dma_free_coherent(dev, tr->total_size, tr->desc_addr,
448 			  tr->desc_dma_addr);
449 	tr->desc_addr = NULL;
450 }
451 
452 static void emac_free_rx_resources(struct emac_priv *priv)
453 {
454 	struct emac_desc_ring *rr = &priv->rx_ring;
455 	struct device *dev = &priv->pdev->dev;
456 
457 	emac_clean_rx_desc_ring(priv);
458 
459 	kfree(rr->rx_desc_buf);
460 	rr->rx_desc_buf = NULL;
461 
462 	dma_free_coherent(dev, rr->total_size, rr->desc_addr,
463 			  rr->desc_dma_addr);
464 	rr->desc_addr = NULL;
465 }
466 
/*
 * Reclaim TX descriptors the DMA engine has finished with: unmap their
 * buffers, free the skbs, and wake the queue once enough ring space is
 * available again. Holds the netif TX lock to serialize with the xmit
 * path. Always returns 0.
 */
static int emac_tx_clean_desc(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	struct emac_desc_ring *tx_ring;
	struct emac_desc *tx_desc;
	u32 i;

	netif_tx_lock(ndev);

	tx_ring = &priv->tx_ring;

	i = tx_ring->tail;

	while (i != tx_ring->head) {
		tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];

		/* Stop checking if desc still own by DMA */
		if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
			break;

		emac_free_tx_buf(priv, i);
		memset(tx_desc, 0, sizeof(struct emac_desc));

		if (++i == tx_ring->total_cnt)
			i = 0;
	}

	tx_ring->tail = i;

	/* Wake the queue once more than a quarter of the ring is free */
	if (unlikely(netif_queue_stopped(ndev) &&
		     emac_tx_avail(priv) > tx_ring->total_cnt / 4))
		netif_wake_queue(ndev);

	netif_tx_unlock(ndev);

	return 0;
}
504 
505 static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
506 {
507 	const char *msg;
508 	u32 len;
509 
510 	len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);
511 
512 	if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
513 		msg = "Not last descriptor"; /* This would be a bug */
514 	else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
515 		msg = "Runt frame";
516 	else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
517 		msg = "Frame CRC error";
518 	else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
519 		msg = "Frame exceeds max length";
520 	else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
521 		msg = "Frame jabber error";
522 	else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
523 		msg = "Frame length error";
524 	else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
525 		msg = "Frame length unacceptable";
526 	else
527 		return true; /* All good */
528 
529 	dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);
530 
531 	return false;
532 }
533 
/*
 * Refill the RX ring starting at head: give every descriptor without an skb
 * a freshly-allocated, DMA-mapped buffer and hand it to the hardware.
 * Stops early (without error) if allocation or mapping fails; the next
 * refill attempt will retry.
 */
static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct emac_desc rx_desc, *rx_desc_addr;
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct sk_buff *skb;
	u32 i;

	i = rx_ring->head;
	rx_buf = &rx_ring->rx_desc_buf[i];

	while (!rx_buf->skb) {
		skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
		if (!skb)
			break;

		skb->dev = ndev;

		rx_buf->skb = skb;
		rx_buf->dma_len = priv->dma_buf_sz;
		rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
						  priv->dma_buf_sz,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
			dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
			goto err_free_skb;
		}

		rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Build the descriptor locally, publish it as a whole below */
		memset(&rx_desc, 0, sizeof(rx_desc));

		rx_desc.buffer_addr_1 = rx_buf->dma_addr;
		rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
					   rx_buf->dma_len);

		if (++i == rx_ring->total_cnt) {
			rx_desc.desc1 |= RX_DESC_1_END_RING;
			i = 0;
		}

		/* Descriptor must be fully written before OWN is granted */
		*rx_desc_addr = rx_desc;
		dma_wmb();
		WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);

		rx_buf = &rx_ring->rx_desc_buf[i];
	}

	rx_ring->head = i;
	return;

err_free_skb:
	dev_kfree_skb_any(skb);
	rx_buf->skb = NULL;
}
590 
/*
 * NAPI RX processing: walk completed descriptors from tail, hand good
 * frames to the stack via GRO, drop bad ones, then refill the ring.
 * Returns the number of descriptors processed (at most @budget).
 */
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct emac_desc_ring *rx_ring;
	struct sk_buff *skb = NULL;
	struct emac_desc *rx_desc;
	u32 got = 0, skb_len, i;

	rx_ring = &priv->rx_ring;

	i = rx_ring->tail;

	while (budget--) {
		rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Stop checking if rx_desc still owned by DMA */
		if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
			break;

		/* Order the OWN check before reading the rest of the desc */
		dma_rmb();

		rx_buf = &rx_ring->rx_desc_buf[i];

		if (!rx_buf->skb)
			break;

		got++;

		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				 rx_buf->dma_len, DMA_FROM_DEVICE);

		if (likely(emac_rx_frame_good(priv, rx_desc))) {
			skb = rx_buf->skb;

			/* Strip the hardware-appended FCS */
			skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
					    rx_desc->desc0);
			skb_len -= ETH_FCS_LEN;

			skb_put(skb, skb_len);
			skb->dev = ndev;
			ndev->hard_header_len = ETH_HLEN;

			skb->protocol = eth_type_trans(skb, ndev);

			/* No hardware checksum offload on this MAC */
			skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&priv->napi, skb);

			memset(rx_desc, 0, sizeof(struct emac_desc));
			rx_buf->skb = NULL;
		} else {
			dev_kfree_skb_irq(rx_buf->skb);
			rx_buf->skb = NULL;
		}

		if (++i == rx_ring->total_cnt)
			i = 0;
	}

	rx_ring->tail = i;

	/* Re-arm the freed descriptors with fresh buffers */
	emac_alloc_rx_desc_buffers(priv);

	return got;
}
658 
659 static int emac_rx_poll(struct napi_struct *napi, int budget)
660 {
661 	struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
662 	int work_done;
663 
664 	emac_tx_clean_desc(priv);
665 
666 	work_done = emac_rx_clean_desc(priv, budget);
667 	if (work_done < budget && napi_complete_done(napi, work_done))
668 		emac_enable_interrupt(priv);
669 
670 	return work_done;
671 }
672 
673 /*
674  * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
675  *
676  * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
677  * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
678  */
679 
680 static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
681 			    struct emac_tx_desc_buffer *tx_buf,
682 			    struct sk_buff *skb, u32 frag_idx)
683 {
684 	bool map_as_page, buf_idx;
685 	const skb_frag_t *frag;
686 	phys_addr_t addr;
687 	u32 len;
688 	int ret;
689 
690 	buf_idx = frag_idx % 2;
691 
692 	if (frag_idx == 0) {
693 		/* Non-fragmented part */
694 		len = skb_headlen(skb);
695 		addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
696 		map_as_page = false;
697 	} else {
698 		/* Fragment */
699 		frag = &skb_shinfo(skb)->frags[frag_idx - 1];
700 		len = skb_frag_size(frag);
701 		addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
702 		map_as_page = true;
703 	}
704 
705 	ret = dma_mapping_error(dev, addr);
706 	if (ret)
707 		return ret;
708 
709 	tx_buf->buf[buf_idx].dma_addr = addr;
710 	tx_buf->buf[buf_idx].dma_len = len;
711 	tx_buf->buf[buf_idx].map_as_page = map_as_page;
712 
713 	if (buf_idx == 0) {
714 		tx_desc->buffer_addr_1 = addr;
715 		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
716 	} else {
717 		tx_desc->buffer_addr_2 = addr;
718 		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
719 	}
720 
721 	return 0;
722 }
723 
/*
 * Build the descriptor chain for @skb (linear part plus all page fragments,
 * two fragments per descriptor) and kick the DMA engine. The first
 * descriptor's OWN bit is granted only after the whole chain is written, so
 * hardware never sees a partially-built chain. On mapping failure the skb
 * is dropped; the ring head is not advanced, so the slots are reused.
 */
static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_desc tx_desc, *tx_desc_addr;
	struct device *dev = &priv->pdev->dev;
	struct emac_tx_desc_buffer *tx_buf;
	u32 head, old_head, frag_num, f;
	bool buf_idx;

	frag_num = skb_shinfo(skb)->nr_frags;
	head = tx_ring->head;
	old_head = head;

	for (f = 0; f < frag_num + 1; f++) {
		buf_idx = f % 2;

		/*
		 * If using buffer 1, initialize a new desc. Otherwise, use
		 * buffer 2 of previous fragment's desc.
		 */
		if (!buf_idx) {
			tx_buf = &tx_ring->tx_desc_buf[head];
			tx_desc_addr =
				&((struct emac_desc *)tx_ring->desc_addr)[head];
			memset(&tx_desc, 0, sizeof(tx_desc));

			/*
			 * Give ownership for all but first desc initially. For
			 * first desc, give at the end so DMA cannot start
			 * reading uninitialized descs.
			 */
			if (head != old_head)
				tx_desc.desc0 |= TX_DESC_0_OWN;

			if (++head == tx_ring->total_cnt) {
				/* Just used last desc in ring */
				tx_desc.desc1 |= TX_DESC_1_END_RING;
				head = 0;
			}
		}

		if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
			dev_err_ratelimited(&priv->ndev->dev,
					    "Map TX frag %d failed\n", f);
			goto err_free_skb;
		}

		if (f == 0)
			tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;

		if (f == frag_num) {
			tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
			tx_buf->skb = skb;
			/* Request an IRQ only per the coalescing policy */
			if (emac_tx_should_interrupt(priv, frag_num + 1))
				tx_desc.desc1 |=
					TX_DESC_1_INTERRUPT_ON_COMPLETION;
		}

		*tx_desc_addr = tx_desc;
	}

	/* All descriptors are ready, give ownership for first desc */
	tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
	dma_wmb();
	WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);

	emac_dma_start_transmit(priv);

	tx_ring->head = head;

	return;

err_free_skb:
	dev_dstats_tx_dropped(priv->ndev);
	dev_kfree_skb_any(skb);
}
800 
801 static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
802 {
803 	struct emac_priv *priv = netdev_priv(ndev);
804 	int nfrags = skb_shinfo(skb)->nr_frags;
805 	struct device *dev = &priv->pdev->dev;
806 
807 	if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
808 		if (!netif_queue_stopped(ndev)) {
809 			netif_stop_queue(ndev);
810 			dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
811 		}
812 		return NETDEV_TX_BUSY;
813 	}
814 
815 	emac_tx_mem_map(priv, skb);
816 
817 	/* Make sure there is space in the ring for the next TX. */
818 	if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
819 		netif_stop_queue(ndev);
820 
821 	return NETDEV_TX_OK;
822 }
823 
824 static int emac_set_mac_address(struct net_device *ndev, void *addr)
825 {
826 	struct emac_priv *priv = netdev_priv(ndev);
827 	int ret = eth_mac_addr(ndev, addr);
828 
829 	if (ret)
830 		return ret;
831 
832 	/* If running, set now; if not running it will be set in emac_up. */
833 	if (netif_running(ndev))
834 		emac_set_mac_addr(priv, ndev->dev_addr);
835 
836 	return 0;
837 }
838 
/* Clear all 64 bits of the multicast hash filter (4 x 16-bit registers) */
static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
{
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
}
846 
847 /*
848  * The upper 6 bits of the Ethernet CRC of the MAC address is used as the hash
849  * when matching multicast addresses.
850  */
851 static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
852 {
853 	u32 crc32 = ether_crc(ETH_ALEN, addr);
854 
855 	return crc32 >> 26;
856 }
857 
/*
 * ndo_set_rx_mode: configure promiscuous and multicast filtering.
 * The multicast filter is a 64-bit hash table spread over four 16-bit
 * registers; "accept all multicast" sets every bit.
 */
static void emac_set_rx_mode(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	u32 mc_filter[4] = { 0 };
	u32 hash, reg, bit, val;

	val = emac_rd(priv, MAC_ADDRESS_CONTROL);

	val &= ~MREGBIT_PROMISCUOUS_MODE;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promisc mode */
		val |= MREGBIT_PROMISCUOUS_MODE;
	} else if ((ndev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
		/* Accept all multicast frames by setting every bit */
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
	} else if (!netdev_mc_empty(ndev)) {
		emac_mac_multicast_filter_clear(priv);
		netdev_for_each_mc_addr(ha, ndev) {
			/*
			 * The hash table is an array of 4 16-bit registers. It
			 * is treated like an array of 64 bits (bits[hash]).
			 */
			hash = emac_ether_addr_hash(ha->addr);
			reg = hash / 16;
			bit = hash % 16;
			mc_filter[reg] |= BIT(bit);
		}
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
	}

	emac_wr(priv, MAC_ADDRESS_CONTROL, val);
}
900 
901 static int emac_change_mtu(struct net_device *ndev, int mtu)
902 {
903 	struct emac_priv *priv = netdev_priv(ndev);
904 	u32 frame_len;
905 
906 	if (netif_running(ndev)) {
907 		netdev_err(ndev, "must be stopped to change MTU\n");
908 		return -EBUSY;
909 	}
910 
911 	frame_len = mtu + ETH_HLEN + ETH_FCS_LEN;
912 
913 	if (frame_len <= EMAC_DEFAULT_BUFSIZE)
914 		priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
915 	else if (frame_len <= EMAC_RX_BUF_2K)
916 		priv->dma_buf_sz = EMAC_RX_BUF_2K;
917 	else
918 		priv->dma_buf_sz = EMAC_RX_BUF_4K;
919 
920 	ndev->mtu = mtu;
921 
922 	return 0;
923 }
924 
/* ndo_tx_timeout: defer recovery work to process context */
static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct emac_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
931 
932 static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
933 {
934 	struct emac_priv *priv = bus->priv;
935 	u32 cmd = 0, val;
936 	int ret;
937 
938 	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
939 	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
940 	cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
941 
942 	emac_wr(priv, MAC_MDIO_DATA, 0x0);
943 	emac_wr(priv, MAC_MDIO_CONTROL, cmd);
944 
945 	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
946 				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
947 
948 	if (ret)
949 		return ret;
950 
951 	val = emac_rd(priv, MAC_MDIO_DATA);
952 	return FIELD_GET(MREGBIT_MDIO_DATA, val);
953 }
954 
955 static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
956 			  u16 value)
957 {
958 	struct emac_priv *priv = bus->priv;
959 	u32 cmd = 0, val;
960 	int ret;
961 
962 	emac_wr(priv, MAC_MDIO_DATA, value);
963 
964 	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
965 	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
966 	cmd |= MREGBIT_START_MDIO_TRANS;
967 
968 	emac_wr(priv, MAC_MDIO_CONTROL, cmd);
969 
970 	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
971 				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
972 
973 	return ret;
974 }
975 
/*
 * Allocate and register the MDIO bus. The bus is described by an optional
 * "mdio-bus" DT child node; registration proceeds (with a NULL node) even
 * when it is absent. Returns 0 on success or a negative error.
 */
static int emac_mdio_init(struct emac_priv *priv)
{
	struct device *dev = &priv->pdev->dev;
	struct device_node *mii_np;
	struct mii_bus *mii;
	int ret;

	mii = devm_mdiobus_alloc(dev);
	if (!mii)
		return -ENOMEM;

	mii->priv = priv;
	mii->name = "k1_emac_mii";
	mii->read = emac_mii_read;
	mii->write = emac_mii_write;
	mii->parent = dev;
	mii->phy_mask = ~0;	/* Only probe PHYs listed in the DT node */
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);

	mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");

	ret = devm_of_mdiobus_register(dev, mii, mii_np);
	if (ret)
		dev_err_probe(dev, ret, "Failed to register mdio bus\n");

	of_node_put(mii_np);
	return ret;
}
1004 
1005 /*
1006  * Even though this MAC supports gigabit operation, it only provides 32-bit
1007  * statistics counters. The most overflow-prone counters are the "bytes" ones,
1008  * which at gigabit overflow about twice a minute.
1009  *
1010  * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
1011  * every time statistics seem to go backwards. Also, update periodically to
1012  * catch overflows when we are not otherwise checking the statistics often
1013  * enough.
1014  */
1015 
1016 #define EMAC_STATS_TIMER_PERIOD		20
1017 
/*
 * Read one 32-bit statistics counter through the indirect control/data
 * register interface shared by the TX and RX counter banks. On success
 * returns 0 with the value in *res; on timeout returns the poll error.
 */
static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
			      u32 control_reg, u32 high_reg, u32 low_reg)
{
	u32 val, high, low;
	int ret;

	/* The "read" bit is the same for TX and RX */

	val = MREGBIT_START_TX_COUNTER_READ | cnt;
	emac_wr(priv, control_reg, val);
	/* Read-back, presumably to flush the posted write — TODO confirm */
	val = emac_rd(priv, control_reg);

	ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
					!(val & MREGBIT_START_TX_COUNTER_READ),
					100, 10000);

	if (ret) {
		/*
		 * This could be caused by the PHY stopping its refclk even when
		 * the link is up, for power saving. See also comments in
		 * emac_stats_update().
		 */
		dev_err_ratelimited(&priv->ndev->dev,
				    "Read stat timeout. PHY clock stopped?\n");
		return ret;
	}

	/* The 32-bit value is split across two 16-bit data registers */
	high = emac_rd(priv, high_reg);
	low = emac_rd(priv, low_reg);
	*res = high << 16 | lower_16_bits(low);

	return 0;
}
1051 
/* Read TX statistics counter @cnt into *res */
static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
				  MAC_TX_STATCTR_DATA_HIGH,
				  MAC_TX_STATCTR_DATA_LOW);
}
1058 
/* Read RX statistics counter @cnt into *res */
static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
				  MAC_RX_STATCTR_DATA_HIGH,
				  MAC_RX_STATCTR_DATA_LOW);
}
1065 
1066 static void emac_update_counter(u64 *counter, u32 new_low)
1067 {
1068 	u32 old_low = lower_32_bits(*counter);
1069 	u64 high = upper_32_bits(*counter);
1070 
1071 	if (old_low > new_low) {
1072 		/* Overflowed, increment high 32 bits */
1073 		high++;
1074 	}
1075 
1076 	*counter = (high << 32) | new_low;
1077 }
1078 
/*
 * Refresh all 64-bit software statistics from the 32-bit hardware
 * counters and re-arm the periodic refresh timer. Caller must hold
 * stats_lock. Skips (without rescheduling) when the interface is down or
 * the carrier is off, since reads would time out.
 */
static void emac_stats_update(struct emac_priv *priv)
{
	u64 *tx_stats_off = priv->tx_stats_off.array;
	u64 *rx_stats_off = priv->rx_stats_off.array;
	u64 *tx_stats = priv->tx_stats.array;
	u64 *rx_stats = priv->rx_stats.array;
	u32 i, res, offset;

	assert_spin_locked(&priv->stats_lock);

	/*
	 * We can't read statistics if the interface is not up. Also, some PHYs
	 * stop their reference clocks for link down power saving, which also
	 * causes reading statistics to time out. Don't update and don't
	 * reschedule in these cases.
	 */
	if (!netif_running(priv->ndev) ||
	    !netif_carrier_ok(priv->ndev) ||
	    !netif_device_present(priv->ndev)) {
		return;
	}

	for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
		/*
		 * If reading stats times out anyway, the stat registers will be
		 * stuck, and we can't really recover from that.
		 *
		 * Reading statistics also can't return an error, so just return
		 * without updating and without rescheduling.
		 */
		if (emac_tx_read_stat_cnt(priv, i, &res))
			return;

		/*
		 * Re-initializing while bringing interface up resets counters
		 * to zero, so to provide continuity, we add the values saved
		 * last time we did emac_down() to the new hardware-provided
		 * value.
		 */
		offset = lower_32_bits(tx_stats_off[i]);
		emac_update_counter(&tx_stats[i], res + offset);
	}

	/* Similar remarks as TX stats */
	for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
		if (emac_rx_read_stat_cnt(priv, i, &res))
			return;
		offset = lower_32_bits(rx_stats_off[i]);
		emac_update_counter(&rx_stats[i], res + offset);
	}

	/* Periodic refresh catches overflows between on-demand reads */
	mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
}
1132 
/* Periodic timer callback: refresh stats under the stats lock */
static void emac_stats_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, stats_timer);

	/* Timer runs in softirq context, so plain spin_lock() suffices */
	spin_lock(&priv->stats_lock);

	emac_stats_update(priv);

	spin_unlock(&priv->stats_lock);
}
1143 
/*
 * Packet-size buckets reported via emac_get_rmon_stats(); order must match
 * the rx_*_pkts histogram counters copied into rmon_stats->hist[].
 */
static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
	{   64,   64 },
	{   65,  127 },
	{  128,  255 },
	{  256,  511 },
	{  512, 1023 },
	{ 1024, 1518 },
	{ 1519, 4096 },
	{ /* sentinel */ },
};
1154 
/*
 * Sum the per-CPU tx_drops counters. Like dev_fetch_dstats(), but we only
 * use tx_drops; this is the driver's sole software-maintained statistic.
 */
static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
{
	const struct pcpu_dstats *stats;
	u64 tx_drops, total = 0;
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(priv->ndev->dstats, cpu);
		/* Retry the read if a writer updated the stats mid-fetch */
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tx_drops = u64_stats_read(&stats->tx_drops);
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		total += tx_drops;
	}

	return total;
}
1175 
/*
 * .ndo_get_stats64: fill @storage from the (freshly refreshed) hardware
 * statistics mirrors, plus the software tx_dropped counter.
 */
static void emac_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *storage)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_tx_stats *tx_stats;
	union emac_hw_rx_stats *rx_stats;

	tx_stats = &priv->tx_stats;
	rx_stats = &priv->rx_stats;

	/* This is the only software counter */
	storage->tx_dropped = emac_get_stat_tx_drops(priv);

	/* _bh: the stats timer takes this lock from softirq context */
	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	storage->tx_packets = tx_stats->stats.tx_ok_pkts;
	storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
	storage->tx_errors = tx_stats->stats.tx_err_pkts;

	storage->rx_packets = rx_stats->stats.rx_ok_pkts;
	storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
	storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
	storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
	storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
	storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;

	storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
	storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
	storage->collisions += tx_stats->stats.tx_excessclsn_pkts;

	storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
	storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1213 
/* ethtool .get_rmon_stats: RX length-error and size-histogram counters */
static void emac_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_rx_stats *rx_stats;

	rx_stats = &priv->rx_stats;

	*ranges = emac_rmon_hist_ranges;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
	rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
	rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
	rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;

	/* Only RX has histogram stats */

	rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
	rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
	rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
	rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
	rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
	rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
	rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1246 
/* ethtool .get_eth_mac_stats: IEEE 802.3 MAC layer counters */
static void emac_get_eth_mac_stats(struct net_device *dev,
				   struct ethtool_eth_mac_stats *mac_stats)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_tx_stats *tx_stats;
	union emac_hw_rx_stats *rx_stats;

	tx_stats = &priv->tx_stats;
	rx_stats = &priv->rx_stats;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
	mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;

	mac_stats->MulticastFramesReceivedOK =
		rx_stats->stats.rx_multicast_pkts;
	mac_stats->BroadcastFramesReceivedOK =
		rx_stats->stats.rx_broadcast_pkts;

	mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
	mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
	mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
	mac_stats->FramesAbortedDueToXSColls =
		tx_stats->stats.tx_excessclsn_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1277 
/* ethtool .get_pause_stats: TX/RX PAUSE frame counters */
static void emac_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *pause_stats)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_tx_stats *tx_stats;
	union emac_hw_rx_stats *rx_stats;

	tx_stats = &priv->tx_stats;
	rx_stats = &priv->rx_stats;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
	pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1297 
/* Other statistics that are not derivable from standard statistics */

/* Map stat name to its u64 index within the stats union's array view */
#define EMAC_ETHTOOL_STAT(type, name) \
	{ offsetof(type, stats.name) / sizeof(u64), #name }

static const struct emac_ethtool_stats {
	size_t offset;	/* index into the u64 stats array */
	char str[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
} emac_ethtool_rx_stats[] = {
	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
};
1310 
1311 static int emac_get_sset_count(struct net_device *dev, int sset)
1312 {
1313 	switch (sset) {
1314 	case ETH_SS_STATS:
1315 		return ARRAY_SIZE(emac_ethtool_rx_stats);
1316 	default:
1317 		return -EOPNOTSUPP;
1318 	}
1319 }
1320 
1321 static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1322 {
1323 	int i;
1324 
1325 	switch (stringset) {
1326 	case ETH_SS_STATS:
1327 		for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
1328 			memcpy(data, emac_ethtool_rx_stats[i].str,
1329 			       ETH_GSTRING_LEN);
1330 			data += ETH_GSTRING_LEN;
1331 		}
1332 		break;
1333 	}
1334 }
1335 
/* ethtool .get_ethtool_stats: values matching emac_get_strings() order */
static void emac_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct emac_priv *priv = netdev_priv(dev);
	/* View the stats union as a flat u64 array, indexed by table offset */
	u64 *rx_stats = (u64 *)&priv->rx_stats;
	int i;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
		data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];

	spin_unlock_bh(&priv->stats_lock);
}
1352 
/* ethtool .get_regs_len: size of the DMA + MAC register dump in bytes */
static int emac_ethtool_get_regs_len(struct net_device *dev)
{
	return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
}
1357 
/* ethtool .get_regs: dump DMA registers first, then MAC registers */
static void emac_ethtool_get_regs(struct net_device *dev,
				  struct ethtool_regs *regs, void *space)
{
	struct emac_priv *priv = netdev_priv(dev);
	u32 *reg_space = space;
	int i;

	regs->version = 1;

	for (i = 0; i < EMAC_DMA_REG_CNT; i++)
		reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);

	for (i = 0; i < EMAC_MAC_REG_CNT; i++)
		reg_space[i + EMAC_DMA_REG_CNT] =
			emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
}
1374 
/* ethtool .get_drvinfo: driver name and custom stats count */
static void emac_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
}
1381 
/*
 * Deferred TX timeout recovery: reset the MAC by bouncing the interface.
 * Runs from the workqueue so it may sleep and take the RTNL lock, which
 * dev_close()/dev_open() require.
 */
static void emac_tx_timeout_task(struct work_struct *work)
{
	struct net_device *ndev;
	struct emac_priv *priv;

	priv = container_of(work, struct emac_priv, tx_timeout_task);
	ndev = priv->ndev;

	rtnl_lock();

	/* No need to reset if already down */
	if (!netif_running(ndev)) {
		rtnl_unlock();
		return;
	}

	netdev_err(ndev, "MAC reset due to TX timeout\n");

	netif_trans_update(ndev); /* prevent tx timeout */
	dev_close(ndev);
	dev_open(ndev, NULL);

	rtnl_unlock();
}
1406 
/*
 * One-time software state initialization at probe: ring sizes, locks,
 * deferred work, coalescing defaults, and timers. No hardware access.
 */
static void emac_sw_init(struct emac_priv *priv)
{
	priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;

	priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
	priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;

	spin_lock_init(&priv->stats_lock);

	INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);

	/* Tuning defaults from SpacemiT, see defines at top of file */
	priv->tx_coal_frames = EMAC_TX_FRAMES;
	priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;

	timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
	timer_setup(&priv->stats_timer, emac_stats_timer, 0);
}
1424 
1425 static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
1426 {
1427 	struct net_device *ndev = (struct net_device *)dev_id;
1428 	struct emac_priv *priv = netdev_priv(ndev);
1429 	bool should_schedule = false;
1430 	u32 clr = 0;
1431 	u32 status;
1432 
1433 	status = emac_rd(priv, DMA_STATUS_IRQ);
1434 
1435 	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
1436 		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
1437 		should_schedule = true;
1438 	}
1439 
1440 	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
1441 		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
1442 
1443 	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
1444 		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
1445 
1446 	if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
1447 		clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
1448 		should_schedule = true;
1449 	}
1450 
1451 	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
1452 		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
1453 
1454 	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
1455 		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
1456 
1457 	if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
1458 		clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
1459 
1460 	if (should_schedule) {
1461 		if (napi_schedule_prep(&priv->napi)) {
1462 			emac_disable_interrupt(priv);
1463 			__napi_schedule_irqoff(&priv->napi);
1464 		}
1465 	}
1466 
1467 	emac_wr(priv, DMA_STATUS_IRQ, clr);
1468 
1469 	return IRQ_HANDLED;
1470 }
1471 
/*
 * Program the TX ring base address into the DMA engine, enable the MAC
 * transmitter with auto-retry, and start the TX DMA channel.
 */
static void emac_configure_tx(struct emac_priv *priv)
{
	u32 val;

	/* Set base address */
	val = (u32)priv->tx_ring.desc_dma_addr;
	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);

	/* Set TX inter-frame gap value, enable transmit */
	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
	val &= ~MREGBIT_IFG_LEN;
	val |= MREGBIT_TRANSMIT_ENABLE;
	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);

	/* Disable auto polling; TX is kicked explicitly on xmit */
	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);

	/* Start TX DMA */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}
1494 
/*
 * Program the RX ring base address into the DMA engine, enable the MAC
 * receiver in store-and-forward mode, and start the RX DMA channel.
 */
static void emac_configure_rx(struct emac_priv *priv)
{
	u32 val;

	/* Set base address */
	val = (u32)priv->rx_ring.desc_dma_addr;
	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);

	/* Enable receive */
	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
	val |= MREGBIT_RECEIVE_ENABLE;
	val |= MREGBIT_STORE_FORWARD;
	emac_wr(priv, MAC_RECEIVE_CONTROL, val);

	/* Start RX DMA */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_RECEIVE_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}
1514 
/*
 * phylib link-change callback: mirror the PHY's negotiated duplex and
 * speed into MAC_GLOBAL_CONTROL, and restart stats collection on link up.
 */
static void emac_adjust_link(struct net_device *dev)
{
	struct emac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 ctrl;

	if (phydev->link) {
		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);

		/* Update duplex and speed from PHY */

		FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
			     phydev->duplex == DUPLEX_FULL);

		ctrl &= ~MREGBIT_SPEED;

		switch (phydev->speed) {
		case SPEED_1000:
			ctrl |= MREGBIT_SPEED_1000M;
			break;
		case SPEED_100:
			ctrl |= MREGBIT_SPEED_100M;
			break;
		case SPEED_10:
			ctrl |= MREGBIT_SPEED_10M;
			break;
		default:
			netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
			phydev->speed = SPEED_UNKNOWN;
			break;
		}

		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);

		/*
		 * Reschedule stats updates now that link is up. See comments in
		 * emac_stats_update().
		 */
		mod_timer(&priv->stats_timer, jiffies);
	}

	phy_print_status(phydev);
}
1558 
/*
 * Program the APMU delay-line register: for RGMII modes, enable RX/TX
 * delay lines at 15.6 ps/step with the codes computed in emac_config_dt();
 * for other modes, clear (disable) all delay-line fields.
 */
static void emac_update_delay_line(struct emac_priv *priv)
{
	u32 mask = 0, val = 0;

	/* Always update every delay-line field, enabled or not */
	mask |= EMAC_RX_DLINE_EN;
	mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
	mask |= EMAC_TX_DLINE_EN;
	mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;

	if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
		val |= EMAC_RX_DLINE_EN;
		val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
				  EMAC_DLINE_STEP_15P6);
		val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);

		val |= EMAC_TX_DLINE_EN;
		val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
				  EMAC_DLINE_STEP_15P6);
		val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
	}

	regmap_update_bits(priv->regmap_apmu,
			   priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
			   mask, val);
}
1584 
/*
 * Resolve the PHY from the device tree (phy-handle or fixed-link), check
 * the phy-mode is one we support (RMII/RGMII*), configure the MAC-side
 * interface, and connect with emac_adjust_link() as the link callback.
 * Returns 0 or a negative errno.
 */
static int emac_phy_connect(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	struct phy_device *phydev;
	struct device_node *np;
	int ret;

	ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
	if (ret) {
		netdev_err(ndev, "No phy-mode found");
		return ret;
	}

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		break;
	default:
		netdev_err(ndev, "Unsupported PHY interface %s",
			   phy_modes(priv->phy_interface));
		return -EINVAL;
	}

	/* For fixed-link, the PHY node is the MAC node itself */
	np = of_parse_phandle(dev->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(dev->of_node))
		np = of_node_get(dev->of_node);

	if (!np) {
		netdev_err(ndev, "No PHY specified");
		return -ENODEV;
	}

	ret = emac_phy_interface_config(priv);
	if (ret)
		goto err_node_put;

	phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
				priv->phy_interface);
	if (!phydev) {
		netdev_err(ndev, "Could not attach to PHY\n");
		ret = -ENODEV;
		goto err_node_put;
	}

	/* Driver handles PHY suspend/resume itself (see emac_suspend()) */
	phydev->mac_managed_pm = true;

	emac_update_delay_line(priv);

	phy_attached_info(phydev);

	/* Success path also lands here with ret == 0; just drop the ref */
err_node_put:
	of_node_put(np);
	return ret;
}
1643 
/*
 * Bring the interface up: connect the PHY, initialize the hardware and
 * DMA rings, request the (shared) IRQ, enable DMA interrupts, and start
 * NAPI and the TX queue. Mirrors emac_down() in reverse order.
 */
static int emac_up(struct emac_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;
	int ret;

	pm_runtime_get_sync(&pdev->dev);

	ret = emac_phy_connect(ndev);
	if (ret) {
		dev_err(&pdev->dev, "emac_phy_connect failed\n");
		goto err_pm_put;
	}

	emac_init_hw(priv);

	emac_set_mac_addr(priv, ndev->dev_addr);
	emac_configure_tx(priv);
	emac_configure_rx(priv);

	emac_alloc_rx_desc_buffers(priv);

	phy_start(ndev->phydev);

	ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_reset_disconnect_phy;
	}

	/* Don't enable MAC interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);

	/* Enable DMA interrupts */
	emac_wr(priv, DMA_INTERRUPT_ENABLE,
		MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
			MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
			MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
			MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
			MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	/* Kick off periodic statistics collection immediately */
	mod_timer(&priv->stats_timer, jiffies);

	return 0;

err_reset_disconnect_phy:
	emac_reset_hw(priv);
	phy_disconnect(ndev->phydev);

err_pm_put:
	pm_runtime_put_sync(&pdev->dev);
	return ret;
}
1702 
/*
 * Tear down the interface: stop the queue, disconnect the PHY, mask and
 * free the IRQ, stop NAPI and timers, reset the MAC, and snapshot the
 * statistics so they survive the hardware counter reset on the next
 * emac_up(). Always returns 0.
 */
static int emac_down(struct emac_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;

	netif_stop_queue(ndev);

	phy_disconnect(ndev->phydev);

	/* Mask all interrupt sources before freeing the handler */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	free_irq(priv->irq, ndev);

	napi_disable(&priv->napi);

	timer_delete_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_delete_sync(&priv->stats_timer);

	emac_reset_hw(priv);

	/* Update and save current stats, see emac_stats_update() for usage */

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	priv->tx_stats_off = priv->tx_stats;
	priv->rx_stats_off = priv->rx_stats;

	spin_unlock_bh(&priv->stats_lock);

	pm_runtime_put_sync(&pdev->dev);
	return 0;
}
1740 
/*
 * .ndo_open: allocate TX/RX descriptor rings, then bring the interface
 * up via emac_up(). On failure, frees whatever was already allocated.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	int ret;

	ret = emac_alloc_tx_resources(priv);
	if (ret) {
		dev_err(dev, "Cannot allocate TX resources\n");
		return ret;
	}

	ret = emac_alloc_rx_resources(priv);
	if (ret) {
		dev_err(dev, "Cannot allocate RX resources\n");
		goto err_free_tx;
	}

	ret = emac_up(priv);
	if (ret) {
		dev_err(dev, "Error when bringing interface up\n");
		goto err_free_rx;
	}
	return 0;

err_free_rx:
	emac_free_rx_resources(priv);
err_free_tx:
	emac_free_tx_resources(priv);

	return ret;
}
1774 
/*
 * .ndo_stop: bring the interface down and release the descriptor rings
 * allocated in emac_open().
 */
static int emac_stop(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);

	emac_down(priv);
	emac_free_tx_resources(priv);
	emac_free_rx_resources(priv);

	return 0;
}
1786 
/* ethtool entry points; link settings are delegated to phylib helpers */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_drvinfo		= emac_get_drvinfo,
	.get_link		= ethtool_op_get_link,

	.get_regs		= emac_ethtool_get_regs,
	.get_regs_len		= emac_ethtool_get_regs_len,

	.get_rmon_stats		= emac_get_rmon_stats,
	.get_pause_stats	= emac_get_pause_stats,
	.get_eth_mac_stats	= emac_get_eth_mac_stats,

	.get_sset_count		= emac_get_sset_count,
	.get_strings		= emac_get_strings,
	.get_ethtool_stats	= emac_get_ethtool_stats,
};
1805 
/* Network device entry points */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open               = emac_open,
	.ndo_stop               = emac_stop,
	.ndo_start_xmit         = emac_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address    = emac_set_mac_address,
	.ndo_eth_ioctl          = phy_do_ioctl_running,
	.ndo_change_mtu         = emac_change_mtu,
	.ndo_tx_timeout         = emac_tx_timeout,
	.ndo_set_rx_mode        = emac_set_rx_mode,
	.ndo_get_stats64	= emac_get_stats64,
};
1818 
/* Currently we always use 15.6 ps/step for the delay line */

/* Convert a delay in picoseconds to 15.6 ps delay-line units */
static u32 delay_ps_to_unit(u32 ps)
{
	return DIV_ROUND_CLOSEST(ps * 10, 156);
}

/* Convert 15.6 ps delay-line units back to picoseconds */
static u32 delay_unit_to_ps(u32 unit)
{
	return DIV_ROUND_CLOSEST(unit * 156, 10);
}

/* Largest code that fits in the delay-line code field */
#define EMAC_MAX_DELAY_UNIT	FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)

/* Minus one just to be safe from rounding errors */
#define EMAC_MAX_DELAY_PS	(delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
1835 
1836 static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
1837 {
1838 	struct device_node *np = pdev->dev.of_node;
1839 	struct device *dev = &pdev->dev;
1840 	u8 mac_addr[ETH_ALEN] = { 0 };
1841 	int ret;
1842 
1843 	priv->iobase = devm_platform_ioremap_resource(pdev, 0);
1844 	if (IS_ERR(priv->iobase))
1845 		return dev_err_probe(dev, PTR_ERR(priv->iobase),
1846 				     "ioremap failed\n");
1847 
1848 	priv->regmap_apmu =
1849 		syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
1850 						     &priv->regmap_apmu_offset);
1851 
1852 	if (IS_ERR(priv->regmap_apmu))
1853 		return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
1854 				     "failed to get syscon\n");
1855 
1856 	priv->irq = platform_get_irq(pdev, 0);
1857 	if (priv->irq < 0)
1858 		return priv->irq;
1859 
1860 	ret = of_get_mac_address(np, mac_addr);
1861 	if (ret) {
1862 		if (ret == -EPROBE_DEFER)
1863 			return dev_err_probe(dev, ret,
1864 					     "Can't get MAC address\n");
1865 
1866 		dev_info(&pdev->dev, "Using random MAC address\n");
1867 		eth_hw_addr_random(priv->ndev);
1868 	} else {
1869 		eth_hw_addr_set(priv->ndev, mac_addr);
1870 	}
1871 
1872 	priv->tx_delay = 0;
1873 	priv->rx_delay = 0;
1874 
1875 	of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
1876 	of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
1877 
1878 	if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
1879 		dev_err(&pdev->dev,
1880 			"tx-internal-delay-ps too large: max %d, got %d",
1881 			EMAC_MAX_DELAY_PS, priv->tx_delay);
1882 		return -EINVAL;
1883 	}
1884 
1885 	if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
1886 		dev_err(&pdev->dev,
1887 			"rx-internal-delay-ps too large: max %d, got %d",
1888 			EMAC_MAX_DELAY_PS, priv->rx_delay);
1889 		return -EINVAL;
1890 	}
1891 
1892 	priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
1893 	priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
1894 
1895 	return 0;
1896 }
1897 
/* devm action callback: undo of_phy_register_fixed_link() on unbind */
static void emac_phy_deregister_fixed_link(void *data)
{
	struct device_node *of_node = data;

	of_phy_deregister_fixed_link(of_node);
}
1904 
/*
 * Platform probe: allocate the netdev, parse DT resources, set up clocks,
 * reset, optional fixed-link, software state, MDIO, and register the
 * device. Most resources are devm-managed; only the timers need explicit
 * cleanup on the error path.
 */
static int emac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct reset_control *reset;
	struct net_device *ndev;
	struct emac_priv *priv;
	int ret;

	ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
	if (!ndev)
		return -ENOMEM;

	ndev->hw_features = NETIF_F_SG;
	ndev->features |= ndev->hw_features;

	ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN);
	/* Per-CPU dstats back emac_get_stat_tx_drops() */
	ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	platform_set_drvdata(pdev, priv);

	ret = emac_config_dt(pdev, priv);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Configuration failed\n");

	ndev->watchdog_timeo = 5 * HZ;
	ndev->base_addr = (unsigned long)priv->iobase;
	ndev->irq = priv->irq;

	ndev->ethtool_ops = &emac_ethtool_ops;
	ndev->netdev_ops = &emac_netdev_ops;

	devm_pm_runtime_enable(&pdev->dev);

	priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(priv->bus_clk))
		return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
				     "Failed to get clock\n");

	/* Optional reset line; deasserted for the lifetime of the device */
	reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
								     NULL);
	if (IS_ERR(reset))
		return dev_err_probe(dev, PTR_ERR(reset),
				     "Failed to get reset\n");

	if (of_phy_is_fixed_link(dev->of_node)) {
		ret = of_phy_register_fixed_link(dev->of_node);
		if (ret)
			return dev_err_probe(dev, ret,
					     "Failed to register fixed-link\n");

		ret = devm_add_action_or_reset(dev,
					       emac_phy_deregister_fixed_link,
					       dev->of_node);

		if (ret) {
			dev_err(dev, "devm_add_action_or_reset failed\n");
			return ret;
		}
	}

	emac_sw_init(priv);

	ret = emac_mdio_init(priv);
	if (ret)
		goto err_timer_delete;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ret = devm_register_netdev(dev, ndev);
	if (ret) {
		dev_err(dev, "devm_register_netdev failed\n");
		goto err_timer_delete;
	}

	/*
	 * NOTE(review): the netdev is registered before netif_napi_add();
	 * an ndo_open arriving in that window would enable NAPI before it
	 * is added — confirm whether this ordering should be swapped.
	 */
	netif_napi_add(ndev, &priv->napi, emac_rx_poll);
	netif_carrier_off(ndev);

	return 0;

err_timer_delete:
	timer_delete_sync(&priv->txtimer);
	timer_delete_sync(&priv->stats_timer);

	return ret;
}
1993 
/*
 * Platform remove: shut down timers and deferred work, then reset the
 * MAC. Netdev unregistration and resource release are devm-managed.
 */
static void emac_remove(struct platform_device *pdev)
{
	struct emac_priv *priv = platform_get_drvdata(pdev);

	/* shutdown (not delete) prevents the timers from being re-armed */
	timer_shutdown_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_shutdown_sync(&priv->stats_timer);

	emac_reset_hw(priv);
}
2005 
/*
 * System resume: re-enable the bus clock and, if the interface was
 * running at suspend, bring it back up and restart stats collection.
 */
static int emac_resume(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable bus clock: %d\n", ret);
		return ret;
	}

	if (!netif_running(ndev))
		return 0;

	ret = emac_open(ndev);
	if (ret) {
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	netif_device_attach(ndev);

	mod_timer(&priv->stats_timer, jiffies);

	return 0;
}
2033 
/*
 * System suspend: stop a running interface and gate the bus clock;
 * emac_resume() reverses this.
 */
static int emac_suspend(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!ndev || !netif_running(ndev)) {
		clk_disable_unprepare(priv->bus_clk);
		return 0;
	}

	emac_stop(ndev);

	clk_disable_unprepare(priv->bus_clk);
	netif_device_detach(ndev);
	return 0;
}
2050 
/* System sleep PM callbacks (no-op when CONFIG_PM_SLEEP is disabled) */
static const struct dev_pm_ops emac_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
};
2054 
/* Device tree match table */
static const struct of_device_id emac_of_match[] = {
	{ .compatible = "spacemit,k1-emac" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, emac_of_match);
2060 
/* Platform driver glue and module metadata */
static struct platform_driver emac_driver = {
	.probe = emac_probe,
	.remove = emac_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(emac_of_match),
		.pm = &emac_pm_ops,
	},
};
module_platform_driver(emac_driver);

MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
MODULE_LICENSE("GPL");
2075