xref: /linux/drivers/net/ethernet/spacemit/k1_emac.c (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SpacemiT K1 Ethernet driver
4  *
5  * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
6  * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
7  */
8 
9 #include <linux/bitfield.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/ethtool.h>
15 #include <linux/if_vlan.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/iopoll.h>
19 #include <linux/kernel.h>
20 #include <linux/mfd/syscon.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/phy.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/pm.h>
30 #include <linux/regmap.h>
31 #include <linux/reset.h>
32 #include <linux/rtnetlink.h>
33 #include <linux/timer.h>
34 #include <linux/types.h>
35 
36 #include "k1_emac.h"
37 
#define DRIVER_NAME "k1_emac"

/* RX buffer sizes; the actual size is chosen from the MTU in emac_change_mtu() */
#define EMAC_DEFAULT_BUFSIZE		1536
#define EMAC_RX_BUF_2K			2048
#define EMAC_RX_BUF_MAX			FIELD_MAX(RX_DESC_1_BUFFER_SIZE_1_MASK)

/* Tuning parameters from SpacemiT */
#define EMAC_TX_FRAMES			64
#define EMAC_TX_COAL_TIMEOUT		40000
#define EMAC_RX_FRAMES			64
#define EMAC_RX_COAL_TIMEOUT		(600 * 312)

/* Flow control and FIFO threshold defaults programmed in emac_init_hw() */
#define DEFAULT_FC_PAUSE_TIME		0xffff
#define DEFAULT_FC_FIFO_HIGH		1600
#define DEFAULT_TX_ALMOST_FULL		0x1f8
#define DEFAULT_TX_THRESHOLD		1518
#define DEFAULT_RX_THRESHOLD		12
#define DEFAULT_TX_RING_NUM		1024
#define DEFAULT_RX_RING_NUM		1024
#define DEFAULT_DMA_BURST		MREGBIT_BURST_16WORD
/* Multicast hash filter: 4 x 16-bit registers = 64 hash bits */
#define HASH_TABLE_SIZE			64
59 
/* One DMA-mapped fragment attached to a TX descriptor */
struct desc_buf {
	u64 dma_addr;		/* DMA address given to hardware; 0 means slot unused */
	void *buff_addr;	/* CPU virtual address of the mapped data */
	u16 dma_len;		/* Mapped length in bytes */
	u8 map_as_page;		/* Nonzero: unmap with dma_unmap_page(), else dma_unmap_single() */
};
66 
/* Software state for one TX descriptor; each descriptor carries up to two fragments */
struct emac_tx_desc_buffer {
	struct sk_buff *skb;	/* Set on the descriptor holding the last segment; freed on completion */
	struct desc_buf buf[2];	/* Buffer 1 and buffer 2 of the hardware descriptor */
};
71 
/* Software state for one RX descriptor (single buffer per descriptor) */
struct emac_rx_desc_buffer {
	struct sk_buff *skb;	/* skb whose data area is mapped for RX; NULL if slot empty */
	u64 dma_addr;		/* DMA address of skb->data */
	void *buff_addr;	/* CPU virtual address (unused for RX in this driver's visible paths) */
	u16 dma_len;		/* Mapped length in bytes */
	u8 map_as_page;		/* Kept for symmetry with desc_buf; RX maps are dma_map_single() */
};
79 
/**
 * struct emac_desc_ring - Software-side information for one descriptor ring
 * Same structure used for both RX and TX
 * @desc_addr: Virtual address of the descriptor ring memory
 * @desc_dma_addr: DMA address of the descriptor ring
 * @total_size: Size of ring in bytes (rounded up to whole pages)
 * @total_cnt: Number of descriptors
 * @head: Next descriptor to associate a buffer with
 * @tail: Next descriptor to check status bit
 * @rx_desc_buf: Per-descriptor buffer bookkeeping for RX
 * @tx_desc_buf: Per-descriptor buffer bookkeeping for TX, up to two buffers each
 */
struct emac_desc_ring {
	void *desc_addr;
	dma_addr_t desc_dma_addr;
	u32 total_size;
	u32 total_cnt;
	u32 head;
	u32 tail;
	union {
		struct emac_rx_desc_buffer *rx_desc_buf;
		struct emac_tx_desc_buffer *tx_desc_buf;
	};
};
104 
/* Per-device driver state, stored as netdev_priv() of the net_device */
struct emac_priv {
	void __iomem *iobase;		/* Mapped MAC/DMA register block */
	u32 dma_buf_sz;			/* Current RX buffer size, derived from MTU */
	struct emac_desc_ring tx_ring;
	struct emac_desc_ring rx_ring;

	struct net_device *ndev;
	struct napi_struct napi;
	struct platform_device *pdev;
	struct clk *bus_clk;
	struct clk *ref_clk;
	struct regmap *regmap_apmu;	/* Syscon for the APMU glue registers */
	u32 regmap_apmu_offset;		/* Byte offset of this MAC's block in the syscon */
	int irq;

	phy_interface_t phy_interface;

	/* Running 64-bit totals and snapshot offsets for the 32-bit hw counters */
	union emac_hw_tx_stats tx_stats, tx_stats_off;
	union emac_hw_rx_stats rx_stats, rx_stats_off;

	/* TX interrupt coalescing: count frames, force IRQ every tx_coal_frames */
	u32 tx_count_frames;
	u32 tx_coal_frames;
	u32 tx_coal_timeout;		/* Coalescing timer period in microseconds */
	struct work_struct tx_timeout_task;

	struct timer_list txtimer;	/* TX coalescing fallback timer */
	struct timer_list stats_timer;	/* Periodic stats refresh (overflow catching) */

	u32 tx_delay;			/* RGMII TX clock delay - presumably applied by glue code; not visible here */
	u32 rx_delay;			/* RGMII RX clock delay - see above */

	bool flow_control_autoneg;	/* Pause mode resolved from autoneg vs forced */
	u8 flow_control;		/* Active FLOW_CTRL_TX/RX bits */

	/* Softirq-safe, hold while touching hardware statistics */
	spinlock_t stats_lock;
};
142 
143 static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
144 {
145 	writel(val, priv->iobase + reg);
146 }
147 
148 static u32 emac_rd(struct emac_priv *priv, u32 reg)
149 {
150 	return readl(priv->iobase + reg);
151 }
152 
153 static int emac_phy_interface_config(struct emac_priv *priv)
154 {
155 	u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
156 
157 	if (phy_interface_mode_is_rgmii(priv->phy_interface))
158 		val |= PHY_INTF_RGMII;
159 
160 	regmap_update_bits(priv->regmap_apmu,
161 			   priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
162 			   mask, val);
163 
164 	return 0;
165 }
166 
167 /*
168  * Where the hardware expects a MAC address, it is laid out in this high, med,
169  * low order in three consecutive registers and in this format.
170  */
171 
172 static void emac_set_mac_addr_reg(struct emac_priv *priv,
173 				  const unsigned char *addr,
174 				  u32 reg)
175 {
176 	emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
177 	emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
178 	emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
179 }
180 
181 static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
182 {
183 	/* We use only one address, so set the same for flow control as well */
184 	emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
185 	emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
186 }
187 
/* Quiesce the controller: mask all IRQs, stop MAC TX/RX, then stop DMA */
static void emac_reset_hw(struct emac_priv *priv)
{
	/* Disable all interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Disable DMA */
	emac_wr(priv, DMA_CONTROL, 0x0);
}
201 
/*
 * Bring the controller into its initial configuration: address filtering,
 * FIFO thresholds, frame-size limits, flow control parameters, RX IRQ
 * mitigation, then a DMA soft reset followed by the DMA configuration.
 * TX/RX units themselves remain disabled here.
 */
static void emac_init_hw(struct emac_priv *priv)
{
	/* Destination address for 802.3x Ethernet flow control */
	u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
	u32 rxirq = 0, dma = 0, frame_sz;

	regmap_set_bits(priv->regmap_apmu,
			priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
			AXI_SINGLE_ID);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Enable MAC address 1 filtering */
	emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);

	/* Zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);

	/* Configure thresholds */
	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
		DEFAULT_TX_THRESHOLD);
	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);

	/* Set maximum frame size and jabber size based on configured MTU,
	 * accounting for Ethernet header, double VLAN tags, and FCS.
	 */
	frame_sz = priv->ndev->mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;

	emac_wr(priv, MAC_MAXIMUM_FRAME_SIZE, frame_sz);
	emac_wr(priv, MAC_TRANSMIT_JABBER_SIZE, frame_sz);
	emac_wr(priv, MAC_RECEIVE_JABBER_SIZE, frame_sz);

	/* Configure flow control (enabled in emac_adjust_link() later) */
	emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
	emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
	emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
	emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);

	/* RX IRQ mitigation */
	rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
			   EMAC_RX_FRAMES);
	rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
			    EMAC_RX_COAL_TIMEOUT);
	rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
	emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);

	/* Disable and set DMA config */
	emac_wr(priv, DMA_CONTROL, 0x0);

	/* Pulse soft reset; the ~10ms settle delays presumably come from the
	 * vendor - do not shorten without confirmation on hardware.
	 */
	emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
	usleep_range(9000, 10000);
	emac_wr(priv, DMA_CONFIGURATION, 0x0);
	usleep_range(9000, 10000);

	dma |= MREGBIT_STRICT_BURST;
	dma |= MREGBIT_DMA_64BIT_MODE;
	dma |= DEFAULT_DMA_BURST;

	emac_wr(priv, DMA_CONFIGURATION, dma);
}
268 
/* Kick the DMA engine so it (re)scans the TX descriptor ring */
static void emac_dma_start_transmit(struct emac_priv *priv)
{
	/* The actual value written does not matter */
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
}
274 
275 static void emac_enable_interrupt(struct emac_priv *priv)
276 {
277 	u32 val;
278 
279 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
280 	val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
281 	val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
282 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
283 }
284 
285 static void emac_disable_interrupt(struct emac_priv *priv)
286 {
287 	u32 val;
288 
289 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
290 	val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
291 	val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
292 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
293 }
294 
295 static u32 emac_tx_avail(struct emac_priv *priv)
296 {
297 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
298 	u32 avail;
299 
300 	if (tx_ring->tail > tx_ring->head)
301 		avail = tx_ring->tail - tx_ring->head - 1;
302 	else
303 		avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
304 
305 	return avail;
306 }
307 
/* Push the TX coalescing timer out by another tx_coal_timeout microseconds */
static void emac_tx_coal_timer_resched(struct emac_priv *priv)
{
	mod_timer(&priv->txtimer,
		  jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
}
313 
/*
 * TX coalescing timer expiry: schedule NAPI so completed TX descriptors
 * get reclaimed even when no completion interrupt was requested for them
 * (see emac_tx_should_interrupt()).
 */
static void emac_tx_coal_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, txtimer);

	napi_schedule(&priv->napi);
}
320 
321 static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
322 {
323 	priv->tx_count_frames += pkt_num;
324 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
325 		emac_tx_coal_timer_resched(priv);
326 		return false;
327 	}
328 
329 	priv->tx_count_frames = 0;
330 	return true;
331 }
332 
333 static void emac_free_tx_buf(struct emac_priv *priv, int i)
334 {
335 	struct emac_tx_desc_buffer *tx_buf;
336 	struct emac_desc_ring *tx_ring;
337 	struct desc_buf *buf;
338 	int j;
339 
340 	tx_ring = &priv->tx_ring;
341 	tx_buf = &tx_ring->tx_desc_buf[i];
342 
343 	for (j = 0; j < 2; j++) {
344 		buf = &tx_buf->buf[j];
345 		if (!buf->dma_addr)
346 			continue;
347 
348 		if (buf->map_as_page)
349 			dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
350 				       buf->dma_len, DMA_TO_DEVICE);
351 		else
352 			dma_unmap_single(&priv->pdev->dev,
353 					 buf->dma_addr, buf->dma_len,
354 					 DMA_TO_DEVICE);
355 
356 		buf->dma_addr = 0;
357 		buf->map_as_page = false;
358 		buf->buff_addr = NULL;
359 	}
360 
361 	if (tx_buf->skb) {
362 		dev_kfree_skb_any(tx_buf->skb);
363 		tx_buf->skb = NULL;
364 	}
365 }
366 
367 static void emac_clean_tx_desc_ring(struct emac_priv *priv)
368 {
369 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
370 	u32 i;
371 
372 	for (i = 0; i < tx_ring->total_cnt; i++)
373 		emac_free_tx_buf(priv, i);
374 
375 	tx_ring->head = 0;
376 	tx_ring->tail = 0;
377 }
378 
379 static void emac_clean_rx_desc_ring(struct emac_priv *priv)
380 {
381 	struct emac_rx_desc_buffer *rx_buf;
382 	struct emac_desc_ring *rx_ring;
383 	u32 i;
384 
385 	rx_ring = &priv->rx_ring;
386 
387 	for (i = 0; i < rx_ring->total_cnt; i++) {
388 		rx_buf = &rx_ring->rx_desc_buf[i];
389 
390 		if (!rx_buf->skb)
391 			continue;
392 
393 		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
394 				 rx_buf->dma_len, DMA_FROM_DEVICE);
395 
396 		dev_kfree_skb(rx_buf->skb);
397 		rx_buf->skb = NULL;
398 	}
399 
400 	rx_ring->tail = 0;
401 	rx_ring->head = 0;
402 }
403 
404 static int emac_alloc_tx_resources(struct emac_priv *priv)
405 {
406 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
407 	struct platform_device *pdev = priv->pdev;
408 
409 	tx_ring->tx_desc_buf = kcalloc(tx_ring->total_cnt,
410 				       sizeof(*tx_ring->tx_desc_buf),
411 				       GFP_KERNEL);
412 
413 	if (!tx_ring->tx_desc_buf)
414 		return -ENOMEM;
415 
416 	tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
417 	tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);
418 
419 	tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
420 						&tx_ring->desc_dma_addr,
421 						GFP_KERNEL);
422 	if (!tx_ring->desc_addr) {
423 		kfree(tx_ring->tx_desc_buf);
424 		return -ENOMEM;
425 	}
426 
427 	tx_ring->head = 0;
428 	tx_ring->tail = 0;
429 
430 	return 0;
431 }
432 
433 static int emac_alloc_rx_resources(struct emac_priv *priv)
434 {
435 	struct emac_desc_ring *rx_ring = &priv->rx_ring;
436 	struct platform_device *pdev = priv->pdev;
437 
438 	rx_ring->rx_desc_buf = kcalloc(rx_ring->total_cnt,
439 				       sizeof(*rx_ring->rx_desc_buf),
440 				       GFP_KERNEL);
441 	if (!rx_ring->rx_desc_buf)
442 		return -ENOMEM;
443 
444 	rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);
445 
446 	rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);
447 
448 	rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
449 						&rx_ring->desc_dma_addr,
450 						GFP_KERNEL);
451 	if (!rx_ring->desc_addr) {
452 		kfree(rx_ring->rx_desc_buf);
453 		return -ENOMEM;
454 	}
455 
456 	rx_ring->head = 0;
457 	rx_ring->tail = 0;
458 
459 	return 0;
460 }
461 
462 static void emac_free_tx_resources(struct emac_priv *priv)
463 {
464 	struct emac_desc_ring *tr = &priv->tx_ring;
465 	struct device *dev = &priv->pdev->dev;
466 
467 	emac_clean_tx_desc_ring(priv);
468 
469 	kfree(tr->tx_desc_buf);
470 	tr->tx_desc_buf = NULL;
471 
472 	dma_free_coherent(dev, tr->total_size, tr->desc_addr,
473 			  tr->desc_dma_addr);
474 	tr->desc_addr = NULL;
475 }
476 
477 static void emac_free_rx_resources(struct emac_priv *priv)
478 {
479 	struct emac_desc_ring *rr = &priv->rx_ring;
480 	struct device *dev = &priv->pdev->dev;
481 
482 	emac_clean_rx_desc_ring(priv);
483 
484 	kfree(rr->rx_desc_buf);
485 	rr->rx_desc_buf = NULL;
486 
487 	dma_free_coherent(dev, rr->total_size, rr->desc_addr,
488 			  rr->desc_dma_addr);
489 	rr->desc_addr = NULL;
490 }
491 
/*
 * Reclaim completed TX descriptors: walk from tail toward head, stopping
 * at the first descriptor still owned by DMA, freeing buffers and skbs
 * along the way. Wakes the queue once enough space has opened up.
 * Always returns 0.
 */
static int emac_tx_clean_desc(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	struct emac_desc_ring *tx_ring;
	struct emac_desc *tx_desc;
	u32 i;

	netif_tx_lock(ndev);

	tx_ring = &priv->tx_ring;

	i = tx_ring->tail;

	while (i != tx_ring->head) {
		tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];

		/* Stop checking if desc still own by DMA */
		if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
			break;

		emac_free_tx_buf(priv, i);
		memset(tx_desc, 0, sizeof(struct emac_desc));

		if (++i == tx_ring->total_cnt)
			i = 0;
	}

	tx_ring->tail = i;

	/* Hysteresis: only wake once at least a quarter of the ring is free */
	if (unlikely(netif_queue_stopped(ndev) &&
		     emac_tx_avail(priv) > tx_ring->total_cnt / 4))
		netif_wake_queue(ndev);

	netif_tx_unlock(ndev);

	return 0;
}
529 
530 static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
531 {
532 	const char *msg;
533 	u32 len;
534 
535 	len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);
536 
537 	if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
538 		msg = "Not last descriptor"; /* This would be a bug */
539 	else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
540 		msg = "Runt frame";
541 	else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
542 		msg = "Frame CRC error";
543 	else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
544 		msg = "Frame exceeds max length";
545 	else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
546 		msg = "Frame jabber error";
547 	else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
548 		msg = "Frame length error";
549 	else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
550 		msg = "Frame length unacceptable";
551 	else
552 		return true; /* All good */
553 
554 	dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);
555 
556 	return false;
557 }
558 
/*
 * Refill the RX ring from head onward: allocate an skb for every empty
 * slot, map it for DMA, fill in the descriptor, and hand it to hardware.
 * Stops early on allocation or mapping failure; head records how far we
 * got so a later call can resume.
 */
static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct emac_desc rx_desc, *rx_desc_addr;
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct sk_buff *skb;
	u32 i;

	i = rx_ring->head;
	rx_buf = &rx_ring->rx_desc_buf[i];

	while (!rx_buf->skb) {
		skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
		if (!skb)
			break;

		skb->dev = ndev;

		rx_buf->skb = skb;
		rx_buf->dma_len = priv->dma_buf_sz;
		rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
						  priv->dma_buf_sz,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
			dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
			goto err_free_skb;
		}

		rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Build the descriptor locally first */
		memset(&rx_desc, 0, sizeof(rx_desc));

		rx_desc.buffer_addr_1 = rx_buf->dma_addr;
		rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
					   rx_buf->dma_len);

		if (++i == rx_ring->total_cnt) {
			rx_desc.desc1 |= RX_DESC_1_END_RING;
			i = 0;
		}

		/*
		 * Publish the descriptor contents before setting the OWN bit,
		 * so DMA never sees a half-written descriptor it owns.
		 */
		*rx_desc_addr = rx_desc;
		dma_wmb();
		WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);

		rx_buf = &rx_ring->rx_desc_buf[i];
	}

	rx_ring->head = i;
	return;

err_free_skb:
	dev_kfree_skb_any(skb);
	rx_buf->skb = NULL;
}
615 
616 /* Returns number of packets received */
/* Returns number of packets received */
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct emac_desc_ring *rx_ring;
	struct sk_buff *skb = NULL;
	struct emac_desc *rx_desc;
	u32 got = 0, skb_len, i;

	rx_ring = &priv->rx_ring;

	i = rx_ring->tail;

	while (budget--) {
		rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Stop checking if rx_desc still owned by DMA */
		if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
			break;

		/* Read the rest of the descriptor only after the OWN check */
		dma_rmb();

		rx_buf = &rx_ring->rx_desc_buf[i];

		if (!rx_buf->skb)
			break;

		got++;

		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				 rx_buf->dma_len, DMA_FROM_DEVICE);

		if (likely(emac_rx_frame_good(priv, rx_desc))) {
			skb = rx_buf->skb;

			/* Hardware length includes the FCS; strip it */
			skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
					    rx_desc->desc0);
			skb_len -= ETH_FCS_LEN;

			skb_put(skb, skb_len);
			skb->dev = ndev;
			ndev->hard_header_len = ETH_HLEN;

			skb->protocol = eth_type_trans(skb, ndev);

			/* No hardware checksum offload on this path */
			skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&priv->napi, skb);

			memset(rx_desc, 0, sizeof(struct emac_desc));
			rx_buf->skb = NULL;
		} else {
			dev_kfree_skb_irq(rx_buf->skb);
			rx_buf->skb = NULL;
		}

		if (++i == rx_ring->total_cnt)
			i = 0;
	}

	rx_ring->tail = i;

	/* Re-post fresh buffers for everything we consumed */
	emac_alloc_rx_desc_buffers(priv);

	return got;
}
683 
684 static int emac_rx_poll(struct napi_struct *napi, int budget)
685 {
686 	struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
687 	int work_done;
688 
689 	emac_tx_clean_desc(priv);
690 
691 	work_done = emac_rx_clean_desc(priv, budget);
692 	if (work_done < budget && napi_complete_done(napi, work_done))
693 		emac_enable_interrupt(priv);
694 
695 	return work_done;
696 }
697 
698 /*
699  * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
700  *
701  * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
702  * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
703  */
704 
705 static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
706 			    struct emac_tx_desc_buffer *tx_buf,
707 			    struct sk_buff *skb, u32 frag_idx)
708 {
709 	bool map_as_page, buf_idx;
710 	const skb_frag_t *frag;
711 	phys_addr_t addr;
712 	u32 len;
713 	int ret;
714 
715 	buf_idx = frag_idx % 2;
716 
717 	if (frag_idx == 0) {
718 		/* Non-fragmented part */
719 		len = skb_headlen(skb);
720 		addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
721 		map_as_page = false;
722 	} else {
723 		/* Fragment */
724 		frag = &skb_shinfo(skb)->frags[frag_idx - 1];
725 		len = skb_frag_size(frag);
726 		addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
727 		map_as_page = true;
728 	}
729 
730 	ret = dma_mapping_error(dev, addr);
731 	if (ret)
732 		return ret;
733 
734 	tx_buf->buf[buf_idx].dma_addr = addr;
735 	tx_buf->buf[buf_idx].dma_len = len;
736 	tx_buf->buf[buf_idx].map_as_page = map_as_page;
737 
738 	if (buf_idx == 0) {
739 		tx_desc->buffer_addr_1 = addr;
740 		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
741 	} else {
742 		tx_desc->buffer_addr_2 = addr;
743 		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
744 	}
745 
746 	return 0;
747 }
748 
/*
 * Queue @skb on the TX ring: map all fragments into descriptors (two
 * fragments per descriptor), then hand the chain to hardware and kick
 * the DMA engine. On mapping failure the skb is dropped and counted.
 */
static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_desc tx_desc, *tx_desc_addr;
	struct device *dev = &priv->pdev->dev;
	struct emac_tx_desc_buffer *tx_buf;
	u32 head, old_head, frag_num, f;
	bool buf_idx;

	frag_num = skb_shinfo(skb)->nr_frags;
	head = tx_ring->head;
	old_head = head;

	for (f = 0; f < frag_num + 1; f++) {
		buf_idx = f % 2;

		/*
		 * If using buffer 1, initialize a new desc. Otherwise, use
		 * buffer 2 of previous fragment's desc.
		 */
		if (!buf_idx) {
			tx_buf = &tx_ring->tx_desc_buf[head];
			tx_desc_addr =
				&((struct emac_desc *)tx_ring->desc_addr)[head];
			memset(&tx_desc, 0, sizeof(tx_desc));

			/*
			 * Give ownership for all but first desc initially. For
			 * first desc, give at the end so DMA cannot start
			 * reading uninitialized descs.
			 */
			if (head != old_head)
				tx_desc.desc0 |= TX_DESC_0_OWN;

			if (++head == tx_ring->total_cnt) {
				/* Just used last desc in ring */
				tx_desc.desc1 |= TX_DESC_1_END_RING;
				head = 0;
			}
		}

		if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
			dev_err_ratelimited(&priv->ndev->dev,
					    "Map TX frag %d failed\n", f);
			goto err_free_skb;
		}

		if (f == 0)
			tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;

		if (f == frag_num) {
			/* skb is freed from the last segment's descriptor */
			tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
			tx_buf->skb = skb;
			if (emac_tx_should_interrupt(priv, frag_num + 1))
				tx_desc.desc1 |=
					TX_DESC_1_INTERRUPT_ON_COMPLETION;
		}

		*tx_desc_addr = tx_desc;
	}

	/* All descriptors are ready, give ownership for first desc */
	tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
	dma_wmb();
	WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);

	emac_dma_start_transmit(priv);

	tx_ring->head = head;

	return;

err_free_skb:
	dev_dstats_tx_dropped(priv->ndev);
	dev_kfree_skb_any(skb);
}
825 
826 static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
827 {
828 	struct emac_priv *priv = netdev_priv(ndev);
829 	int nfrags = skb_shinfo(skb)->nr_frags;
830 	struct device *dev = &priv->pdev->dev;
831 
832 	if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
833 		if (!netif_queue_stopped(ndev)) {
834 			netif_stop_queue(ndev);
835 			dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
836 		}
837 		return NETDEV_TX_BUSY;
838 	}
839 
840 	emac_tx_mem_map(priv, skb);
841 
842 	/* Make sure there is space in the ring for the next TX. */
843 	if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
844 		netif_stop_queue(ndev);
845 
846 	return NETDEV_TX_OK;
847 }
848 
849 static int emac_set_mac_address(struct net_device *ndev, void *addr)
850 {
851 	struct emac_priv *priv = netdev_priv(ndev);
852 	int ret = eth_mac_addr(ndev, addr);
853 
854 	if (ret)
855 		return ret;
856 
857 	/* If running, set now; if not running it will be set in emac_up. */
858 	if (netif_running(ndev))
859 		emac_set_mac_addr(priv, ndev->dev_addr);
860 
861 	return 0;
862 }
863 
864 static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
865 {
866 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
867 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
868 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
869 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
870 }
871 
872 /*
873  * The upper 6 bits of the Ethernet CRC of the MAC address is used as the hash
874  * when matching multicast addresses.
875  */
876 static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
877 {
878 	u32 crc32 = ether_crc(ETH_ALEN, addr);
879 
880 	return crc32 >> 26;
881 }
882 
883 /* Configure Multicast and Promiscuous modes */
884 static void emac_set_rx_mode(struct net_device *ndev)
885 {
886 	struct emac_priv *priv = netdev_priv(ndev);
887 	struct netdev_hw_addr *ha;
888 	u32 mc_filter[4] = { 0 };
889 	u32 hash, reg, bit, val;
890 
891 	val = emac_rd(priv, MAC_ADDRESS_CONTROL);
892 
893 	val &= ~MREGBIT_PROMISCUOUS_MODE;
894 
895 	if (ndev->flags & IFF_PROMISC) {
896 		/* Enable promisc mode */
897 		val |= MREGBIT_PROMISCUOUS_MODE;
898 	} else if ((ndev->flags & IFF_ALLMULTI) ||
899 		   (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
900 		/* Accept all multicast frames by setting every bit */
901 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
902 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
903 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
904 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
905 	} else if (!netdev_mc_empty(ndev)) {
906 		emac_mac_multicast_filter_clear(priv);
907 		netdev_for_each_mc_addr(ha, ndev) {
908 			/*
909 			 * The hash table is an array of 4 16-bit registers. It
910 			 * is treated like an array of 64 bits (bits[hash]).
911 			 */
912 			hash = emac_ether_addr_hash(ha->addr);
913 			reg = hash / 16;
914 			bit = hash % 16;
915 			mc_filter[reg] |= BIT(bit);
916 		}
917 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
918 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
919 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
920 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
921 	}
922 
923 	emac_wr(priv, MAC_ADDRESS_CONTROL, val);
924 }
925 
926 static int emac_change_mtu(struct net_device *ndev, int mtu)
927 {
928 	struct emac_priv *priv = netdev_priv(ndev);
929 	u32 frame_len;
930 
931 	if (netif_running(ndev)) {
932 		netdev_err(ndev, "must be stopped to change MTU\n");
933 		return -EBUSY;
934 	}
935 
936 	frame_len = mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
937 
938 	if (frame_len <= EMAC_DEFAULT_BUFSIZE)
939 		priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
940 	else if (frame_len <= EMAC_RX_BUF_2K)
941 		priv->dma_buf_sz = EMAC_RX_BUF_2K;
942 	else
943 		priv->dma_buf_sz = EMAC_RX_BUF_MAX;
944 
945 	ndev->mtu = mtu;
946 
947 	return 0;
948 }
949 
/* ndo_tx_timeout: defer recovery to process context via the workqueue */
static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct emac_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
956 
/*
 * MDIO bus read: start a read transaction, poll until hardware clears
 * the start bit (its completion signal), then fetch the 16-bit result.
 * Returns the register value or a negative errno on timeout.
 */
static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct emac_priv *priv = bus->priv;
	u32 cmd = 0, val;
	int ret;

	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
	cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;

	emac_wr(priv, MAC_MDIO_DATA, 0x0);
	emac_wr(priv, MAC_MDIO_CONTROL, cmd);

	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

	if (ret)
		return ret;

	val = emac_rd(priv, MAC_MDIO_DATA);
	return FIELD_GET(MREGBIT_MDIO_DATA, val);
}
979 
980 static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
981 			  u16 value)
982 {
983 	struct emac_priv *priv = bus->priv;
984 	u32 cmd = 0, val;
985 	int ret;
986 
987 	emac_wr(priv, MAC_MDIO_DATA, value);
988 
989 	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
990 	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
991 	cmd |= MREGBIT_START_MDIO_TRANS;
992 
993 	emac_wr(priv, MAC_MDIO_CONTROL, cmd);
994 
995 	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
996 				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
997 
998 	return ret;
999 }
1000 
/*
 * Allocate and register the MDIO bus, attached to the optional
 * "mdio-bus" DT child node. All allocations are devm-managed, so no
 * explicit unwinding is needed. Returns 0 or a negative errno.
 */
static int emac_mdio_init(struct emac_priv *priv)
{
	struct device *dev = &priv->pdev->dev;
	struct device_node *mii_np;
	struct mii_bus *mii;
	int ret;

	mii = devm_mdiobus_alloc(dev);
	if (!mii)
		return -ENOMEM;

	mii->priv = priv;
	mii->name = "k1_emac_mii";
	mii->read = emac_mii_read;
	mii->write = emac_mii_write;
	mii->parent = dev;
	/* Mask every address from auto-probing */
	mii->phy_mask = ~0;
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);

	/* May be NULL; registration then proceeds without a DT node */
	mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");

	ret = devm_of_mdiobus_register(dev, mii, mii_np);
	if (ret)
		dev_err_probe(dev, ret, "Failed to register mdio bus\n");

	of_node_put(mii_np);
	return ret;
}
1029 
1030 static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
1031 {
1032 	u32 val;
1033 
1034 	val = emac_rd(priv, MAC_FC_CONTROL);
1035 
1036 	FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
1037 	FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);
1038 
1039 	emac_wr(priv, MAC_FC_CONTROL, val);
1040 }
1041 
1042 static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
1043 {
1044 	u32 val = emac_rd(priv, MAC_FC_CONTROL);
1045 
1046 	FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);
1047 
1048 	emac_wr(priv, MAC_FC_CONTROL, val);
1049 }
1050 
1051 static void emac_set_fc(struct emac_priv *priv, u8 fc)
1052 {
1053 	emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
1054 	emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
1055 	priv->flow_control = fc;
1056 }
1057 
1058 static void emac_set_fc_autoneg(struct emac_priv *priv)
1059 {
1060 	struct phy_device *phydev = priv->ndev->phydev;
1061 	u32 local_adv, remote_adv;
1062 	u8 fc;
1063 
1064 	local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1065 
1066 	remote_adv = 0;
1067 
1068 	if (phydev->pause)
1069 		remote_adv |= LPA_PAUSE_CAP;
1070 
1071 	if (phydev->asym_pause)
1072 		remote_adv |= LPA_PAUSE_ASYM;
1073 
1074 	fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
1075 
1076 	priv->flow_control_autoneg = true;
1077 
1078 	emac_set_fc(priv, fc);
1079 }
1080 
1081 /*
1082  * Even though this MAC supports gigabit operation, it only provides 32-bit
1083  * statistics counters. The most overflow-prone counters are the "bytes" ones,
1084  * which at gigabit overflow about twice a minute.
1085  *
1086  * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
1087  * every time statistics seem to go backwards. Also, update periodically to
1088  * catch overflows when we are not otherwise checking the statistics often
1089  * enough.
1090  */
1091 
1092 #define EMAC_STATS_TIMER_PERIOD		20
1093 
/*
 * Latch and read one 32-bit hardware statistics counter: request counter
 * @cnt via @control_reg, poll (atomically - may run in softirq) for the
 * read-start bit to clear, then combine the high/low data registers.
 * Returns 0 with *@res set, or a negative errno on timeout.
 */
static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
			      u32 control_reg, u32 high_reg, u32 low_reg)
{
	u32 val, high, low;
	int ret;

	/* The "read" bit is the same for TX and RX */

	val = MREGBIT_START_TX_COUNTER_READ | cnt;
	emac_wr(priv, control_reg, val);
	val = emac_rd(priv, control_reg);

	ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
					!(val & MREGBIT_START_TX_COUNTER_READ),
					100, 10000);

	if (ret) {
		/*
		 * This could be caused by the PHY stopping its refclk even when
		 * the link is up, for power saving. See also comments in
		 * emac_stats_update().
		 */
		dev_err_ratelimited(&priv->ndev->dev,
				    "Read stat timeout. PHY clock stopped?\n");
		return ret;
	}

	/* Counter value is split as high[15:0]:low[15:0] */
	high = emac_rd(priv, high_reg);
	low = emac_rd(priv, low_reg);
	*res = high << 16 | lower_16_bits(low);

	return 0;
}
1127 
1128 static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
1129 {
1130 	return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
1131 				  MAC_TX_STATCTR_DATA_HIGH,
1132 				  MAC_TX_STATCTR_DATA_LOW);
1133 }
1134 
1135 static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
1136 {
1137 	return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
1138 				  MAC_RX_STATCTR_DATA_HIGH,
1139 				  MAC_RX_STATCTR_DATA_LOW);
1140 }
1141 
1142 static void emac_update_counter(u64 *counter, u32 new_low)
1143 {
1144 	u32 old_low = lower_32_bits(*counter);
1145 	u64 high = upper_32_bits(*counter);
1146 
1147 	if (old_low > new_low) {
1148 		/* Overflowed, increment high 32 bits */
1149 		high++;
1150 	}
1151 
1152 	*counter = (high << 32) | new_low;
1153 }
1154 
/*
 * Refresh the cumulative 64-bit software statistics from the hardware's
 * 32-bit counters and re-arm the periodic update timer.
 *
 * Caller must hold priv->stats_lock.
 */
static void emac_stats_update(struct emac_priv *priv)
{
	u64 *tx_stats_off = priv->tx_stats_off.array;
	u64 *rx_stats_off = priv->rx_stats_off.array;
	u64 *tx_stats = priv->tx_stats.array;
	u64 *rx_stats = priv->rx_stats.array;
	u32 i, res, offset;

	assert_spin_locked(&priv->stats_lock);

	/*
	 * We can't read statistics if the interface is not up. Also, some PHYs
	 * stop their reference clocks for link down power saving, which also
	 * causes reading statistics to time out. Don't update and don't
	 * reschedule in these cases.
	 */
	if (!netif_running(priv->ndev) ||
	    !netif_carrier_ok(priv->ndev) ||
	    !netif_device_present(priv->ndev)) {
		return;
	}

	for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
		/*
		 * If reading stats times out anyway, the stat registers will be
		 * stuck, and we can't really recover from that.
		 *
		 * Reading statistics also can't return an error, so just return
		 * without updating and without rescheduling.
		 */
		if (emac_tx_read_stat_cnt(priv, i, &res))
			return;

		/*
		 * Re-initializing while bringing interface up resets counters
		 * to zero, so to provide continuity, we add the values saved
		 * last time we did emac_down() to the new hardware-provided
		 * value.
		 */
		offset = lower_32_bits(tx_stats_off[i]);
		emac_update_counter(&tx_stats[i], res + offset);
	}

	/* Similar remarks as TX stats */
	for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
		if (emac_rx_read_stat_cnt(priv, i, &res))
			return;
		offset = lower_32_bits(rx_stats_off[i]);
		emac_update_counter(&rx_stats[i], res + offset);
	}

	/* Re-arm periodic refresh so 32-bit wraparounds are not missed */
	mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
}
1208 
1209 static void emac_stats_timer(struct timer_list *t)
1210 {
1211 	struct emac_priv *priv = timer_container_of(priv, t, stats_timer);
1212 
1213 	spin_lock(&priv->stats_lock);
1214 
1215 	emac_stats_update(priv);
1216 
1217 	spin_unlock(&priv->stats_lock);
1218 }
1219 
/*
 * RX frame-size histogram buckets; order matches the hist[] fill in
 * emac_get_rmon_stats().
 */
static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
	{   64,   64 },
	{   65,  127 },
	{  128,  255 },
	{  256,  511 },
	{  512, 1023 },
	{ 1024, 1518 },
	{ 1519, 4096 },
	{ /* sentinel */ },
};
1230 
1231 /* Like dev_fetch_dstats(), but we only use tx_drops */
1232 static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
1233 {
1234 	const struct pcpu_dstats *stats;
1235 	u64 tx_drops, total = 0;
1236 	unsigned int start;
1237 	int cpu;
1238 
1239 	for_each_possible_cpu(cpu) {
1240 		stats = per_cpu_ptr(priv->ndev->dstats, cpu);
1241 		do {
1242 			start = u64_stats_fetch_begin(&stats->syncp);
1243 			tx_drops = u64_stats_read(&stats->tx_drops);
1244 		} while (u64_stats_fetch_retry(&stats->syncp, start));
1245 
1246 		total += tx_drops;
1247 	}
1248 
1249 	return total;
1250 }
1251 
1252 static void emac_get_stats64(struct net_device *dev,
1253 			     struct rtnl_link_stats64 *storage)
1254 {
1255 	struct emac_priv *priv = netdev_priv(dev);
1256 	union emac_hw_tx_stats *tx_stats;
1257 	union emac_hw_rx_stats *rx_stats;
1258 
1259 	tx_stats = &priv->tx_stats;
1260 	rx_stats = &priv->rx_stats;
1261 
1262 	/* This is the only software counter */
1263 	storage->tx_dropped = emac_get_stat_tx_drops(priv);
1264 
1265 	spin_lock_bh(&priv->stats_lock);
1266 
1267 	emac_stats_update(priv);
1268 
1269 	storage->tx_packets = tx_stats->stats.tx_ok_pkts;
1270 	storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
1271 	storage->tx_errors = tx_stats->stats.tx_err_pkts;
1272 
1273 	storage->rx_packets = rx_stats->stats.rx_ok_pkts;
1274 	storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
1275 	storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
1276 	storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
1277 	storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
1278 	storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;
1279 
1280 	storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
1281 	storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
1282 	storage->collisions += tx_stats->stats.tx_excessclsn_pkts;
1283 
1284 	storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
1285 	storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;
1286 
1287 	spin_unlock_bh(&priv->stats_lock);
1288 }
1289 
1290 static void emac_get_rmon_stats(struct net_device *dev,
1291 				struct ethtool_rmon_stats *rmon_stats,
1292 				const struct ethtool_rmon_hist_range **ranges)
1293 {
1294 	struct emac_priv *priv = netdev_priv(dev);
1295 	union emac_hw_rx_stats *rx_stats;
1296 
1297 	rx_stats = &priv->rx_stats;
1298 
1299 	*ranges = emac_rmon_hist_ranges;
1300 
1301 	spin_lock_bh(&priv->stats_lock);
1302 
1303 	emac_stats_update(priv);
1304 
1305 	rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
1306 	rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
1307 	rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
1308 	rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;
1309 
1310 	/* Only RX has histogram stats */
1311 
1312 	rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
1313 	rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
1314 	rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
1315 	rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
1316 	rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
1317 	rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
1318 	rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;
1319 
1320 	spin_unlock_bh(&priv->stats_lock);
1321 }
1322 
1323 static void emac_get_eth_mac_stats(struct net_device *dev,
1324 				   struct ethtool_eth_mac_stats *mac_stats)
1325 {
1326 	struct emac_priv *priv = netdev_priv(dev);
1327 	union emac_hw_tx_stats *tx_stats;
1328 	union emac_hw_rx_stats *rx_stats;
1329 
1330 	tx_stats = &priv->tx_stats;
1331 	rx_stats = &priv->rx_stats;
1332 
1333 	spin_lock_bh(&priv->stats_lock);
1334 
1335 	emac_stats_update(priv);
1336 
1337 	mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
1338 	mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;
1339 
1340 	mac_stats->MulticastFramesReceivedOK =
1341 		rx_stats->stats.rx_multicast_pkts;
1342 	mac_stats->BroadcastFramesReceivedOK =
1343 		rx_stats->stats.rx_broadcast_pkts;
1344 
1345 	mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
1346 	mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
1347 	mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
1348 	mac_stats->FramesAbortedDueToXSColls =
1349 		tx_stats->stats.tx_excessclsn_pkts;
1350 
1351 	spin_unlock_bh(&priv->stats_lock);
1352 }
1353 
1354 static void emac_get_pause_stats(struct net_device *dev,
1355 				 struct ethtool_pause_stats *pause_stats)
1356 {
1357 	struct emac_priv *priv = netdev_priv(dev);
1358 	union emac_hw_tx_stats *tx_stats;
1359 	union emac_hw_rx_stats *rx_stats;
1360 
1361 	tx_stats = &priv->tx_stats;
1362 	rx_stats = &priv->rx_stats;
1363 
1364 	spin_lock_bh(&priv->stats_lock);
1365 
1366 	emac_stats_update(priv);
1367 
1368 	pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
1369 	pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;
1370 
1371 	spin_unlock_bh(&priv->stats_lock);
1372 }
1373 
1374 /* Other statistics that are not derivable from standard statistics */
1375 
/*
 * Describe one u64 field of a stats union by its index (viewing the
 * union as a u64 array) and its name, for ethtool -S reporting.
 */
#define EMAC_ETHTOOL_STAT(type, name) \
	{ offsetof(type, stats.name) / sizeof(u64), #name }

/* Extra RX counters not representable in the standard statistics */
static const struct emac_ethtool_stats {
	size_t offset;	/* index into the stats union viewed as u64[] */
	char str[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
} emac_ethtool_rx_stats[] = {
	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
};
1386 
1387 static int emac_get_sset_count(struct net_device *dev, int sset)
1388 {
1389 	switch (sset) {
1390 	case ETH_SS_STATS:
1391 		return ARRAY_SIZE(emac_ethtool_rx_stats);
1392 	default:
1393 		return -EOPNOTSUPP;
1394 	}
1395 }
1396 
1397 static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1398 {
1399 	int i;
1400 
1401 	switch (stringset) {
1402 	case ETH_SS_STATS:
1403 		for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
1404 			memcpy(data, emac_ethtool_rx_stats[i].str,
1405 			       ETH_GSTRING_LEN);
1406 			data += ETH_GSTRING_LEN;
1407 		}
1408 		break;
1409 	}
1410 }
1411 
1412 static void emac_get_ethtool_stats(struct net_device *dev,
1413 				   struct ethtool_stats *stats, u64 *data)
1414 {
1415 	struct emac_priv *priv = netdev_priv(dev);
1416 	u64 *rx_stats = (u64 *)&priv->rx_stats;
1417 	int i;
1418 
1419 	spin_lock_bh(&priv->stats_lock);
1420 
1421 	emac_stats_update(priv);
1422 
1423 	for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
1424 		data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];
1425 
1426 	spin_unlock_bh(&priv->stats_lock);
1427 }
1428 
1429 static int emac_ethtool_get_regs_len(struct net_device *dev)
1430 {
1431 	return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
1432 }
1433 
1434 static void emac_ethtool_get_regs(struct net_device *dev,
1435 				  struct ethtool_regs *regs, void *space)
1436 {
1437 	struct emac_priv *priv = netdev_priv(dev);
1438 	u32 *reg_space = space;
1439 	int i;
1440 
1441 	regs->version = 1;
1442 
1443 	for (i = 0; i < EMAC_DMA_REG_CNT; i++)
1444 		reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);
1445 
1446 	for (i = 0; i < EMAC_MAC_REG_CNT; i++)
1447 		reg_space[i + EMAC_DMA_REG_CNT] =
1448 			emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
1449 }
1450 
1451 static void emac_get_pauseparam(struct net_device *dev,
1452 				struct ethtool_pauseparam *pause)
1453 {
1454 	struct emac_priv *priv = netdev_priv(dev);
1455 
1456 	pause->autoneg = priv->flow_control_autoneg;
1457 	pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
1458 	pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
1459 }
1460 
1461 static int emac_set_pauseparam(struct net_device *dev,
1462 			       struct ethtool_pauseparam *pause)
1463 {
1464 	struct emac_priv *priv = netdev_priv(dev);
1465 	u8 fc = 0;
1466 
1467 	if (!netif_running(dev))
1468 		return -ENETDOWN;
1469 
1470 	priv->flow_control_autoneg = pause->autoneg;
1471 
1472 	if (pause->autoneg) {
1473 		emac_set_fc_autoneg(priv);
1474 	} else {
1475 		if (pause->tx_pause)
1476 			fc |= FLOW_CTRL_TX;
1477 
1478 		if (pause->rx_pause)
1479 			fc |= FLOW_CTRL_RX;
1480 
1481 		emac_set_fc(priv, fc);
1482 	}
1483 
1484 	return 0;
1485 }
1486 
1487 static void emac_get_drvinfo(struct net_device *dev,
1488 			     struct ethtool_drvinfo *info)
1489 {
1490 	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1491 	info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
1492 }
1493 
1494 static void emac_tx_timeout_task(struct work_struct *work)
1495 {
1496 	struct net_device *ndev;
1497 	struct emac_priv *priv;
1498 
1499 	priv = container_of(work, struct emac_priv, tx_timeout_task);
1500 	ndev = priv->ndev;
1501 
1502 	rtnl_lock();
1503 
1504 	/* No need to reset if already down */
1505 	if (!netif_running(ndev)) {
1506 		rtnl_unlock();
1507 		return;
1508 	}
1509 
1510 	netdev_err(ndev, "MAC reset due to TX timeout\n");
1511 
1512 	netif_trans_update(ndev); /* prevent tx timeout */
1513 	dev_close(ndev);
1514 	dev_open(ndev, NULL);
1515 
1516 	rtnl_unlock();
1517 }
1518 
1519 static void emac_sw_init(struct emac_priv *priv)
1520 {
1521 	priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
1522 
1523 	priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
1524 	priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;
1525 
1526 	spin_lock_init(&priv->stats_lock);
1527 
1528 	INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
1529 
1530 	priv->tx_coal_frames = EMAC_TX_FRAMES;
1531 	priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;
1532 
1533 	timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
1534 	timer_setup(&priv->stats_timer, emac_stats_timer, 0);
1535 }
1536 
1537 static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
1538 {
1539 	struct net_device *ndev = (struct net_device *)dev_id;
1540 	struct emac_priv *priv = netdev_priv(ndev);
1541 	bool should_schedule = false;
1542 	u32 clr = 0;
1543 	u32 status;
1544 
1545 	status = emac_rd(priv, DMA_STATUS_IRQ);
1546 
1547 	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
1548 		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
1549 		should_schedule = true;
1550 	}
1551 
1552 	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
1553 		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
1554 
1555 	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
1556 		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
1557 
1558 	if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
1559 		clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
1560 		should_schedule = true;
1561 	}
1562 
1563 	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
1564 		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
1565 
1566 	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
1567 		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
1568 
1569 	if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
1570 		clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
1571 
1572 	if (should_schedule) {
1573 		if (napi_schedule_prep(&priv->napi)) {
1574 			emac_disable_interrupt(priv);
1575 			__napi_schedule_irqoff(&priv->napi);
1576 		}
1577 	}
1578 
1579 	emac_wr(priv, DMA_STATUS_IRQ, clr);
1580 
1581 	return IRQ_HANDLED;
1582 }
1583 
1584 static void emac_configure_tx(struct emac_priv *priv)
1585 {
1586 	u32 val;
1587 
1588 	/* Set base address */
1589 	val = (u32)priv->tx_ring.desc_dma_addr;
1590 	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
1591 
1592 	/* Set TX inter-frame gap value, enable transmit */
1593 	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
1594 	val &= ~MREGBIT_IFG_LEN;
1595 	val |= MREGBIT_TRANSMIT_ENABLE;
1596 	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
1597 	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
1598 
1599 	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);
1600 
1601 	/* Start TX DMA */
1602 	val = emac_rd(priv, DMA_CONTROL);
1603 	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
1604 	emac_wr(priv, DMA_CONTROL, val);
1605 }
1606 
1607 static void emac_configure_rx(struct emac_priv *priv)
1608 {
1609 	u32 val;
1610 
1611 	/* Set base address */
1612 	val = (u32)priv->rx_ring.desc_dma_addr;
1613 	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
1614 
1615 	/* Enable receive */
1616 	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
1617 	val |= MREGBIT_RECEIVE_ENABLE;
1618 	val |= MREGBIT_STORE_FORWARD;
1619 	emac_wr(priv, MAC_RECEIVE_CONTROL, val);
1620 
1621 	/* Start RX DMA */
1622 	val = emac_rd(priv, DMA_CONTROL);
1623 	val |= MREGBIT_START_STOP_RECEIVE_DMA;
1624 	emac_wr(priv, DMA_CONTROL, val);
1625 }
1626 
1627 static void emac_adjust_link(struct net_device *dev)
1628 {
1629 	struct emac_priv *priv = netdev_priv(dev);
1630 	struct phy_device *phydev = dev->phydev;
1631 	u32 ctrl;
1632 
1633 	if (phydev->link) {
1634 		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
1635 
1636 		/* Update duplex and speed from PHY */
1637 
1638 		FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
1639 			     phydev->duplex == DUPLEX_FULL);
1640 
1641 		ctrl &= ~MREGBIT_SPEED;
1642 
1643 		switch (phydev->speed) {
1644 		case SPEED_1000:
1645 			ctrl |= MREGBIT_SPEED_1000M;
1646 			break;
1647 		case SPEED_100:
1648 			ctrl |= MREGBIT_SPEED_100M;
1649 			break;
1650 		case SPEED_10:
1651 			ctrl |= MREGBIT_SPEED_10M;
1652 			break;
1653 		default:
1654 			netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
1655 			phydev->speed = SPEED_UNKNOWN;
1656 			break;
1657 		}
1658 
1659 		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
1660 
1661 		emac_set_fc_autoneg(priv);
1662 
1663 		/*
1664 		 * Reschedule stats updates now that link is up. See comments in
1665 		 * emac_stats_update().
1666 		 */
1667 		mod_timer(&priv->stats_timer, jiffies);
1668 	}
1669 
1670 	phy_print_status(phydev);
1671 }
1672 
1673 static void emac_update_delay_line(struct emac_priv *priv)
1674 {
1675 	u32 mask = 0, val = 0;
1676 
1677 	mask |= EMAC_RX_DLINE_EN;
1678 	mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
1679 	mask |= EMAC_TX_DLINE_EN;
1680 	mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;
1681 
1682 	if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
1683 		val |= EMAC_RX_DLINE_EN;
1684 		val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
1685 				  EMAC_DLINE_STEP_15P6);
1686 		val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);
1687 
1688 		val |= EMAC_TX_DLINE_EN;
1689 		val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
1690 				  EMAC_DLINE_STEP_15P6);
1691 		val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
1692 	}
1693 
1694 	regmap_update_bits(priv->regmap_apmu,
1695 			   priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
1696 			   mask, val);
1697 }
1698 
/*
 * Resolve the PHY from the device tree (phy-handle or fixed-link),
 * validate the interface mode, and attach it with emac_adjust_link()
 * as the link-change callback.
 *
 * Returns 0 on success or a negative errno. Note that the success path
 * also falls through the err_node_put label: ret is 0 there and only
 * the of_node reference is dropped.
 */
static int emac_phy_connect(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	struct phy_device *phydev;
	struct device_node *np;
	int ret;

	ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
	if (ret) {
		netdev_err(ndev, "No phy-mode found");
		return ret;
	}

	/* Only RMII and the RGMII variants are supported */
	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		break;
	default:
		netdev_err(ndev, "Unsupported PHY interface %s",
			   phy_modes(priv->phy_interface));
		return -EINVAL;
	}

	/* A fixed-link node stands in for a phy-handle when present */
	np = of_parse_phandle(dev->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(dev->of_node))
		np = of_node_get(dev->of_node);

	if (!np) {
		netdev_err(ndev, "No PHY specified");
		return -ENODEV;
	}

	ret = emac_phy_interface_config(priv);
	if (ret)
		goto err_node_put;

	phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
				priv->phy_interface);
	if (!phydev) {
		netdev_err(ndev, "Could not attach to PHY\n");
		ret = -ENODEV;
		goto err_node_put;
	}

	phy_support_asym_pause(phydev);

	/* PM of the PHY is handled by this driver, not by the PHY core */
	phydev->mac_managed_pm = true;

	emac_update_delay_line(priv);

err_node_put:
	of_node_put(np);
	return ret;
}
1757 
/*
 * Bring the interface up: connect the PHY, initialize and start the
 * hardware, request the IRQ, and enable NAPI, the TX queue and the
 * statistics timer.
 *
 * Holds a runtime-PM reference for as long as the interface is up;
 * emac_down() drops it.
 */
static int emac_up(struct emac_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;
	int ret;

	pm_runtime_get_sync(&pdev->dev);

	ret = emac_phy_connect(ndev);
	if (ret) {
		dev_err(&pdev->dev, "emac_phy_connect failed\n");
		goto err_pm_put;
	}

	emac_init_hw(priv);

	emac_set_mac_addr(priv, ndev->dev_addr);
	emac_configure_tx(priv);
	emac_configure_rx(priv);

	emac_alloc_rx_desc_buffers(priv);

	phy_start(ndev->phydev);

	ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_reset_disconnect_phy;
	}

	/* Don't enable MAC interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);

	/* Enable DMA interrupts */
	emac_wr(priv, DMA_INTERRUPT_ENABLE,
		MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
			MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
			MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
			MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
			MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	/* Start periodic statistics refresh; see emac_stats_update() */
	mod_timer(&priv->stats_timer, jiffies);

	return 0;

err_reset_disconnect_phy:
	emac_reset_hw(priv);
	phy_disconnect(ndev->phydev);

err_pm_put:
	pm_runtime_put_sync(&pdev->dev);
	return ret;
}
1816 
/*
 * Take the interface down: stop the queue, detach the PHY, mask and
 * free the IRQ, stop NAPI, deferred work and timers, reset the MAC,
 * and snapshot statistics (the reset zeroes the hardware counters).
 *
 * Drops the runtime-PM reference taken by emac_up(). Always returns 0.
 */
static int emac_down(struct emac_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;

	netif_stop_queue(ndev);

	phy_disconnect(ndev->phydev);

	/* Mask all interrupt sources before releasing the IRQ */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	free_irq(priv->irq, ndev);

	napi_disable(&priv->napi);

	timer_delete_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_delete_sync(&priv->stats_timer);

	emac_reset_hw(priv);

	/* Update and save current stats, see emac_stats_update() for usage */

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	priv->tx_stats_off = priv->tx_stats;
	priv->rx_stats_off = priv->rx_stats;

	spin_unlock_bh(&priv->stats_lock);

	pm_runtime_put_sync(&pdev->dev);
	return 0;
}
1854 
1855 /* Called when net interface is brought up. */
1856 static int emac_open(struct net_device *ndev)
1857 {
1858 	struct emac_priv *priv = netdev_priv(ndev);
1859 	struct device *dev = &priv->pdev->dev;
1860 	int ret;
1861 
1862 	ret = emac_alloc_tx_resources(priv);
1863 	if (ret) {
1864 		dev_err(dev, "Cannot allocate TX resources\n");
1865 		return ret;
1866 	}
1867 
1868 	ret = emac_alloc_rx_resources(priv);
1869 	if (ret) {
1870 		dev_err(dev, "Cannot allocate RX resources\n");
1871 		goto err_free_tx;
1872 	}
1873 
1874 	ret = emac_up(priv);
1875 	if (ret) {
1876 		dev_err(dev, "Error when bringing interface up\n");
1877 		goto err_free_rx;
1878 	}
1879 	return 0;
1880 
1881 err_free_rx:
1882 	emac_free_rx_resources(priv);
1883 err_free_tx:
1884 	emac_free_tx_resources(priv);
1885 
1886 	return ret;
1887 }
1888 
/* ndo_stop: shut the hardware down and release the descriptor rings. */
static int emac_stop(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);

	emac_down(priv);
	emac_free_rx_resources(priv);
	emac_free_tx_resources(priv);

	return 0;
}
1900 
/* ethtool entry points; link settings are delegated to the PHY core */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_drvinfo		= emac_get_drvinfo,
	.get_link		= ethtool_op_get_link,

	.get_regs		= emac_ethtool_get_regs,
	.get_regs_len		= emac_ethtool_get_regs_len,

	.get_rmon_stats		= emac_get_rmon_stats,
	.get_pause_stats	= emac_get_pause_stats,
	.get_eth_mac_stats	= emac_get_eth_mac_stats,

	.get_sset_count		= emac_get_sset_count,
	.get_strings		= emac_get_strings,
	.get_ethtool_stats	= emac_get_ethtool_stats,

	.get_pauseparam		= emac_get_pauseparam,
	.set_pauseparam		= emac_set_pauseparam,
};
1922 
/* Net device entry points */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open               = emac_open,
	.ndo_stop               = emac_stop,
	.ndo_start_xmit         = emac_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address    = emac_set_mac_address,
	.ndo_eth_ioctl          = phy_do_ioctl_running,
	.ndo_change_mtu         = emac_change_mtu,
	.ndo_tx_timeout         = emac_tx_timeout,
	.ndo_set_rx_mode        = emac_set_rx_mode,
	.ndo_get_stats64	= emac_get_stats64,
};
1935 
1936 /* Currently we always use 15.6 ps/step for the delay line */
1937 
/* Convert picoseconds to delay-line units (one unit = 15.6 ps). */
static u32 delay_ps_to_unit(u32 ps)
{
	return DIV_ROUND_CLOSEST(ps * 10, 156);
}
1942 
/* Convert delay-line units back to picoseconds (one unit = 15.6 ps). */
static u32 delay_unit_to_ps(u32 unit)
{
	return DIV_ROUND_CLOSEST(unit * 156, 10);
}
1947 
1948 #define EMAC_MAX_DELAY_UNIT	FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)
1949 
1950 /* Minus one just to be safe from rounding errors */
1951 #define EMAC_MAX_DELAY_PS	(delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
1952 
/*
 * Parse device-tree configuration: MMIO region, APMU syscon handle,
 * IRQ, MAC address, and RGMII delay-line values (converted from
 * picoseconds to hardware units). Returns 0 or a negative errno.
 */
static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	u8 mac_addr[ETH_ALEN] = { 0 };
	int ret;

	priv->iobase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->iobase))
		return dev_err_probe(dev, PTR_ERR(priv->iobase),
				     "ioremap failed\n");

	priv->regmap_apmu =
		syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
						     &priv->regmap_apmu_offset);

	if (IS_ERR(priv->regmap_apmu))
		return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
				     "failed to get syscon\n");

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	/* Fall back to a random MAC unless the provider merely defers */
	ret = of_get_mac_address(np, mac_addr);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			return dev_err_probe(dev, ret,
					     "Can't get MAC address\n");

		dev_info(&pdev->dev, "Using random MAC address\n");
		eth_hw_addr_random(priv->ndev);
	} else {
		eth_hw_addr_set(priv->ndev, mac_addr);
	}

	/* Delays are optional; default to none */
	priv->tx_delay = 0;
	priv->rx_delay = 0;

	of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
	of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);

	if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
		dev_err(&pdev->dev,
			"tx-internal-delay-ps too large: max %d, got %d",
			EMAC_MAX_DELAY_PS, priv->tx_delay);
		return -EINVAL;
	}

	if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
		dev_err(&pdev->dev,
			"rx-internal-delay-ps too large: max %d, got %d",
			EMAC_MAX_DELAY_PS, priv->rx_delay);
		return -EINVAL;
	}

	/* Store the delays in hardware units from here on */
	priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
	priv->rx_delay = delay_ps_to_unit(priv->rx_delay);

	return 0;
}
2014 
/* devm action: undo of_phy_register_fixed_link() at driver detach. */
static void emac_phy_deregister_fixed_link(void *data)
{
	of_phy_deregister_fixed_link(data);
}
2021 
2022 static int emac_probe(struct platform_device *pdev)
2023 {
2024 	struct device *dev = &pdev->dev;
2025 	struct reset_control *reset;
2026 	struct net_device *ndev;
2027 	struct emac_priv *priv;
2028 	int ret;
2029 
2030 	ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
2031 	if (!ndev)
2032 		return -ENOMEM;
2033 
2034 	ndev->hw_features = NETIF_F_SG;
2035 	ndev->features |= ndev->hw_features;
2036 
2037 	ndev->max_mtu = EMAC_RX_BUF_MAX - (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN);
2038 	ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
2039 
2040 	priv = netdev_priv(ndev);
2041 	priv->ndev = ndev;
2042 	priv->pdev = pdev;
2043 	platform_set_drvdata(pdev, priv);
2044 
2045 	ret = emac_config_dt(pdev, priv);
2046 	if (ret < 0)
2047 		return dev_err_probe(dev, ret, "Configuration failed\n");
2048 
2049 	ndev->watchdog_timeo = 5 * HZ;
2050 	ndev->base_addr = (unsigned long)priv->iobase;
2051 	ndev->irq = priv->irq;
2052 
2053 	ndev->ethtool_ops = &emac_ethtool_ops;
2054 	ndev->netdev_ops = &emac_netdev_ops;
2055 
2056 	devm_pm_runtime_enable(&pdev->dev);
2057 
2058 	priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
2059 	if (IS_ERR(priv->bus_clk))
2060 		return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
2061 				     "Failed to get clock\n");
2062 
2063 	reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
2064 								     NULL);
2065 	if (IS_ERR(reset))
2066 		return dev_err_probe(dev, PTR_ERR(reset),
2067 				     "Failed to get reset\n");
2068 
2069 	if (of_phy_is_fixed_link(dev->of_node)) {
2070 		ret = of_phy_register_fixed_link(dev->of_node);
2071 		if (ret)
2072 			return dev_err_probe(dev, ret,
2073 					     "Failed to register fixed-link\n");
2074 
2075 		ret = devm_add_action_or_reset(dev,
2076 					       emac_phy_deregister_fixed_link,
2077 					       dev->of_node);
2078 
2079 		if (ret) {
2080 			dev_err(dev, "devm_add_action_or_reset failed\n");
2081 			return ret;
2082 		}
2083 	}
2084 
2085 	emac_sw_init(priv);
2086 
2087 	ret = emac_mdio_init(priv);
2088 	if (ret)
2089 		goto err_timer_delete;
2090 
2091 	SET_NETDEV_DEV(ndev, &pdev->dev);
2092 
2093 	ret = devm_register_netdev(dev, ndev);
2094 	if (ret) {
2095 		dev_err(dev, "devm_register_netdev failed\n");
2096 		goto err_timer_delete;
2097 	}
2098 
2099 	netif_napi_add(ndev, &priv->napi, emac_rx_poll);
2100 	netif_carrier_off(ndev);
2101 
2102 	return 0;
2103 
2104 err_timer_delete:
2105 	timer_delete_sync(&priv->txtimer);
2106 	timer_delete_sync(&priv->stats_timer);
2107 
2108 	return ret;
2109 }
2110 
/*
 * Driver remove: stop deferred work and timers, then quiesce the MAC.
 * The netdev itself is unregistered afterwards by the devm
 * infrastructure (devm actions run in reverse registration order).
 */
static void emac_remove(struct platform_device *pdev)
{
	struct emac_priv *priv = platform_get_drvdata(pdev);

	/* timer_shutdown_sync() also prevents any future re-arming */
	timer_shutdown_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_shutdown_sync(&priv->stats_timer);

	emac_reset_hw(priv);
}
2122 
/*
 * System resume: re-enable the bus clock and, if the interface was
 * running at suspend time, reopen it and reattach the device.
 */
static int emac_resume(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable bus clock: %d\n", ret);
		return ret;
	}

	/* Nothing more to do if the interface was down */
	if (!netif_running(ndev))
		return 0;

	ret = emac_open(ndev);
	if (ret) {
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	netif_device_attach(ndev);

	/* Restart periodic statistics refresh */
	mod_timer(&priv->stats_timer, jiffies);

	return 0;
}
2150 
/*
 * System suspend: stop a running interface, disable the bus clock and
 * detach the device so emac_resume() can restore the same state.
 */
static int emac_suspend(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	/* If the interface is down, only the clock needs gating */
	if (!ndev || !netif_running(ndev)) {
		clk_disable_unprepare(priv->bus_clk);
		return 0;
	}

	emac_stop(ndev);

	clk_disable_unprepare(priv->bus_clk);
	netif_device_detach(ndev);
	return 0;
}
2167 
/* System-sleep PM callbacks only; runtime PM needs no dedicated ops */
static const struct dev_pm_ops emac_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
};
2171 
/* Device-tree match table */
static const struct of_device_id emac_of_match[] = {
	{ .compatible = "spacemit,k1-emac" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, emac_of_match);
2177 
/* Platform driver glue */
static struct platform_driver emac_driver = {
	.probe = emac_probe,
	.remove = emac_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(emac_of_match),
		.pm = &emac_pm_ops,
	},
};
module_platform_driver(emac_driver);
2188 
2189 MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
2190 MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
2191 MODULE_LICENSE("GPL");
2192