xref: /linux/drivers/net/ethernet/spacemit/k1_emac.c (revision 9e4e86a604dfd06402933467578c4b79f5412b2c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SpacemiT K1 Ethernet driver
4  *
5  * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
6  * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
7  */
8 
9 #include <linux/bitfield.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/ethtool.h>
15 #include <linux/if_vlan.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/iopoll.h>
19 #include <linux/kernel.h>
20 #include <linux/mfd/syscon.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/phy.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/pm.h>
30 #include <linux/regmap.h>
31 #include <linux/reset.h>
32 #include <linux/rtnetlink.h>
33 #include <linux/timer.h>
34 #include <linux/types.h>
35 
36 #include "k1_emac.h"
37 
#define DRIVER_NAME "k1_emac"

/* RX buffer sizes: default, 2 KiB, and the descriptor-field maximum */
#define EMAC_DEFAULT_BUFSIZE		1536
#define EMAC_RX_BUF_2K			2048
#define EMAC_RX_BUF_MAX			FIELD_MAX(RX_DESC_1_BUFFER_SIZE_1_MASK)

/* Tuning parameters from SpacemiT */
#define EMAC_TX_FRAMES			64
#define EMAC_TX_COAL_TIMEOUT		40000
#define EMAC_RX_FRAMES			64
#define EMAC_RX_COAL_TIMEOUT		(600 * 312)

/* FIFO thresholds, default ring sizes and DMA burst configuration */
#define DEFAULT_TX_ALMOST_FULL		0x1f8
#define DEFAULT_TX_THRESHOLD		1518
#define DEFAULT_RX_THRESHOLD		12
#define DEFAULT_TX_RING_NUM		1024
#define DEFAULT_RX_RING_NUM		1024
#define DEFAULT_DMA_BURST		MREGBIT_BURST_16WORD
/* Multicast hash filter width in bits (four 16-bit registers) */
#define HASH_TABLE_SIZE			64
57 
/* One DMA-mapped fragment belonging to a TX descriptor */
struct desc_buf {
	u64 dma_addr;	/* DMA address programmed into the descriptor */
	u16 dma_len;	/* mapped length in bytes */
	u8 map_as_page;	/* nonzero: unmap with dma_unmap_page(), else _single() */
};
63 
/* Software state for one TX descriptor; each holds up to two fragments */
struct emac_tx_desc_buffer {
	struct sk_buff *skb;	/* freed when the descriptor completes */
	struct desc_buf buf[2];
};
68 
/* Software state for one RX descriptor/buffer */
struct emac_rx_desc_buffer {
	struct sk_buff *skb;
	u64 dma_addr;
	u16 dma_len;
	u8 map_as_page;	/* NOTE(review): RX path maps with dma_map_single() only */
};
75 
/**
 * struct emac_desc_ring - Software-side information for one descriptor ring
 * Same structure used for both RX and TX
 * @desc_addr: Virtual address to the descriptor ring memory
 * @desc_dma_addr: DMA address of the descriptor ring
 * @total_size: Size of ring in bytes
 * @total_cnt: Number of descriptors
 * @head: Next descriptor to associate a buffer with (producer index)
 * @tail: Next descriptor to check status bit (consumer index)
 * @rx_desc_buf: Array of descriptors for RX
 * @tx_desc_buf: Array of descriptors for TX, with max of two buffers each
 */
struct emac_desc_ring {
	void *desc_addr;
	dma_addr_t desc_dma_addr;
	u32 total_size;
	u32 total_cnt;
	u32 head;
	u32 tail;
	union {
		struct emac_rx_desc_buffer *rx_desc_buf;
		struct emac_tx_desc_buffer *tx_desc_buf;
	};
};
100 
/* Per-device driver state */
struct emac_priv {
	void __iomem *iobase;		/* mapped MAC register space */
	u32 dma_buf_sz;			/* current RX buffer size */
	struct emac_desc_ring tx_ring;
	struct emac_desc_ring rx_ring;

	struct net_device *ndev;
	struct napi_struct napi;
	struct platform_device *pdev;
	struct clk *bus_clk;
	struct clk *ref_clk;
	struct regmap *regmap_apmu;	/* SoC glue (APMU) syscon */
	u32 regmap_apmu_offset;		/* EMAC control offset within APMU */
	int irq;

	phy_interface_t phy_interface;

	/* Accumulated 64-bit stats plus offsets applied at reset time */
	union emac_hw_tx_stats tx_stats, tx_stats_off;
	union emac_hw_rx_stats rx_stats, rx_stats_off;

	/* TX interrupt coalescing state (frame count + timer fallback) */
	u32 tx_count_frames;
	u32 tx_coal_frames;
	u32 tx_coal_timeout;
	struct work_struct tx_timeout_task;

	struct timer_list txtimer;
	struct timer_list stats_timer;

	/* ns delays applied to RGMII TX/RX clocks */
	u32 tx_delay;
	u32 rx_delay;

	/* Softirq-safe, hold while touching hardware statistics */
	spinlock_t stats_lock;
};
135 
/* Write a 32-bit MAC register at offset @reg */
static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->iobase + reg);
}
140 
/* Read a 32-bit MAC register at offset @reg */
static u32 emac_rd(struct emac_priv *priv, u32 reg)
{
	return readl(priv->iobase + reg);
}
145 
emac_phy_interface_config(struct emac_priv * priv)146 static int emac_phy_interface_config(struct emac_priv *priv)
147 {
148 	u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
149 
150 	if (phy_interface_mode_is_rgmii(priv->phy_interface))
151 		val |= PHY_INTF_RGMII;
152 
153 	regmap_update_bits(priv->regmap_apmu,
154 			   priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
155 			   mask, val);
156 
157 	return 0;
158 }
159 
160 /*
161  * Where the hardware expects a MAC address, it is laid out in this high, med,
162  * low order in three consecutive registers and in this format.
163  */
164 
/*
 * Write @addr into three consecutive registers starting at @reg, two
 * octets per register, low byte in bits 7:0.
 */
static void emac_set_mac_addr_reg(struct emac_priv *priv,
				  const unsigned char *addr,
				  u32 reg)
{
	unsigned int i;

	for (i = 0; i < 3; i++)
		emac_wr(priv, reg + sizeof(u32) * i,
			addr[2 * i + 1] << 8 | addr[2 * i]);
}
173 
/* Program the primary unicast filter (MAC address 1) with @addr */
static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
{
	emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
}
178 
/* Quiesce the MAC: mask all interrupts, stop TX/RX units and the DMA engine */
static void emac_reset_hw(struct emac_priv *priv)
{
	/* Disable all interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Disable DMA */
	emac_wr(priv, DMA_CONTROL, 0x0);
}
192 
/*
 * One-time hardware setup: address filtering, FIFO thresholds, frame size
 * limits, RX interrupt mitigation and DMA configuration. TX/RX units and
 * DMA remain disabled; they are enabled later when the interface comes up.
 */
static void emac_init_hw(struct emac_priv *priv)
{
	u32 rxirq = 0, dma = 0, frame_sz;

	/* Issue all AXI transactions under a single ID (SoC glue setting) */
	regmap_set_bits(priv->regmap_apmu,
			priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
			AXI_SINGLE_ID);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Enable MAC address 1 filtering */
	emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);

	/* Zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);

	/* Configure thresholds */
	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
		DEFAULT_TX_THRESHOLD);
	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);

	/* Set maximum frame size and jabber size based on configured MTU,
	 * accounting for Ethernet header, double VLAN tags, and FCS.
	 */
	frame_sz = priv->ndev->mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;

	emac_wr(priv, MAC_MAXIMUM_FRAME_SIZE, frame_sz);
	emac_wr(priv, MAC_TRANSMIT_JABBER_SIZE, frame_sz);
	emac_wr(priv, MAC_RECEIVE_JABBER_SIZE, frame_sz);

	/* RX IRQ mitigation: interrupt after EMAC_RX_FRAMES frames or timeout */
	rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
			   EMAC_RX_FRAMES);
	rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
			    EMAC_RX_COAL_TIMEOUT);
	rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
	emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);

	/* Disable and set DMA config */
	emac_wr(priv, DMA_CONTROL, 0x0);

	/* Soft-reset the DMA engine, then release it before reconfiguring */
	emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
	usleep_range(9000, 10000);
	emac_wr(priv, DMA_CONFIGURATION, 0x0);
	usleep_range(9000, 10000);

	dma |= MREGBIT_STRICT_BURST;
	dma |= MREGBIT_DMA_64BIT_MODE;
	dma |= DEFAULT_DMA_BURST;

	emac_wr(priv, DMA_CONFIGURATION, dma);
}
251 
/* Kick the TX DMA engine to (re)scan the descriptor ring */
static void emac_dma_start_transmit(struct emac_priv *priv)
{
	/* The actual value written does not matter */
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
}
257 
emac_enable_interrupt(struct emac_priv * priv)258 static void emac_enable_interrupt(struct emac_priv *priv)
259 {
260 	u32 val;
261 
262 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
263 	val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
264 	val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
265 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
266 }
267 
emac_disable_interrupt(struct emac_priv * priv)268 static void emac_disable_interrupt(struct emac_priv *priv)
269 {
270 	u32 val;
271 
272 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
273 	val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
274 	val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
275 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
276 }
277 
emac_tx_avail(struct emac_priv * priv)278 static u32 emac_tx_avail(struct emac_priv *priv)
279 {
280 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
281 	u32 avail;
282 
283 	if (tx_ring->tail > tx_ring->head)
284 		avail = tx_ring->tail - tx_ring->head - 1;
285 	else
286 		avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
287 
288 	return avail;
289 }
290 
/* (Re)arm the TX coalescing timer for tx_coal_timeout microseconds from now */
static void emac_tx_coal_timer_resched(struct emac_priv *priv)
{
	mod_timer(&priv->txtimer,
		  jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
}
296 
/* TX coalescing timeout: poll NAPI so completed TX descriptors get reclaimed */
static void emac_tx_coal_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, txtimer);

	napi_schedule(&priv->napi);
}
303 
emac_tx_should_interrupt(struct emac_priv * priv,u32 pkt_num)304 static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
305 {
306 	priv->tx_count_frames += pkt_num;
307 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
308 		emac_tx_coal_timer_resched(priv);
309 		return false;
310 	}
311 
312 	priv->tx_count_frames = 0;
313 	return true;
314 }
315 
emac_free_tx_buf(struct emac_priv * priv,int i)316 static void emac_free_tx_buf(struct emac_priv *priv, int i)
317 {
318 	struct emac_tx_desc_buffer *tx_buf;
319 	struct emac_desc_ring *tx_ring;
320 	struct desc_buf *buf;
321 	int j;
322 
323 	tx_ring = &priv->tx_ring;
324 	tx_buf = &tx_ring->tx_desc_buf[i];
325 
326 	for (j = 0; j < 2; j++) {
327 		buf = &tx_buf->buf[j];
328 		if (!buf->dma_addr)
329 			continue;
330 
331 		if (buf->map_as_page)
332 			dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
333 				       buf->dma_len, DMA_TO_DEVICE);
334 		else
335 			dma_unmap_single(&priv->pdev->dev,
336 					 buf->dma_addr, buf->dma_len,
337 					 DMA_TO_DEVICE);
338 
339 		buf->dma_addr = 0;
340 		buf->map_as_page = false;
341 	}
342 
343 	if (tx_buf->skb) {
344 		dev_kfree_skb_any(tx_buf->skb);
345 		tx_buf->skb = NULL;
346 	}
347 }
348 
/* Release every buffer in the TX ring and reset the ring indices */
static void emac_clean_tx_desc_ring(struct emac_priv *priv)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	u32 i;

	for (i = 0; i < tx_ring->total_cnt; i++)
		emac_free_tx_buf(priv, i);

	tx_ring->head = 0;
	tx_ring->tail = 0;
}
360 
/* Unmap and free every RX buffer, then reset the ring indices */
static void emac_clean_rx_desc_ring(struct emac_priv *priv)
{
	struct emac_rx_desc_buffer *rx_buf;
	struct emac_desc_ring *rx_ring;
	u32 i;

	rx_ring = &priv->rx_ring;

	for (i = 0; i < rx_ring->total_cnt; i++) {
		rx_buf = &rx_ring->rx_desc_buf[i];

		if (!rx_buf->skb)
			continue;

		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				 rx_buf->dma_len, DMA_FROM_DEVICE);

		dev_kfree_skb(rx_buf->skb);
		rx_buf->skb = NULL;
	}

	rx_ring->tail = 0;
	rx_ring->head = 0;
}
385 
/*
 * Allocate the TX software buffer array and the DMA-coherent descriptor
 * ring. Returns 0 or -ENOMEM; no hardware is touched here.
 */
static int emac_alloc_tx_resources(struct emac_priv *priv)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct platform_device *pdev = priv->pdev;

	tx_ring->tx_desc_buf = kzalloc_objs(*tx_ring->tx_desc_buf,
					    tx_ring->total_cnt);

	if (!tx_ring->tx_desc_buf)
		return -ENOMEM;

	/* Round the descriptor area up to a whole page */
	tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
	tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);

	tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
						&tx_ring->desc_dma_addr,
						GFP_KERNEL);
	if (!tx_ring->desc_addr) {
		kfree(tx_ring->tx_desc_buf);
		return -ENOMEM;
	}

	tx_ring->head = 0;
	tx_ring->tail = 0;

	return 0;
}
413 
/*
 * Allocate the RX software buffer array and the DMA-coherent descriptor
 * ring. Returns 0 or -ENOMEM; skbs are attached later during refill.
 */
static int emac_alloc_rx_resources(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct platform_device *pdev = priv->pdev;

	rx_ring->rx_desc_buf = kzalloc_objs(*rx_ring->rx_desc_buf,
					    rx_ring->total_cnt);
	if (!rx_ring->rx_desc_buf)
		return -ENOMEM;

	rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);

	/* Round the descriptor area up to a whole page */
	rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);

	rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
						&rx_ring->desc_dma_addr,
						GFP_KERNEL);
	if (!rx_ring->desc_addr) {
		kfree(rx_ring->rx_desc_buf);
		return -ENOMEM;
	}

	rx_ring->head = 0;
	rx_ring->tail = 0;

	return 0;
}
441 
emac_free_tx_resources(struct emac_priv * priv)442 static void emac_free_tx_resources(struct emac_priv *priv)
443 {
444 	struct emac_desc_ring *tr = &priv->tx_ring;
445 	struct device *dev = &priv->pdev->dev;
446 
447 	emac_clean_tx_desc_ring(priv);
448 
449 	kfree(tr->tx_desc_buf);
450 	tr->tx_desc_buf = NULL;
451 
452 	dma_free_coherent(dev, tr->total_size, tr->desc_addr,
453 			  tr->desc_dma_addr);
454 	tr->desc_addr = NULL;
455 }
456 
emac_free_rx_resources(struct emac_priv * priv)457 static void emac_free_rx_resources(struct emac_priv *priv)
458 {
459 	struct emac_desc_ring *rr = &priv->rx_ring;
460 	struct device *dev = &priv->pdev->dev;
461 
462 	emac_clean_rx_desc_ring(priv);
463 
464 	kfree(rr->rx_desc_buf);
465 	rr->rx_desc_buf = NULL;
466 
467 	dma_free_coherent(dev, rr->total_size, rr->desc_addr,
468 			  rr->desc_dma_addr);
469 	rr->desc_addr = NULL;
470 }
471 
/*
 * Reclaim completed TX descriptors from tail towards head, freeing their
 * buffers, and wake the queue once enough space has been recovered.
 * Runs under netif_tx_lock to serialize against the xmit path.
 */
static int emac_tx_clean_desc(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	struct emac_desc_ring *tx_ring;
	struct emac_desc *tx_desc;
	u32 i;

	netif_tx_lock(ndev);

	tx_ring = &priv->tx_ring;

	i = tx_ring->tail;

	while (i != tx_ring->head) {
		tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];

		/* Stop checking if desc still own by DMA */
		if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
			break;

		emac_free_tx_buf(priv, i);
		memset(tx_desc, 0, sizeof(struct emac_desc));

		if (++i == tx_ring->total_cnt)
			i = 0;
	}

	tx_ring->tail = i;

	/* Wake the queue once at least a quarter of the ring is free */
	if (unlikely(netif_queue_stopped(ndev) &&
		     emac_tx_avail(priv) > tx_ring->total_cnt / 4))
		netif_wake_queue(ndev);

	netif_tx_unlock(ndev);

	return 0;
}
509 
/*
 * Validate a completed RX descriptor. Returns true if the frame can be
 * passed up the stack; otherwise logs (ratelimited, debug level) why not.
 * The checks run in priority order so only one reason is reported.
 */
static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
{
	const char *msg;
	u32 len;

	len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);

	if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
		msg = "Not last descriptor"; /* This would be a bug */
	else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
		msg = "Runt frame";
	else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
		msg = "Frame CRC error";
	else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
		msg = "Frame exceeds max length";
	else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
		msg = "Frame jabber error";
	else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
		msg = "Frame length error";
	else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
		msg = "Frame length unacceptable";
	else
		return true; /* All good */

	dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);

	return false;
}
538 
emac_alloc_rx_desc_buffers(struct emac_priv * priv)539 static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
540 {
541 	struct emac_desc_ring *rx_ring = &priv->rx_ring;
542 	struct emac_desc rx_desc, *rx_desc_addr;
543 	struct net_device *ndev = priv->ndev;
544 	struct emac_rx_desc_buffer *rx_buf;
545 	struct sk_buff *skb;
546 	u32 i;
547 
548 	i = rx_ring->head;
549 	rx_buf = &rx_ring->rx_desc_buf[i];
550 
551 	while (!rx_buf->skb) {
552 		skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
553 		if (!skb)
554 			break;
555 
556 		skb->dev = ndev;
557 
558 		rx_buf->skb = skb;
559 		rx_buf->dma_len = priv->dma_buf_sz;
560 		rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
561 						  priv->dma_buf_sz,
562 						  DMA_FROM_DEVICE);
563 		if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
564 			dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
565 			dev_kfree_skb_any(skb);
566 			rx_buf->skb = NULL;
567 			break;
568 		}
569 
570 		rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];
571 
572 		memset(&rx_desc, 0, sizeof(rx_desc));
573 
574 		rx_desc.buffer_addr_1 = rx_buf->dma_addr;
575 		rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
576 					   rx_buf->dma_len);
577 
578 		if (++i == rx_ring->total_cnt) {
579 			rx_desc.desc1 |= RX_DESC_1_END_RING;
580 			i = 0;
581 		}
582 
583 		*rx_desc_addr = rx_desc;
584 		dma_wmb();
585 		WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);
586 
587 		rx_buf = &rx_ring->rx_desc_buf[i];
588 	}
589 
590 	rx_ring->head = i;
591 	return;
592 }
593 
/*
 * Process up to @budget completed RX descriptors from tail onward, passing
 * good frames to NAPI GRO and dropping bad ones, then refill the ring.
 * Returns number of packets received.
 */
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct emac_desc_ring *rx_ring;
	struct sk_buff *skb = NULL;
	struct emac_desc *rx_desc;
	u32 got = 0, skb_len, i;

	rx_ring = &priv->rx_ring;

	i = rx_ring->tail;

	while (budget--) {
		rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Stop checking if rx_desc still owned by DMA */
		if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
			break;

		/* Order the OWN-bit read before reading the rest of the desc */
		dma_rmb();

		rx_buf = &rx_ring->rx_desc_buf[i];

		if (!rx_buf->skb)
			break;

		got++;

		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				 rx_buf->dma_len, DMA_FROM_DEVICE);

		if (likely(emac_rx_frame_good(priv, rx_desc))) {
			skb = rx_buf->skb;

			/* Hardware length includes the FCS; strip it */
			skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
					    rx_desc->desc0);
			skb_len -= ETH_FCS_LEN;

			skb_put(skb, skb_len);
			skb->dev = ndev;
			/* NOTE(review): constant per-packet write; looks redundant — confirm */
			ndev->hard_header_len = ETH_HLEN;

			skb->protocol = eth_type_trans(skb, ndev);

			/* No hardware checksum offload on this path */
			skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&priv->napi, skb);

			memset(rx_desc, 0, sizeof(struct emac_desc));
			rx_buf->skb = NULL;
		} else {
			dev_kfree_skb_irq(rx_buf->skb);
			rx_buf->skb = NULL;
		}

		if (++i == rx_ring->total_cnt)
			i = 0;
	}

	rx_ring->tail = i;

	/* Replenish the slots we just consumed */
	emac_alloc_rx_desc_buffers(priv);

	return got;
}
661 
/*
 * NAPI poll: reclaim TX completions, then receive up to @budget frames.
 * Interrupts are re-enabled only once the budget is not exhausted.
 */
static int emac_rx_poll(struct napi_struct *napi, int budget)
{
	struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
	int work_done;

	emac_tx_clean_desc(priv);

	work_done = emac_rx_clean_desc(priv, budget);
	if (work_done < budget && napi_complete_done(napi, work_done))
		emac_enable_interrupt(priv);

	return work_done;
}
675 
676 /*
677  * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
678  *
679  * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
680  * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
681  */
682 
/*
 * DMA-map fragment @frag_idx of @skb (0 = linear part) and record it in
 * both the hardware descriptor and the software bookkeeping. Even
 * fragments land in buffer 1, odd fragments in buffer 2.
 * Returns 0 on success or the dma_mapping_error() code.
 */
static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
			    struct emac_tx_desc_buffer *tx_buf,
			    struct sk_buff *skb, u32 frag_idx)
{
	bool second_buf = frag_idx % 2;
	phys_addr_t dma_addr;
	bool as_page;
	u32 size;
	int ret;

	if (!frag_idx) {
		/* Fragment 0 is the linear (non-paged) part of the skb */
		size = skb_headlen(skb);
		dma_addr = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
		as_page = false;
	} else {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx - 1];

		size = skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		as_page = true;
	}

	ret = dma_mapping_error(dev, dma_addr);
	if (ret)
		return ret;

	tx_buf->buf[second_buf].dma_addr = dma_addr;
	tx_buf->buf[second_buf].dma_len = size;
	tx_buf->buf[second_buf].map_as_page = as_page;

	if (!second_buf) {
		tx_desc->buffer_addr_1 = dma_addr;
		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK,
					     size);
	} else {
		tx_desc->buffer_addr_2 = dma_addr;
		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK,
					     size);
	}

	return 0;
}
726 
/*
 * Map all fragments of @skb into TX descriptors and hand them to DMA.
 * Ownership of the first descriptor is granted last so the engine never
 * sees a partially-built chain. On mapping failure, everything mapped so
 * far is unwound and the skb is dropped.
 */
static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_desc tx_desc, *tx_desc_addr;
	struct device *dev = &priv->pdev->dev;
	struct emac_tx_desc_buffer *tx_buf;
	u32 head, old_head, frag_num, f, i;
	bool buf_idx;

	frag_num = skb_shinfo(skb)->nr_frags;
	head = tx_ring->head;
	old_head = head;

	for (f = 0; f < frag_num + 1; f++) {
		buf_idx = f % 2;

		/*
		 * If using buffer 1, initialize a new desc. Otherwise, use
		 * buffer 2 of previous fragment's desc.
		 */
		if (!buf_idx) {
			tx_buf = &tx_ring->tx_desc_buf[head];
			tx_desc_addr =
				&((struct emac_desc *)tx_ring->desc_addr)[head];
			memset(&tx_desc, 0, sizeof(tx_desc));

			/*
			 * Give ownership for all but first desc initially. For
			 * first desc, give at the end so DMA cannot start
			 * reading uninitialized descs.
			 */
			if (head != old_head)
				tx_desc.desc0 |= TX_DESC_0_OWN;

			if (++head == tx_ring->total_cnt) {
				/* Just used last desc in ring */
				tx_desc.desc1 |= TX_DESC_1_END_RING;
				head = 0;
			}
		}

		if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
			dev_err_ratelimited(&priv->ndev->dev,
					    "Map TX frag %d failed\n", f);
			goto err_free_skb;
		}

		if (f == 0)
			tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;

		if (f == frag_num) {
			/* Last fragment: attach skb and maybe request an IRQ */
			tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
			tx_buf->skb = skb;
			if (emac_tx_should_interrupt(priv, frag_num + 1))
				tx_desc.desc1 |=
					TX_DESC_1_INTERRUPT_ON_COMPLETION;
		}

		*tx_desc_addr = tx_desc;
	}

	/* All descriptors are ready, give ownership for first desc */
	tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
	dma_wmb();
	WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);

	emac_dma_start_transmit(priv);

	tx_ring->head = head;

	return;

err_free_skb:
	dev_dstats_tx_dropped(priv->ndev);

	/* Unwind: unmap every descriptor built for this skb */
	i = old_head;
	while (i != head) {
		emac_free_tx_buf(priv, i);

		if (++i == tx_ring->total_cnt)
			i = 0;
	}

	dev_kfree_skb_any(skb);
}
812 
emac_start_xmit(struct sk_buff * skb,struct net_device * ndev)813 static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
814 {
815 	struct emac_priv *priv = netdev_priv(ndev);
816 	int nfrags = skb_shinfo(skb)->nr_frags;
817 	struct device *dev = &priv->pdev->dev;
818 
819 	if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
820 		if (!netif_queue_stopped(ndev)) {
821 			netif_stop_queue(ndev);
822 			dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
823 		}
824 		return NETDEV_TX_BUSY;
825 	}
826 
827 	emac_tx_mem_map(priv, skb);
828 
829 	/* Make sure there is space in the ring for the next TX. */
830 	if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
831 		netif_stop_queue(ndev);
832 
833 	return NETDEV_TX_OK;
834 }
835 
emac_set_mac_address(struct net_device * ndev,void * addr)836 static int emac_set_mac_address(struct net_device *ndev, void *addr)
837 {
838 	struct emac_priv *priv = netdev_priv(ndev);
839 	int ret = eth_mac_addr(ndev, addr);
840 
841 	if (ret)
842 		return ret;
843 
844 	/* If running, set now; if not running it will be set in emac_up. */
845 	if (netif_running(ndev))
846 		emac_set_mac_addr(priv, ndev->dev_addr);
847 
848 	return 0;
849 }
850 
/* Clear all 64 bits of the multicast hash filter (4 x 16-bit registers) */
static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
{
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
}
858 
859 /*
860  * The upper 6 bits of the Ethernet CRC of the MAC address is used as the hash
861  * when matching multicast addresses.
862  */
emac_ether_addr_hash(u8 addr[ETH_ALEN])863 static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
864 {
865 	u32 crc32 = ether_crc(ETH_ALEN, addr);
866 
867 	return crc32 >> 26;
868 }
869 
870 /* Configure Multicast and Promiscuous modes */
/*
 * ndo_set_rx_mode: configure promiscuous mode and the multicast hash
 * filter according to the interface flags and multicast list.
 */
static void emac_set_rx_mode(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	u32 mc_filter[4] = { 0 };
	u32 hash, reg, bit, val;

	val = emac_rd(priv, MAC_ADDRESS_CONTROL);

	/* Start from non-promiscuous; re-set below if requested */
	val &= ~MREGBIT_PROMISCUOUS_MODE;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promisc mode */
		val |= MREGBIT_PROMISCUOUS_MODE;
	} else if ((ndev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
		/* Accept all multicast frames by setting every bit */
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
	} else if (!netdev_mc_empty(ndev)) {
		emac_mac_multicast_filter_clear(priv);
		netdev_for_each_mc_addr(ha, ndev) {
			/*
			 * The hash table is an array of 4 16-bit registers. It
			 * is treated like an array of 64 bits (bits[hash]).
			 */
			hash = emac_ether_addr_hash(ha->addr);
			reg = hash / 16;
			bit = hash % 16;
			mc_filter[reg] |= BIT(bit);
		}
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
	}

	emac_wr(priv, MAC_ADDRESS_CONTROL, val);
}
912 
emac_change_mtu(struct net_device * ndev,int mtu)913 static int emac_change_mtu(struct net_device *ndev, int mtu)
914 {
915 	struct emac_priv *priv = netdev_priv(ndev);
916 	u32 frame_len;
917 
918 	if (netif_running(ndev)) {
919 		netdev_err(ndev, "must be stopped to change MTU\n");
920 		return -EBUSY;
921 	}
922 
923 	frame_len = mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
924 
925 	if (frame_len <= EMAC_DEFAULT_BUFSIZE)
926 		priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
927 	else if (frame_len <= EMAC_RX_BUF_2K)
928 		priv->dma_buf_sz = EMAC_RX_BUF_2K;
929 	else
930 		priv->dma_buf_sz = EMAC_RX_BUF_MAX;
931 
932 	ndev->mtu = mtu;
933 
934 	return 0;
935 }
936 
/* ndo_tx_timeout: defer recovery to process context via the workqueue */
static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct emac_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
943 
/*
 * MDIO bus read: start a read transaction and poll until the hardware
 * clears the start bit, then return the 16-bit data (or -ETIMEDOUT).
 */
static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct emac_priv *priv = bus->priv;
	u32 cmd = 0, val;
	int ret;

	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
	cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;

	emac_wr(priv, MAC_MDIO_DATA, 0x0);
	emac_wr(priv, MAC_MDIO_CONTROL, cmd);

	/* Hardware clears the start bit when the transaction completes */
	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

	if (ret)
		return ret;

	val = emac_rd(priv, MAC_MDIO_DATA);
	return FIELD_GET(MREGBIT_MDIO_DATA, val);
}
966 
/*
 * MDIO bus write: latch the data, start the transaction and poll until
 * the hardware clears the start bit. Returns 0 or -ETIMEDOUT.
 */
static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
			  u16 value)
{
	struct emac_priv *priv = bus->priv;
	u32 cmd, val;

	emac_wr(priv, MAC_MDIO_DATA, value);

	cmd = FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr) |
	      FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum) |
	      MREGBIT_START_MDIO_TRANS;

	emac_wr(priv, MAC_MDIO_CONTROL, cmd);

	return readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
				  !(val & MREGBIT_START_MDIO_TRANS), 100,
				  10000);
}
987 
/*
 * Allocate and register the MDIO bus, attaching it to the optional
 * "mdio-bus" child node if present. All resources are devm-managed.
 */
static int emac_mdio_init(struct emac_priv *priv)
{
	struct device *dev = &priv->pdev->dev;
	struct device_node *mii_np;
	struct mii_bus *mii;
	int ret;

	mii = devm_mdiobus_alloc(dev);
	if (!mii)
		return -ENOMEM;

	mii->priv = priv;
	mii->name = "k1_emac_mii";
	mii->read = emac_mii_read;
	mii->write = emac_mii_write;
	mii->parent = dev;
	/* No auto-probing; PHYs are described in the device tree */
	mii->phy_mask = ~0;
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);

	mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");

	ret = devm_of_mdiobus_register(dev, mii, mii_np);
	if (ret)
		dev_err_probe(dev, ret, "Failed to register mdio bus\n");

	of_node_put(mii_np);
	return ret;
}
1016 
1017 /*
1018  * Even though this MAC supports gigabit operation, it only provides 32-bit
1019  * statistics counters. The most overflow-prone counters are the "bytes" ones,
1020  * which at gigabit overflow about twice a minute.
1021  *
1022  * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
1023  * every time statistics seem to go backwards. Also, update periodically to
1024  * catch overflows when we are not otherwise checking the statistics often
1025  * enough.
1026  */
1027 
1028 #define EMAC_STATS_TIMER_PERIOD		20
1029 
/*
 * Read one 32-bit hardware statistics counter selected by @cnt through the
 * indirect control/data register interface. Atomic-safe (may be called
 * from the stats timer under the stats spinlock). Returns 0 or -ETIMEDOUT.
 */
static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
			      u32 control_reg, u32 high_reg, u32 low_reg)
{
	u32 val, high, low;
	int ret;

	/* The "read" bit is the same for TX and RX */

	val = MREGBIT_START_TX_COUNTER_READ | cnt;
	emac_wr(priv, control_reg, val);
	/* Read back, presumably to flush the posted write before polling */
	val = emac_rd(priv, control_reg);

	ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
					!(val & MREGBIT_START_TX_COUNTER_READ),
					100, 10000);

	if (ret) {
		/*
		 * This could be caused by the PHY stopping its refclk even when
		 * the link is up, for power saving. See also comments in
		 * emac_stats_update().
		 */
		dev_err_ratelimited(&priv->ndev->dev,
				    "Read stat timeout. PHY clock stopped?\n");
		return ret;
	}

	/* Counter value is split: high 16 bits and low 16 bits */
	high = emac_rd(priv, high_reg);
	low = emac_rd(priv, low_reg);
	*res = high << 16 | lower_16_bits(low);

	return 0;
}
1063 
/* Read TX statistics counter @cnt into @res; returns 0 or negative errno */
static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
				  MAC_TX_STATCTR_DATA_HIGH,
				  MAC_TX_STATCTR_DATA_LOW);
}
1070 
/* Read RX statistics counter @cnt into @res; returns 0 or negative errno */
static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
				  MAC_RX_STATCTR_DATA_HIGH,
				  MAC_RX_STATCTR_DATA_LOW);
}
1077 
/*
 * Fold a fresh 32-bit hardware reading into a software-extended 64-bit
 * counter. A new low half smaller than the stored one means the 32-bit
 * hardware counter wrapped, so carry one into the software-maintained
 * high half.
 */
static void emac_update_counter(u64 *counter, u32 new_low)
{
	u64 high = upper_32_bits(*counter);

	if (new_low < lower_32_bits(*counter))
		high++; /* 32-bit wraparound detected */

	*counter = (high << 32) | new_low;
}
1090 
/*
 * Read every hardware TX/RX counter and fold it into the 64-bit software
 * counters. Caller must hold priv->stats_lock. Reschedules the periodic
 * stats timer on success; returns early (without rescheduling) when the
 * interface is down or a hardware read times out.
 */
static void emac_stats_update(struct emac_priv *priv)
{
	u64 *tx_stats_off = priv->tx_stats_off.array;
	u64 *rx_stats_off = priv->rx_stats_off.array;
	u64 *tx_stats = priv->tx_stats.array;
	u64 *rx_stats = priv->rx_stats.array;
	u32 i, res, offset;

	assert_spin_locked(&priv->stats_lock);

	/*
	 * We can't read statistics if the interface is not up. Also, some PHYs
	 * stop their reference clocks for link down power saving, which also
	 * causes reading statistics to time out. Don't update and don't
	 * reschedule in these cases.
	 */
	if (!netif_running(priv->ndev) ||
	    !netif_carrier_ok(priv->ndev) ||
	    !netif_device_present(priv->ndev)) {
		return;
	}

	for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
		/*
		 * If reading stats times out anyway, the stat registers will be
		 * stuck, and we can't really recover from that.
		 *
		 * Reading statistics also can't return an error, so just return
		 * without updating and without rescheduling.
		 */
		if (emac_tx_read_stat_cnt(priv, i, &res))
			return;

		/*
		 * Re-initializing while bringing interface up resets counters
		 * to zero, so to provide continuity, we add the values saved
		 * last time we did emac_down() to the new hardware-provided
		 * value.
		 */
		offset = lower_32_bits(tx_stats_off[i]);
		emac_update_counter(&tx_stats[i], res + offset);
	}

	/* Similar remarks as TX stats */
	for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
		if (emac_rx_read_stat_cnt(priv, i, &res))
			return;
		offset = lower_32_bits(rx_stats_off[i]);
		emac_update_counter(&rx_stats[i], res + offset);
	}

	mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
}
1144 
/* Periodic timer callback: refresh counters under the stats lock */
static void emac_stats_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, stats_timer);

	spin_lock(&priv->stats_lock);

	emac_stats_update(priv);

	spin_unlock(&priv->stats_lock);
}
1155 
/* Frame-length buckets matching the hardware's RX histogram counters */
static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
	{   64,   64 },
	{   65,  127 },
	{  128,  255 },
	{  256,  511 },
	{  512, 1023 },
	{ 1024, 1518 },
	{ 1519, 4096 },
	{ /* sentinel */ },
};
1166 
1167 /* Like dev_fetch_dstats(), but we only use tx_drops */
/* Like dev_fetch_dstats(), but we only use tx_drops */
static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
{
	const struct pcpu_dstats *stats;
	u64 tx_drops, total = 0;
	unsigned int start;
	int cpu;

	/* Sum the per-CPU tx_drops counters under their seqcount */
	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(priv->ndev->dstats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tx_drops = u64_stats_read(&stats->tx_drops);
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		total += tx_drops;
	}

	return total;
}
1187 
/*
 * .ndo_get_stats64 callback: refresh the hardware counters and map them
 * onto the standard rtnl_link_stats64 fields.
 */
static void emac_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *storage)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_tx_stats *tx_stats;
	union emac_hw_rx_stats *rx_stats;

	tx_stats = &priv->tx_stats;
	rx_stats = &priv->rx_stats;

	/* This is the only software counter */
	storage->tx_dropped = emac_get_stat_tx_drops(priv);

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	storage->tx_packets = tx_stats->stats.tx_ok_pkts;
	storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
	storage->tx_errors = tx_stats->stats.tx_err_pkts;

	storage->rx_packets = rx_stats->stats.rx_ok_pkts;
	storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
	storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
	storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
	storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
	storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;

	/* No single hardware collision counter; sum the three kinds */
	storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
	storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
	storage->collisions += tx_stats->stats.tx_excessclsn_pkts;

	storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
	storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1225 
/* ethtool .get_rmon_stats: RX length-error and histogram counters */
static void emac_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_rx_stats *rx_stats;

	rx_stats = &priv->rx_stats;

	*ranges = emac_rmon_hist_ranges;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
	rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
	rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
	rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;

	/* Only RX has histogram stats */

	rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
	rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
	rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
	rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
	rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
	rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
	rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1258 
/* ethtool .get_eth_mac_stats: IEEE 802.3 MAC counters */
static void emac_get_eth_mac_stats(struct net_device *dev,
				   struct ethtool_eth_mac_stats *mac_stats)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_tx_stats *tx_stats;
	union emac_hw_rx_stats *rx_stats;

	tx_stats = &priv->tx_stats;
	rx_stats = &priv->rx_stats;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
	mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;

	mac_stats->MulticastFramesReceivedOK =
		rx_stats->stats.rx_multicast_pkts;
	mac_stats->BroadcastFramesReceivedOK =
		rx_stats->stats.rx_broadcast_pkts;

	mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
	mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
	mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
	mac_stats->FramesAbortedDueToXSColls =
		tx_stats->stats.tx_excessclsn_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1289 
/* ethtool .get_pause_stats: TX/RX pause frame counters */
static void emac_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *pause_stats)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_tx_stats *tx_stats;
	union emac_hw_rx_stats *rx_stats;

	tx_stats = &priv->tx_stats;
	rx_stats = &priv->rx_stats;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
	pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;

	spin_unlock_bh(&priv->stats_lock);
}
1309 
1310 /* Other statistics that are not derivable from standard statistics */
1311 
/* Map a named stats-union member to its u64 array index plus its name */
#define EMAC_ETHTOOL_STAT(type, name) \
	{ offsetof(type, stats.name) / sizeof(u64), #name }

/* Extra RX counters exposed via ethtool -S (not in standard stats) */
static const struct emac_ethtool_stats {
	size_t offset;
	char str[ETH_GSTRING_LEN];
} emac_ethtool_rx_stats[] = {
	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
};
1322 
emac_get_sset_count(struct net_device * dev,int sset)1323 static int emac_get_sset_count(struct net_device *dev, int sset)
1324 {
1325 	switch (sset) {
1326 	case ETH_SS_STATS:
1327 		return ARRAY_SIZE(emac_ethtool_rx_stats);
1328 	default:
1329 		return -EOPNOTSUPP;
1330 	}
1331 }
1332 
/*
 * ethtool .get_strings: emit the names for the custom RX statistics set.
 *
 * Use ethtool_puts() instead of open-coded memcpy()+pointer bumping: it
 * copies the string and advances @data by ETH_GSTRING_LEN in one step,
 * which is the current idiom for filling ethtool string tables.
 */
static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
			ethtool_puts(&data, emac_ethtool_rx_stats[i].str);
		break;
	}
}
1347 
/* ethtool .get_ethtool_stats: copy out the custom RX counters by index */
static void emac_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct emac_priv *priv = netdev_priv(dev);
	/* View the stats union as a flat u64 array; offsets index into it */
	u64 *rx_stats = (u64 *)&priv->rx_stats;
	int i;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
		data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];

	spin_unlock_bh(&priv->stats_lock);
}
1364 
/* ethtool .get_regs_len: full DMA + MAC register window, in bytes */
static int emac_ethtool_get_regs_len(struct net_device *dev)
{
	return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
}
1369 
/* ethtool .get_regs: dump DMA registers followed by MAC registers */
static void emac_ethtool_get_regs(struct net_device *dev,
				  struct ethtool_regs *regs, void *space)
{
	struct emac_priv *priv = netdev_priv(dev);
	u32 *reg_space = space;
	int i;

	regs->version = 1;

	for (i = 0; i < EMAC_DMA_REG_CNT; i++)
		reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);

	for (i = 0; i < EMAC_MAC_REG_CNT; i++)
		reg_space[i + EMAC_DMA_REG_CNT] =
			emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
}
1386 
/* ethtool .get_drvinfo: driver name and custom-stats count */
static void emac_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
}
1393 
/*
 * Deferred TX-timeout handler: recover the MAC by bouncing the interface
 * (close + reopen) under the RTNL lock.
 */
static void emac_tx_timeout_task(struct work_struct *work)
{
	struct net_device *ndev;
	struct emac_priv *priv;

	priv = container_of(work, struct emac_priv, tx_timeout_task);
	ndev = priv->ndev;

	rtnl_lock();

	/* No need to reset if already down */
	if (!netif_running(ndev)) {
		rtnl_unlock();
		return;
	}

	netdev_err(ndev, "MAC reset due to TX timeout\n");

	netif_trans_update(ndev); /* prevent tx timeout */
	dev_close(ndev);
	dev_open(ndev, NULL);

	rtnl_unlock();
}
1418 
/* One-time software state setup at probe: defaults, lock, work and timers */
static void emac_sw_init(struct emac_priv *priv)
{
	priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;

	priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
	priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;

	spin_lock_init(&priv->stats_lock);

	INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);

	/* TX coalescing defaults (tuning values from SpacemiT) */
	priv->tx_coal_frames = EMAC_TX_FRAMES;
	priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;

	timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
	timer_setup(&priv->stats_timer, emac_stats_timer, 0);
}
1436 
emac_interrupt_handler(int irq,void * dev_id)1437 static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
1438 {
1439 	struct net_device *ndev = (struct net_device *)dev_id;
1440 	struct emac_priv *priv = netdev_priv(ndev);
1441 	bool should_schedule = false;
1442 	u32 clr = 0;
1443 	u32 status;
1444 
1445 	status = emac_rd(priv, DMA_STATUS_IRQ);
1446 
1447 	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
1448 		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
1449 		should_schedule = true;
1450 	}
1451 
1452 	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
1453 		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
1454 
1455 	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
1456 		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
1457 
1458 	if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
1459 		clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
1460 		should_schedule = true;
1461 	}
1462 
1463 	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
1464 		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
1465 
1466 	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
1467 		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
1468 
1469 	if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
1470 		clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
1471 
1472 	if (should_schedule) {
1473 		if (napi_schedule_prep(&priv->napi)) {
1474 			emac_disable_interrupt(priv);
1475 			__napi_schedule_irqoff(&priv->napi);
1476 		}
1477 	}
1478 
1479 	emac_wr(priv, DMA_STATUS_IRQ, clr);
1480 
1481 	return IRQ_HANDLED;
1482 }
1483 
/* Program the TX descriptor base, enable the transmitter and start TX DMA */
static void emac_configure_tx(struct emac_priv *priv)
{
	u32 val;

	/*
	 * Set base address. The register is 32 bits wide; assumes the
	 * descriptor ring was allocated below 4 GiB — TODO confirm the DMA
	 * mask guarantees this.
	 */
	val = (u32)priv->tx_ring.desc_dma_addr;
	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);

	/* Set TX inter-frame gap value, enable transmit */
	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
	val &= ~MREGBIT_IFG_LEN;
	val |= MREGBIT_TRANSMIT_ENABLE;
	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);

	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);

	/* Start TX DMA */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}
1506 
/* Program the RX descriptor base, enable the receiver and start RX DMA */
static void emac_configure_rx(struct emac_priv *priv)
{
	u32 val;

	/* Set base address (32-bit register; see note in emac_configure_tx()) */
	val = (u32)priv->rx_ring.desc_dma_addr;
	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);

	/* Enable receive */
	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
	val |= MREGBIT_RECEIVE_ENABLE;
	val |= MREGBIT_STORE_FORWARD;
	emac_wr(priv, MAC_RECEIVE_CONTROL, val);

	/* Start RX DMA */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_RECEIVE_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}
1526 
/*
 * phylib link-change callback: mirror the PHY's negotiated duplex and
 * speed into the MAC's global control register, and restart the stats
 * timer once the link is up.
 */
static void emac_adjust_link(struct net_device *dev)
{
	struct emac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 ctrl;

	if (phydev->link) {
		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);

		/* Update duplex and speed from PHY */

		FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
			     phydev->duplex == DUPLEX_FULL);

		ctrl &= ~MREGBIT_SPEED;

		switch (phydev->speed) {
		case SPEED_1000:
			ctrl |= MREGBIT_SPEED_1000M;
			break;
		case SPEED_100:
			ctrl |= MREGBIT_SPEED_100M;
			break;
		case SPEED_10:
			ctrl |= MREGBIT_SPEED_10M;
			break;
		default:
			netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
			phydev->speed = SPEED_UNKNOWN;
			break;
		}

		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);

		/*
		 * Reschedule stats updates now that link is up. See comments in
		 * emac_stats_update().
		 */
		mod_timer(&priv->stats_timer, jiffies);
	}

	phy_print_status(phydev);
}
1570 
/*
 * Program the APMU TX/RX delay lines. For RGMII modes the delay lines are
 * enabled with the DT-provided codes (already converted to hardware units);
 * for other modes all delay-line fields are cleared.
 */
static void emac_update_delay_line(struct emac_priv *priv)
{
	u32 mask = 0, val = 0;

	/* Always touch every delay-line field, so non-RGMII clears them */
	mask |= EMAC_RX_DLINE_EN;
	mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
	mask |= EMAC_TX_DLINE_EN;
	mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;

	if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
		val |= EMAC_RX_DLINE_EN;
		val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
				  EMAC_DLINE_STEP_15P6);
		val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);

		val |= EMAC_TX_DLINE_EN;
		val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
				  EMAC_DLINE_STEP_15P6);
		val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
	}

	regmap_update_bits(priv->regmap_apmu,
			   priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
			   mask, val);
}
1596 
emac_phy_connect(struct net_device * ndev)1597 static int emac_phy_connect(struct net_device *ndev)
1598 {
1599 	struct emac_priv *priv = netdev_priv(ndev);
1600 	struct device *dev = &priv->pdev->dev;
1601 	struct phy_device *phydev;
1602 	struct device_node *np;
1603 	int ret;
1604 
1605 	ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
1606 	if (ret) {
1607 		netdev_err(ndev, "No phy-mode found");
1608 		return ret;
1609 	}
1610 
1611 	switch (priv->phy_interface) {
1612 	case PHY_INTERFACE_MODE_RMII:
1613 	case PHY_INTERFACE_MODE_RGMII:
1614 	case PHY_INTERFACE_MODE_RGMII_ID:
1615 	case PHY_INTERFACE_MODE_RGMII_RXID:
1616 	case PHY_INTERFACE_MODE_RGMII_TXID:
1617 		break;
1618 	default:
1619 		netdev_err(ndev, "Unsupported PHY interface %s",
1620 			   phy_modes(priv->phy_interface));
1621 		return -EINVAL;
1622 	}
1623 
1624 	np = of_parse_phandle(dev->of_node, "phy-handle", 0);
1625 	if (!np && of_phy_is_fixed_link(dev->of_node))
1626 		np = of_node_get(dev->of_node);
1627 
1628 	if (!np) {
1629 		netdev_err(ndev, "No PHY specified");
1630 		return -ENODEV;
1631 	}
1632 
1633 	ret = emac_phy_interface_config(priv);
1634 	if (ret)
1635 		goto err_node_put;
1636 
1637 	phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
1638 				priv->phy_interface);
1639 	if (!phydev) {
1640 		netdev_err(ndev, "Could not attach to PHY\n");
1641 		ret = -ENODEV;
1642 		goto err_node_put;
1643 	}
1644 
1645 	phydev->mac_managed_pm = true;
1646 
1647 	emac_update_delay_line(priv);
1648 
1649 	phy_attached_info(phydev);
1650 
1651 err_node_put:
1652 	of_node_put(np);
1653 	return ret;
1654 }
1655 
/*
 * Bring the interface up: connect the PHY, initialize the hardware,
 * configure TX/RX paths, request the (shared) IRQ, enable DMA interrupts,
 * then enable NAPI and the TX queue. Mirrors emac_down().
 *
 * Returns 0 on success or a negative errno with everything unwound.
 */
static int emac_up(struct emac_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;
	int ret;

	pm_runtime_get_sync(&pdev->dev);

	ret = emac_phy_connect(ndev);
	if (ret) {
		dev_err(&pdev->dev, "emac_phy_connect failed\n");
		goto err_pm_put;
	}

	emac_init_hw(priv);

	emac_set_mac_addr(priv, ndev->dev_addr);
	emac_configure_tx(priv);
	emac_configure_rx(priv);

	emac_alloc_rx_desc_buffers(priv);

	phy_start(ndev->phydev);

	ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_reset_disconnect_phy;
	}

	/* Don't enable MAC interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);

	/* Enable DMA interrupts */
	emac_wr(priv, DMA_INTERRUPT_ENABLE,
		MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
			MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
			MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
			MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
			MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	/* Kick the periodic statistics refresh immediately */
	mod_timer(&priv->stats_timer, jiffies);

	return 0;

err_reset_disconnect_phy:
	emac_reset_hw(priv);
	phy_disconnect(ndev->phydev);

err_pm_put:
	pm_runtime_put_sync(&pdev->dev);
	return ret;
}
1714 
/*
 * Tear the interface down: stop the queue, disconnect the PHY, mask and
 * free the IRQ, quiesce NAPI, timers and the timeout worker, reset the
 * hardware, and snapshot the statistics so they survive the reset.
 *
 * Always returns 0.
 */
static int emac_down(struct emac_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;

	netif_stop_queue(ndev);

	phy_disconnect(ndev->phydev);

	/* Mask all interrupt sources before freeing the shared IRQ */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	free_irq(priv->irq, ndev);

	napi_disable(&priv->napi);

	timer_delete_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_delete_sync(&priv->stats_timer);

	emac_reset_hw(priv);

	/* Update and save current stats, see emac_stats_update() for usage */

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	priv->tx_stats_off = priv->tx_stats;
	priv->rx_stats_off = priv->rx_stats;

	spin_unlock_bh(&priv->stats_lock);

	pm_runtime_put_sync(&pdev->dev);
	return 0;
}
1752 
/*
 * .ndo_open: allocate TX/RX descriptor ring resources, then bring the
 * interface up via emac_up(). Rolls back allocations on failure.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	int ret;

	ret = emac_alloc_tx_resources(priv);
	if (ret) {
		dev_err(dev, "Cannot allocate TX resources\n");
		return ret;
	}

	ret = emac_alloc_rx_resources(priv);
	if (ret) {
		dev_err(dev, "Cannot allocate RX resources\n");
		goto err_free_tx;
	}

	ret = emac_up(priv);
	if (ret) {
		dev_err(dev, "Error when bringing interface up\n");
		goto err_free_rx;
	}
	return 0;

err_free_rx:
	emac_free_rx_resources(priv);
err_free_tx:
	emac_free_tx_resources(priv);

	return ret;
}
1786 
/* .ndo_stop: bring the interface down and free the descriptor rings */
static int emac_stop(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);

	emac_down(priv);
	emac_free_tx_resources(priv);
	emac_free_rx_resources(priv);

	return 0;
}
1798 
/* ethtool operations; link settings are delegated to phylib helpers */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_drvinfo		= emac_get_drvinfo,
	.get_link		= ethtool_op_get_link,

	.get_regs		= emac_ethtool_get_regs,
	.get_regs_len		= emac_ethtool_get_regs_len,

	.get_rmon_stats		= emac_get_rmon_stats,
	.get_pause_stats	= emac_get_pause_stats,
	.get_eth_mac_stats	= emac_get_eth_mac_stats,

	.get_sset_count		= emac_get_sset_count,
	.get_strings		= emac_get_strings,
	.get_ethtool_stats	= emac_get_ethtool_stats,
};
1817 
/* Network device operations */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open               = emac_open,
	.ndo_stop               = emac_stop,
	.ndo_start_xmit         = emac_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address    = emac_set_mac_address,
	.ndo_eth_ioctl          = phy_do_ioctl_running,
	.ndo_change_mtu         = emac_change_mtu,
	.ndo_tx_timeout         = emac_tx_timeout,
	.ndo_set_rx_mode        = emac_set_rx_mode,
	.ndo_get_stats64	= emac_get_stats64,
};
1830 
1831 /* Currently we always use 15.6 ps/step for the delay line */
1832 
/* Convert a delay in picoseconds to delay-line units (15.6 ps/step) */
static u32 delay_ps_to_unit(u32 ps)
{
	return DIV_ROUND_CLOSEST(ps * 10, 156);
}
1837 
/* Convert delay-line units (15.6 ps/step) back to picoseconds */
static u32 delay_unit_to_ps(u32 unit)
{
	return DIV_ROUND_CLOSEST(unit * 156, 10);
}
1842 
1843 #define EMAC_MAX_DELAY_UNIT	FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)
1844 
1845 /* Minus one just to be safe from rounding errors */
1846 #define EMAC_MAX_DELAY_PS	(delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
1847 
emac_config_dt(struct platform_device * pdev,struct emac_priv * priv)1848 static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
1849 {
1850 	struct device_node *np = pdev->dev.of_node;
1851 	struct device *dev = &pdev->dev;
1852 	u8 mac_addr[ETH_ALEN] = { 0 };
1853 	int ret;
1854 
1855 	priv->iobase = devm_platform_ioremap_resource(pdev, 0);
1856 	if (IS_ERR(priv->iobase))
1857 		return dev_err_probe(dev, PTR_ERR(priv->iobase),
1858 				     "ioremap failed\n");
1859 
1860 	priv->regmap_apmu =
1861 		syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
1862 						     &priv->regmap_apmu_offset);
1863 
1864 	if (IS_ERR(priv->regmap_apmu))
1865 		return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
1866 				     "failed to get syscon\n");
1867 
1868 	priv->irq = platform_get_irq(pdev, 0);
1869 	if (priv->irq < 0)
1870 		return priv->irq;
1871 
1872 	ret = of_get_mac_address(np, mac_addr);
1873 	if (ret) {
1874 		if (ret == -EPROBE_DEFER)
1875 			return dev_err_probe(dev, ret,
1876 					     "Can't get MAC address\n");
1877 
1878 		dev_info(&pdev->dev, "Using random MAC address\n");
1879 		eth_hw_addr_random(priv->ndev);
1880 	} else {
1881 		eth_hw_addr_set(priv->ndev, mac_addr);
1882 	}
1883 
1884 	priv->tx_delay = 0;
1885 	priv->rx_delay = 0;
1886 
1887 	of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
1888 	of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
1889 
1890 	if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
1891 		dev_err(&pdev->dev,
1892 			"tx-internal-delay-ps too large: max %d, got %d",
1893 			EMAC_MAX_DELAY_PS, priv->tx_delay);
1894 		return -EINVAL;
1895 	}
1896 
1897 	if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
1898 		dev_err(&pdev->dev,
1899 			"rx-internal-delay-ps too large: max %d, got %d",
1900 			EMAC_MAX_DELAY_PS, priv->rx_delay);
1901 		return -EINVAL;
1902 	}
1903 
1904 	priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
1905 	priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
1906 
1907 	return 0;
1908 }
1909 
/* devm action: undo of_phy_register_fixed_link() on driver detach */
static void emac_phy_deregister_fixed_link(void *data)
{
	struct device_node *of_node = data;

	of_phy_deregister_fixed_link(of_node);
}
1916 
emac_probe(struct platform_device * pdev)1917 static int emac_probe(struct platform_device *pdev)
1918 {
1919 	struct device *dev = &pdev->dev;
1920 	struct reset_control *reset;
1921 	struct net_device *ndev;
1922 	struct emac_priv *priv;
1923 	int ret;
1924 
1925 	ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
1926 	if (!ndev)
1927 		return -ENOMEM;
1928 
1929 	ndev->hw_features = NETIF_F_SG;
1930 	ndev->features |= ndev->hw_features;
1931 
1932 	ndev->max_mtu = EMAC_RX_BUF_MAX - (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN);
1933 	ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
1934 
1935 	priv = netdev_priv(ndev);
1936 	priv->ndev = ndev;
1937 	priv->pdev = pdev;
1938 	platform_set_drvdata(pdev, priv);
1939 
1940 	ret = emac_config_dt(pdev, priv);
1941 	if (ret < 0)
1942 		return dev_err_probe(dev, ret, "Configuration failed\n");
1943 
1944 	ndev->watchdog_timeo = 5 * HZ;
1945 	ndev->base_addr = (unsigned long)priv->iobase;
1946 	ndev->irq = priv->irq;
1947 
1948 	ndev->ethtool_ops = &emac_ethtool_ops;
1949 	ndev->netdev_ops = &emac_netdev_ops;
1950 
1951 	devm_pm_runtime_enable(&pdev->dev);
1952 
1953 	priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
1954 	if (IS_ERR(priv->bus_clk))
1955 		return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
1956 				     "Failed to get clock\n");
1957 
1958 	reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
1959 								     NULL);
1960 	if (IS_ERR(reset))
1961 		return dev_err_probe(dev, PTR_ERR(reset),
1962 				     "Failed to get reset\n");
1963 
1964 	if (of_phy_is_fixed_link(dev->of_node)) {
1965 		ret = of_phy_register_fixed_link(dev->of_node);
1966 		if (ret)
1967 			return dev_err_probe(dev, ret,
1968 					     "Failed to register fixed-link\n");
1969 
1970 		ret = devm_add_action_or_reset(dev,
1971 					       emac_phy_deregister_fixed_link,
1972 					       dev->of_node);
1973 
1974 		if (ret) {
1975 			dev_err(dev, "devm_add_action_or_reset failed\n");
1976 			return ret;
1977 		}
1978 	}
1979 
1980 	emac_sw_init(priv);
1981 
1982 	ret = emac_mdio_init(priv);
1983 	if (ret)
1984 		goto err_timer_delete;
1985 
1986 	SET_NETDEV_DEV(ndev, &pdev->dev);
1987 
1988 	ret = devm_register_netdev(dev, ndev);
1989 	if (ret) {
1990 		dev_err(dev, "devm_register_netdev failed\n");
1991 		goto err_timer_delete;
1992 	}
1993 
1994 	netif_napi_add(ndev, &priv->napi, emac_rx_poll);
1995 	netif_carrier_off(ndev);
1996 
1997 	return 0;
1998 
1999 err_timer_delete:
2000 	timer_delete_sync(&priv->txtimer);
2001 	timer_delete_sync(&priv->stats_timer);
2002 
2003 	return ret;
2004 }
2005 
/*
 * Platform remove: shut down timers and the timeout worker, then reset the
 * hardware. The netdev, MDIO bus, clock and reset are devm-managed.
 */
static void emac_remove(struct platform_device *pdev)
{
	struct emac_priv *priv = platform_get_drvdata(pdev);

	/* timer_shutdown_sync() prevents any re-arming after this point */
	timer_shutdown_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_shutdown_sync(&priv->stats_timer);

	emac_reset_hw(priv);
}
2017 
/*
 * System resume: re-enable the bus clock, and if the interface was running
 * at suspend, bring it back up and re-attach it.
 *
 * NOTE(review): emac_open() is called here without the RTNL lock, unlike
 * the .ndo_open path — confirm this is safe against concurrent ndo calls.
 */
static int emac_resume(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable bus clock: %d\n", ret);
		return ret;
	}

	if (!netif_running(ndev))
		return 0;

	ret = emac_open(ndev);
	if (ret) {
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	netif_device_attach(ndev);

	/* Restart the periodic statistics refresh */
	mod_timer(&priv->stats_timer, jiffies);

	return 0;
}
2045 
/*
 * System suspend: stop a running interface, gate the bus clock and detach
 * the device. Mirrors emac_resume().
 */
static int emac_suspend(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!ndev || !netif_running(ndev)) {
		clk_disable_unprepare(priv->bus_clk);
		return 0;
	}

	emac_stop(ndev);

	clk_disable_unprepare(priv->bus_clk);
	netif_device_detach(ndev);
	return 0;
}
2062 
/* System sleep PM callbacks (no runtime PM callbacks needed) */
static const struct dev_pm_ops emac_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
};
2066 
2067 static const struct of_device_id emac_of_match[] = {
2068 	{ .compatible = "spacemit,k1-emac" },
2069 	{ /* sentinel */ },
2070 };
2071 MODULE_DEVICE_TABLE(of, emac_of_match);
2072 
/* Platform driver glue */
static struct platform_driver emac_driver = {
	.probe = emac_probe,
	.remove = emac_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(emac_of_match),
		.pm = &emac_pm_ops,
	},
};
module_platform_driver(emac_driver);
2083 
2084 MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
2085 MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
2086 MODULE_LICENSE("GPL");
2087