xref: /linux/drivers/net/ethernet/spacemit/k1_emac.c (revision 2c7e63d702f6c4209c5af833308e7fcbc7d4ab17)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SpacemiT K1 Ethernet driver
4  *
5  * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
6  * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
7  */
8 
9 #include <linux/bitfield.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/ethtool.h>
15 #include <linux/if_vlan.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/iopoll.h>
19 #include <linux/kernel.h>
20 #include <linux/mfd/syscon.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/phy.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/pm.h>
30 #include <linux/regmap.h>
31 #include <linux/reset.h>
32 #include <linux/rtnetlink.h>
33 #include <linux/timer.h>
34 #include <linux/types.h>
35 
36 #include "k1_emac.h"
37 
38 #define DRIVER_NAME "k1_emac"
39 
40 #define EMAC_DEFAULT_BUFSIZE		1536
41 #define EMAC_RX_BUF_2K			2048
42 #define EMAC_RX_BUF_MAX			FIELD_MAX(RX_DESC_1_BUFFER_SIZE_1_MASK)
43 
44 /* Tuning parameters from SpacemiT */
45 #define EMAC_TX_FRAMES			64
46 #define EMAC_TX_COAL_TIMEOUT		40000
47 #define EMAC_RX_FRAMES			64
48 #define EMAC_RX_COAL_TIMEOUT		(600 * 312)
49 
50 #define DEFAULT_TX_ALMOST_FULL		0x1f8
51 #define DEFAULT_TX_THRESHOLD		1518
52 #define DEFAULT_RX_THRESHOLD		12
53 #define DEFAULT_TX_RING_NUM		1024
54 #define DEFAULT_RX_RING_NUM		1024
55 #define DEFAULT_DMA_BURST		MREGBIT_BURST_16WORD
56 #define HASH_TABLE_SIZE			64
57 
58 struct desc_buf {
59 	u64 dma_addr;
60 	void *buff_addr;
61 	u16 dma_len;
62 	u8 map_as_page;
63 };
64 
65 struct emac_tx_desc_buffer {
66 	struct sk_buff *skb;
67 	struct desc_buf buf[2];
68 };
69 
70 struct emac_rx_desc_buffer {
71 	struct sk_buff *skb;
72 	u64 dma_addr;
73 	void *buff_addr;
74 	u16 dma_len;
75 	u8 map_as_page;
76 };
77 
78 /**
79  * struct emac_desc_ring - Software-side information for one descriptor ring
80  * Same structure used for both RX and TX
81  * @desc_addr: Virtual address to the descriptor ring memory
82  * @desc_dma_addr: DMA address of the descriptor ring
83  * @total_size: Size of ring in bytes
84  * @total_cnt: Number of descriptors
85  * @head: Next descriptor to associate a buffer with
86  * @tail: Next descriptor to check status bit
87  * @rx_desc_buf: Array of descriptors for RX
88  * @tx_desc_buf: Array of descriptors for TX, with max of two buffers each
89  */
90 struct emac_desc_ring {
91 	void *desc_addr;
92 	dma_addr_t desc_dma_addr;
93 	u32 total_size;
94 	u32 total_cnt;
95 	u32 head;
96 	u32 tail;
97 	union {
98 		struct emac_rx_desc_buffer *rx_desc_buf;
99 		struct emac_tx_desc_buffer *tx_desc_buf;
100 	};
101 };
102 
103 struct emac_priv {
104 	void __iomem *iobase;
105 	u32 dma_buf_sz;
106 	struct emac_desc_ring tx_ring;
107 	struct emac_desc_ring rx_ring;
108 
109 	struct net_device *ndev;
110 	struct napi_struct napi;
111 	struct platform_device *pdev;
112 	struct clk *bus_clk;
113 	struct clk *ref_clk;
114 	struct regmap *regmap_apmu;
115 	u32 regmap_apmu_offset;
116 	int irq;
117 
118 	phy_interface_t phy_interface;
119 
120 	union emac_hw_tx_stats tx_stats, tx_stats_off;
121 	union emac_hw_rx_stats rx_stats, rx_stats_off;
122 
123 	u32 tx_count_frames;
124 	u32 tx_coal_frames;
125 	u32 tx_coal_timeout;
126 	struct work_struct tx_timeout_task;
127 
128 	struct timer_list txtimer;
129 	struct timer_list stats_timer;
130 
131 	u32 tx_delay;
132 	u32 rx_delay;
133 
134 	/* Softirq-safe, hold while touching hardware statistics */
135 	spinlock_t stats_lock;
136 };
137 
/* Write a 32-bit MAC/DMA register at byte offset @reg */
static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
{
	void __iomem *addr = priv->iobase + reg;

	writel(val, addr);
}
142 
/* Read a 32-bit MAC/DMA register at byte offset @reg */
static u32 emac_rd(struct emac_priv *priv, u32 reg)
{
	void __iomem *addr = priv->iobase + reg;

	return readl(addr);
}
147 
emac_phy_interface_config(struct emac_priv * priv)148 static int emac_phy_interface_config(struct emac_priv *priv)
149 {
150 	u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
151 
152 	if (phy_interface_mode_is_rgmii(priv->phy_interface))
153 		val |= PHY_INTF_RGMII;
154 
155 	regmap_update_bits(priv->regmap_apmu,
156 			   priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
157 			   mask, val);
158 
159 	return 0;
160 }
161 
162 /*
163  * Where the hardware expects a MAC address, it is laid out in this high, med,
164  * low order in three consecutive registers and in this format.
165  */
166 
static void emac_set_mac_addr_reg(struct emac_priv *priv,
				  const unsigned char *addr,
				  u32 reg)
{
	int i;

	/*
	 * Three consecutive 32-bit registers, each holding two octets of
	 * the address with the lower-numbered octet in bits 7:0.
	 */
	for (i = 0; i < 3; i++)
		emac_wr(priv, reg + sizeof(u32) * i,
			addr[2 * i + 1] << 8 | addr[2 * i]);
}
175 
/*
 * Program @addr into hardware address slot 1 (the slot whose filtering
 * is enabled via MREGBIT_MAC_ADDRESS1_ENABLE in emac_init_hw()).
 */
static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
{
	emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
}
180 
/* Quiesce the controller: mask all IRQs, stop MAC RX/TX, stop the DMA */
static void emac_reset_hw(struct emac_priv *priv)
{
	/* Disable all interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Disable DMA */
	emac_wr(priv, DMA_CONTROL, 0x0);
}
194 
/*
 * One-time hardware bring-up: filtering, FIFO thresholds, frame-size
 * limits derived from the current MTU, RX interrupt mitigation, and a
 * DMA soft reset followed by the burst/width configuration. RX/TX units
 * and interrupts are left disabled; they are enabled elsewhere.
 */
static void emac_init_hw(struct emac_priv *priv)
{
	u32 rxirq = 0, dma = 0, frame_sz;

	/* Issue reads/writes over a single AXI ID (syscon control bit) */
	regmap_set_bits(priv->regmap_apmu,
			priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
			AXI_SINGLE_ID);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Enable MAC address 1 filtering */
	emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);

	/* Zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);

	/* Configure thresholds (values are vendor tuning parameters) */
	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
		DEFAULT_TX_THRESHOLD);
	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);

	/* Set maximum frame size and jabber size based on configured MTU,
	 * accounting for Ethernet header, double VLAN tags, and FCS.
	 */
	frame_sz = priv->ndev->mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;

	emac_wr(priv, MAC_MAXIMUM_FRAME_SIZE, frame_sz);
	emac_wr(priv, MAC_TRANSMIT_JABBER_SIZE, frame_sz);
	emac_wr(priv, MAC_RECEIVE_JABBER_SIZE, frame_sz);

	/* RX IRQ mitigation: fire after EMAC_RX_FRAMES frames or timeout */
	rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
			   EMAC_RX_FRAMES);
	rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
			    EMAC_RX_COAL_TIMEOUT);
	rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
	emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);

	/* Disable and set DMA config */
	emac_wr(priv, DMA_CONTROL, 0x0);

	/* Pulse soft reset; settle times presumably from vendor guidance */
	emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
	usleep_range(9000, 10000);
	emac_wr(priv, DMA_CONFIGURATION, 0x0);
	usleep_range(9000, 10000);

	dma |= MREGBIT_STRICT_BURST;
	dma |= MREGBIT_DMA_64BIT_MODE;
	dma |= DEFAULT_DMA_BURST;

	emac_wr(priv, DMA_CONFIGURATION, dma);
}
253 
/* Kick the TX DMA engine so it (re)scans the descriptor ring */
static void emac_dma_start_transmit(struct emac_priv *priv)
{
	/* The actual value written does not matter */
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
}
259 
emac_enable_interrupt(struct emac_priv * priv)260 static void emac_enable_interrupt(struct emac_priv *priv)
261 {
262 	u32 val;
263 
264 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
265 	val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
266 	val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
267 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
268 }
269 
emac_disable_interrupt(struct emac_priv * priv)270 static void emac_disable_interrupt(struct emac_priv *priv)
271 {
272 	u32 val;
273 
274 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
275 	val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
276 	val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
277 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
278 }
279 
emac_tx_avail(struct emac_priv * priv)280 static u32 emac_tx_avail(struct emac_priv *priv)
281 {
282 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
283 	u32 avail;
284 
285 	if (tx_ring->tail > tx_ring->head)
286 		avail = tx_ring->tail - tx_ring->head - 1;
287 	else
288 		avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
289 
290 	return avail;
291 }
292 
/* Push the TX coalescing timer out by another tx_coal_timeout microseconds */
static void emac_tx_coal_timer_resched(struct emac_priv *priv)
{
	mod_timer(&priv->txtimer,
		  jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
}
298 
/*
 * TX coalescing timer expiry: schedule NAPI so emac_rx_poll() runs,
 * which also reclaims completed TX descriptors via emac_tx_clean_desc().
 */
static void emac_tx_coal_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, txtimer);

	napi_schedule(&priv->napi);
}
305 
emac_tx_should_interrupt(struct emac_priv * priv,u32 pkt_num)306 static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
307 {
308 	priv->tx_count_frames += pkt_num;
309 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
310 		emac_tx_coal_timer_resched(priv);
311 		return false;
312 	}
313 
314 	priv->tx_count_frames = 0;
315 	return true;
316 }
317 
emac_free_tx_buf(struct emac_priv * priv,int i)318 static void emac_free_tx_buf(struct emac_priv *priv, int i)
319 {
320 	struct emac_tx_desc_buffer *tx_buf;
321 	struct emac_desc_ring *tx_ring;
322 	struct desc_buf *buf;
323 	int j;
324 
325 	tx_ring = &priv->tx_ring;
326 	tx_buf = &tx_ring->tx_desc_buf[i];
327 
328 	for (j = 0; j < 2; j++) {
329 		buf = &tx_buf->buf[j];
330 		if (!buf->dma_addr)
331 			continue;
332 
333 		if (buf->map_as_page)
334 			dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
335 				       buf->dma_len, DMA_TO_DEVICE);
336 		else
337 			dma_unmap_single(&priv->pdev->dev,
338 					 buf->dma_addr, buf->dma_len,
339 					 DMA_TO_DEVICE);
340 
341 		buf->dma_addr = 0;
342 		buf->map_as_page = false;
343 		buf->buff_addr = NULL;
344 	}
345 
346 	if (tx_buf->skb) {
347 		dev_kfree_skb_any(tx_buf->skb);
348 		tx_buf->skb = NULL;
349 	}
350 }
351 
emac_clean_tx_desc_ring(struct emac_priv * priv)352 static void emac_clean_tx_desc_ring(struct emac_priv *priv)
353 {
354 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
355 	u32 i;
356 
357 	for (i = 0; i < tx_ring->total_cnt; i++)
358 		emac_free_tx_buf(priv, i);
359 
360 	tx_ring->head = 0;
361 	tx_ring->tail = 0;
362 }
363 
emac_clean_rx_desc_ring(struct emac_priv * priv)364 static void emac_clean_rx_desc_ring(struct emac_priv *priv)
365 {
366 	struct emac_rx_desc_buffer *rx_buf;
367 	struct emac_desc_ring *rx_ring;
368 	u32 i;
369 
370 	rx_ring = &priv->rx_ring;
371 
372 	for (i = 0; i < rx_ring->total_cnt; i++) {
373 		rx_buf = &rx_ring->rx_desc_buf[i];
374 
375 		if (!rx_buf->skb)
376 			continue;
377 
378 		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
379 				 rx_buf->dma_len, DMA_FROM_DEVICE);
380 
381 		dev_kfree_skb(rx_buf->skb);
382 		rx_buf->skb = NULL;
383 	}
384 
385 	rx_ring->tail = 0;
386 	rx_ring->head = 0;
387 }
388 
emac_alloc_tx_resources(struct emac_priv * priv)389 static int emac_alloc_tx_resources(struct emac_priv *priv)
390 {
391 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
392 	struct platform_device *pdev = priv->pdev;
393 
394 	tx_ring->tx_desc_buf = kzalloc_objs(*tx_ring->tx_desc_buf,
395 					    tx_ring->total_cnt);
396 
397 	if (!tx_ring->tx_desc_buf)
398 		return -ENOMEM;
399 
400 	tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
401 	tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);
402 
403 	tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
404 						&tx_ring->desc_dma_addr,
405 						GFP_KERNEL);
406 	if (!tx_ring->desc_addr) {
407 		kfree(tx_ring->tx_desc_buf);
408 		return -ENOMEM;
409 	}
410 
411 	tx_ring->head = 0;
412 	tx_ring->tail = 0;
413 
414 	return 0;
415 }
416 
emac_alloc_rx_resources(struct emac_priv * priv)417 static int emac_alloc_rx_resources(struct emac_priv *priv)
418 {
419 	struct emac_desc_ring *rx_ring = &priv->rx_ring;
420 	struct platform_device *pdev = priv->pdev;
421 
422 	rx_ring->rx_desc_buf = kzalloc_objs(*rx_ring->rx_desc_buf,
423 					    rx_ring->total_cnt);
424 	if (!rx_ring->rx_desc_buf)
425 		return -ENOMEM;
426 
427 	rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);
428 
429 	rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);
430 
431 	rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
432 						&rx_ring->desc_dma_addr,
433 						GFP_KERNEL);
434 	if (!rx_ring->desc_addr) {
435 		kfree(rx_ring->rx_desc_buf);
436 		return -ENOMEM;
437 	}
438 
439 	rx_ring->head = 0;
440 	rx_ring->tail = 0;
441 
442 	return 0;
443 }
444 
emac_free_tx_resources(struct emac_priv * priv)445 static void emac_free_tx_resources(struct emac_priv *priv)
446 {
447 	struct emac_desc_ring *tr = &priv->tx_ring;
448 	struct device *dev = &priv->pdev->dev;
449 
450 	emac_clean_tx_desc_ring(priv);
451 
452 	kfree(tr->tx_desc_buf);
453 	tr->tx_desc_buf = NULL;
454 
455 	dma_free_coherent(dev, tr->total_size, tr->desc_addr,
456 			  tr->desc_dma_addr);
457 	tr->desc_addr = NULL;
458 }
459 
emac_free_rx_resources(struct emac_priv * priv)460 static void emac_free_rx_resources(struct emac_priv *priv)
461 {
462 	struct emac_desc_ring *rr = &priv->rx_ring;
463 	struct device *dev = &priv->pdev->dev;
464 
465 	emac_clean_rx_desc_ring(priv);
466 
467 	kfree(rr->rx_desc_buf);
468 	rr->rx_desc_buf = NULL;
469 
470 	dma_free_coherent(dev, rr->total_size, rr->desc_addr,
471 			  rr->desc_dma_addr);
472 	rr->desc_addr = NULL;
473 }
474 
/*
 * Reclaim TX descriptors completed by the DMA engine, walking from tail
 * towards head and freeing each descriptor's skb and DMA mappings. The
 * TX queue is woken once at least a quarter of the ring is free again.
 *
 * Always returns 0.
 */
static int emac_tx_clean_desc(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	struct emac_desc_ring *tx_ring;
	struct emac_desc *tx_desc;
	u32 i;

	/* Serialize against emac_start_xmit() touching the same ring */
	netif_tx_lock(ndev);

	tx_ring = &priv->tx_ring;

	i = tx_ring->tail;

	while (i != tx_ring->head) {
		tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];

		/* Stop checking if desc still own by DMA */
		if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
			break;

		emac_free_tx_buf(priv, i);
		memset(tx_desc, 0, sizeof(struct emac_desc));

		/* Wrap around at the end of the ring */
		if (++i == tx_ring->total_cnt)
			i = 0;
	}

	tx_ring->tail = i;

	if (unlikely(netif_queue_stopped(ndev) &&
		     emac_tx_avail(priv) > tx_ring->total_cnt / 4))
		netif_wake_queue(ndev);

	netif_tx_unlock(ndev);

	return 0;
}
512 
/*
 * Validate a received frame from its (last) descriptor's status bits and
 * length. Returns true when the frame may be passed up the stack; on any
 * error a rate-limited debug message names the reason and false is
 * returned. The driver only handles single-descriptor frames, so a
 * missing LAST_DESCRIPTOR bit indicates a driver/hardware bug (WARNs).
 */
static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
{
	const char *msg;
	u32 len;

	len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);

	if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
		msg = "Not last descriptor"; /* This would be a bug */
	else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
		msg = "Runt frame";
	else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
		msg = "Frame CRC error";
	else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
		msg = "Frame exceeds max length";
	else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
		msg = "Frame jabber error";
	else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
		msg = "Frame length error";
	else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
		msg = "Frame length unacceptable";
	else
		return true; /* All good */

	dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);

	return false;
}
541 
/*
 * Refill the RX ring from head onwards: allocate and DMA-map an skb for
 * every empty slot, then hand each descriptor to the hardware by setting
 * its OWN bit. Stops early (without error) if allocation or mapping
 * fails; the next refill attempt will retry from the same slot.
 */
static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct emac_desc rx_desc, *rx_desc_addr;
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct sk_buff *skb;
	u32 i;

	i = rx_ring->head;
	rx_buf = &rx_ring->rx_desc_buf[i];

	/* A slot with no skb is one the hardware has nothing to write to */
	while (!rx_buf->skb) {
		skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
		if (!skb)
			break;

		skb->dev = ndev;

		rx_buf->skb = skb;
		rx_buf->dma_len = priv->dma_buf_sz;
		rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
						  priv->dma_buf_sz,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
			dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
			dev_kfree_skb_any(skb);
			rx_buf->skb = NULL;
			break;
		}

		rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Build the descriptor locally before publishing it */
		memset(&rx_desc, 0, sizeof(rx_desc));

		rx_desc.buffer_addr_1 = rx_buf->dma_addr;
		rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
					   rx_buf->dma_len);

		if (++i == rx_ring->total_cnt) {
			/* Last slot: tell hardware to wrap after this one */
			rx_desc.desc1 |= RX_DESC_1_END_RING;
			i = 0;
		}

		/*
		 * Write the full descriptor first, then set OWN last so the
		 * DMA engine never sees a half-initialized descriptor.
		 */
		*rx_desc_addr = rx_desc;
		dma_wmb();
		WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);

		rx_buf = &rx_ring->rx_desc_buf[i];
	}

	rx_ring->head = i;
	return;
}
596 
/*
 * Process up to @budget received frames from the RX ring, passing good
 * frames to the stack via GRO and dropping bad ones, then refill the
 * ring. Returns the number of descriptors consumed (good or bad).
 */
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct emac_desc_ring *rx_ring;
	struct sk_buff *skb = NULL;
	struct emac_desc *rx_desc;
	u32 got = 0, skb_len, i;

	rx_ring = &priv->rx_ring;

	i = rx_ring->tail;

	while (budget--) {
		rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Stop checking if rx_desc still owned by DMA */
		if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
			break;

		/* Order the OWN-bit read before reads of descriptor/data */
		dma_rmb();

		rx_buf = &rx_ring->rx_desc_buf[i];

		if (!rx_buf->skb)
			break;

		got++;

		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				 rx_buf->dma_len, DMA_FROM_DEVICE);

		if (likely(emac_rx_frame_good(priv, rx_desc))) {
			skb = rx_buf->skb;

			/* Hardware length includes the FCS; strip it */
			skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
					    rx_desc->desc0);
			skb_len -= ETH_FCS_LEN;

			skb_put(skb, skb_len);
			skb->dev = ndev;
			/* NOTE(review): per-packet hard_header_len reset looks
			 * redundant — confirm whether it can be dropped.
			 */
			ndev->hard_header_len = ETH_HLEN;

			skb->protocol = eth_type_trans(skb, ndev);

			/* No hardware checksum offload on this MAC */
			skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&priv->napi, skb);

			memset(rx_desc, 0, sizeof(struct emac_desc));
			rx_buf->skb = NULL;
		} else {
			dev_kfree_skb_irq(rx_buf->skb);
			rx_buf->skb = NULL;
		}

		if (++i == rx_ring->total_cnt)
			i = 0;
	}

	rx_ring->tail = i;

	/* Give freshly emptied slots back to the hardware */
	emac_alloc_rx_desc_buffers(priv);

	return got;
}
664 
emac_rx_poll(struct napi_struct * napi,int budget)665 static int emac_rx_poll(struct napi_struct *napi, int budget)
666 {
667 	struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
668 	int work_done;
669 
670 	emac_tx_clean_desc(priv);
671 
672 	work_done = emac_rx_clean_desc(priv, budget);
673 	if (work_done < budget && napi_complete_done(napi, work_done))
674 		emac_enable_interrupt(priv);
675 
676 	return work_done;
677 }
678 
679 /*
680  * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
681  *
682  * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
683  * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
684  */
685 
/*
 * DMA-map fragment @frag_idx of @skb and record it in the descriptor and
 * its software state. Fragment 0 is the skb's linear part; fragment
 * f > 0 is skb_shinfo(skb)->frags[f - 1]. Even fragments land in the
 * descriptor's buffer 1, odd fragments in buffer 2 (see comment above).
 *
 * Returns 0 on success or a negative errno if the mapping failed.
 *
 * Fix: the mapped address is a dma_addr_t (return type of
 * dma_map_single()/skb_frag_dma_map()), not a phys_addr_t — the two
 * types can have different widths depending on kernel configuration.
 */
static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
			    struct emac_tx_desc_buffer *tx_buf,
			    struct sk_buff *skb, u32 frag_idx)
{
	bool map_as_page, buf_idx;
	const skb_frag_t *frag;
	dma_addr_t addr;
	u32 len;
	int ret;

	buf_idx = frag_idx % 2;

	if (frag_idx == 0) {
		/* Non-fragmented part */
		len = skb_headlen(skb);
		addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		map_as_page = false;
	} else {
		/* Fragment */
		frag = &skb_shinfo(skb)->frags[frag_idx - 1];
		len = skb_frag_size(frag);
		addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		map_as_page = true;
	}

	ret = dma_mapping_error(dev, addr);
	if (ret)
		return ret;

	tx_buf->buf[buf_idx].dma_addr = addr;
	tx_buf->buf[buf_idx].dma_len = len;
	tx_buf->buf[buf_idx].map_as_page = map_as_page;

	if (buf_idx == 0) {
		tx_desc->buffer_addr_1 = addr;
		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
	} else {
		tx_desc->buffer_addr_2 = addr;
		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
	}

	return 0;
}
729 
/*
 * Build TX descriptors for @skb (linear part plus page fragments), two
 * buffers per descriptor, hand ownership to the DMA engine, and kick it.
 * On a mapping failure the already-mapped fragments are unwound and the
 * skb is dropped. The caller (emac_start_xmit) has already reserved
 * enough free descriptors.
 */
static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_desc tx_desc, *tx_desc_addr;
	struct device *dev = &priv->pdev->dev;
	struct emac_tx_desc_buffer *tx_buf;
	u32 head, old_head, frag_num, f, i;
	bool buf_idx;

	frag_num = skb_shinfo(skb)->nr_frags;
	head = tx_ring->head;
	old_head = head;

	for (f = 0; f < frag_num + 1; f++) {
		buf_idx = f % 2;

		/*
		 * If using buffer 1, initialize a new desc. Otherwise, use
		 * buffer 2 of previous fragment's desc.
		 */
		if (!buf_idx) {
			tx_buf = &tx_ring->tx_desc_buf[head];
			tx_desc_addr =
				&((struct emac_desc *)tx_ring->desc_addr)[head];
			memset(&tx_desc, 0, sizeof(tx_desc));

			/*
			 * Give ownership for all but first desc initially. For
			 * first desc, give at the end so DMA cannot start
			 * reading uninitialized descs.
			 */
			if (head != old_head)
				tx_desc.desc0 |= TX_DESC_0_OWN;

			if (++head == tx_ring->total_cnt) {
				/* Just used last desc in ring */
				tx_desc.desc1 |= TX_DESC_1_END_RING;
				head = 0;
			}
		}

		if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
			dev_err_ratelimited(&priv->ndev->dev,
					    "Map TX frag %d failed\n", f);
			goto err_free_skb;
		}

		if (f == 0)
			tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;

		if (f == frag_num) {
			tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
			/* skb is freed when this last descriptor completes */
			tx_buf->skb = skb;
			if (emac_tx_should_interrupt(priv, frag_num + 1))
				tx_desc.desc1 |=
					TX_DESC_1_INTERRUPT_ON_COMPLETION;
		}

		*tx_desc_addr = tx_desc;
	}

	/* All descriptors are ready, give ownership for first desc */
	tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
	dma_wmb();
	WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);

	emac_dma_start_transmit(priv);

	tx_ring->head = head;

	return;

err_free_skb:
	dev_dstats_tx_dropped(priv->ndev);

	/* Undo the mappings of every descriptor used so far */
	i = old_head;
	while (i != head) {
		emac_free_tx_buf(priv, i);

		if (++i == tx_ring->total_cnt)
			i = 0;
	}

	dev_kfree_skb_any(skb);
}
815 
emac_start_xmit(struct sk_buff * skb,struct net_device * ndev)816 static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
817 {
818 	struct emac_priv *priv = netdev_priv(ndev);
819 	int nfrags = skb_shinfo(skb)->nr_frags;
820 	struct device *dev = &priv->pdev->dev;
821 
822 	if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
823 		if (!netif_queue_stopped(ndev)) {
824 			netif_stop_queue(ndev);
825 			dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
826 		}
827 		return NETDEV_TX_BUSY;
828 	}
829 
830 	emac_tx_mem_map(priv, skb);
831 
832 	/* Make sure there is space in the ring for the next TX. */
833 	if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
834 		netif_stop_queue(ndev);
835 
836 	return NETDEV_TX_OK;
837 }
838 
emac_set_mac_address(struct net_device * ndev,void * addr)839 static int emac_set_mac_address(struct net_device *ndev, void *addr)
840 {
841 	struct emac_priv *priv = netdev_priv(ndev);
842 	int ret = eth_mac_addr(ndev, addr);
843 
844 	if (ret)
845 		return ret;
846 
847 	/* If running, set now; if not running it will be set in emac_up. */
848 	if (netif_running(ndev))
849 		emac_set_mac_addr(priv, ndev->dev_addr);
850 
851 	return 0;
852 }
853 
/* Clear all 64 bits of the multicast hash filter (4 x 16-bit registers) */
static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
{
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
}
861 
862 /*
863  * The upper 6 bits of the Ethernet CRC of the MAC address is used as the hash
864  * when matching multicast addresses.
865  */
emac_ether_addr_hash(u8 addr[ETH_ALEN])866 static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
867 {
868 	u32 crc32 = ether_crc(ETH_ALEN, addr);
869 
870 	return crc32 >> 26;
871 }
872 
873 /* Configure Multicast and Promiscuous modes */
/* ndo_set_rx_mode: program promiscuous mode and the multicast hash filter */
static void emac_set_rx_mode(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	u32 mc_filter[4] = { 0 };
	u32 hash, reg, bit, val;

	val = emac_rd(priv, MAC_ADDRESS_CONTROL);

	/* Start from non-promiscuous; re-enable below if requested */
	val &= ~MREGBIT_PROMISCUOUS_MODE;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promisc mode */
		val |= MREGBIT_PROMISCUOUS_MODE;
	} else if ((ndev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
		/* Accept all multicast frames by setting every bit
		 * (each hash-table register holds 16 bits)
		 */
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
	} else if (!netdev_mc_empty(ndev)) {
		emac_mac_multicast_filter_clear(priv);
		netdev_for_each_mc_addr(ha, ndev) {
			/*
			 * The hash table is an array of 4 16-bit registers. It
			 * is treated like an array of 64 bits (bits[hash]).
			 */
			hash = emac_ether_addr_hash(ha->addr);
			reg = hash / 16;
			bit = hash % 16;
			mc_filter[reg] |= BIT(bit);
		}
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
	}

	emac_wr(priv, MAC_ADDRESS_CONTROL, val);
}
915 
emac_change_mtu(struct net_device * ndev,int mtu)916 static int emac_change_mtu(struct net_device *ndev, int mtu)
917 {
918 	struct emac_priv *priv = netdev_priv(ndev);
919 	u32 frame_len;
920 
921 	if (netif_running(ndev)) {
922 		netdev_err(ndev, "must be stopped to change MTU\n");
923 		return -EBUSY;
924 	}
925 
926 	frame_len = mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
927 
928 	if (frame_len <= EMAC_DEFAULT_BUFSIZE)
929 		priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
930 	else if (frame_len <= EMAC_RX_BUF_2K)
931 		priv->dma_buf_sz = EMAC_RX_BUF_2K;
932 	else
933 		priv->dma_buf_sz = EMAC_RX_BUF_MAX;
934 
935 	ndev->mtu = mtu;
936 
937 	return 0;
938 }
939 
/* ndo_tx_timeout: defer recovery to the tx_timeout_task work item */
static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct emac_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
946 
/*
 * MDIO bus read: fetch register @regnum of the PHY at @phy_addr.
 * Returns the 16-bit register value, or a negative errno if the
 * transaction did not complete in time.
 */
static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct emac_priv *priv = bus->priv;
	u32 cmd = 0, val;
	int ret;

	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
	cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;

	emac_wr(priv, MAC_MDIO_DATA, 0x0);
	emac_wr(priv, MAC_MDIO_CONTROL, cmd);

	/* Hardware clears the START bit when the transfer completes */
	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

	if (ret)
		return ret;

	val = emac_rd(priv, MAC_MDIO_DATA);
	return FIELD_GET(MREGBIT_MDIO_DATA, val);
}
969 
/*
 * MDIO bus write: store @value into register @regnum of the PHY at
 * @phy_addr. Returns 0 on success or a negative errno on timeout.
 */
static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
			  u16 value)
{
	struct emac_priv *priv = bus->priv;
	u32 cmd = 0, val;
	int ret;

	/* Data must be in place before the transaction is started */
	emac_wr(priv, MAC_MDIO_DATA, value);

	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
	cmd |= MREGBIT_START_MDIO_TRANS;

	emac_wr(priv, MAC_MDIO_CONTROL, cmd);

	/* Hardware clears the START bit when the transfer completes */
	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

	return ret;
}
990 
/*
 * Allocate and register the MDIO bus, attached to the optional
 * "mdio-bus" device-tree child node. Returns 0 or a negative errno.
 */
static int emac_mdio_init(struct emac_priv *priv)
{
	struct device *dev = &priv->pdev->dev;
	struct device_node *mii_np;
	struct mii_bus *mii;
	int ret;

	mii = devm_mdiobus_alloc(dev);
	if (!mii)
		return -ENOMEM;

	mii->priv = priv;
	mii->name = "k1_emac_mii";
	mii->read = emac_mii_read;
	mii->write = emac_mii_write;
	mii->parent = dev;
	/* Mask all addresses from auto-probing; PHYs come from DT */
	mii->phy_mask = ~0;
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);

	/* May be NULL; devm_of_mdiobus_register() handles that case */
	mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");

	ret = devm_of_mdiobus_register(dev, mii, mii_np);
	if (ret)
		dev_err_probe(dev, ret, "Failed to register mdio bus\n");

	of_node_put(mii_np);
	return ret;
}
1019 
1020 /*
1021  * Even though this MAC supports gigabit operation, it only provides 32-bit
1022  * statistics counters. The most overflow-prone counters are the "bytes" ones,
1023  * which at gigabit overflow about twice a minute.
1024  *
1025  * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
1026  * every time statistics seem to go backwards. Also, update periodically to
1027  * catch overflows when we are not otherwise checking the statistics often
1028  * enough.
1029  */
1030 
1031 #define EMAC_STATS_TIMER_PERIOD		20
1032 
/*
 * Latch and read one 32-bit hardware statistics counter selected by
 * @cnt through the given control/high/low register triple. The result
 * is assembled from the high and low 16-bit halves into *@res.
 * Returns 0, or a negative errno if the hardware did not complete the
 * read (e.g. missing PHY refclk, see below).
 */
static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
			      u32 control_reg, u32 high_reg, u32 low_reg)
{
	u32 val, high, low;
	int ret;

	/* The "read" bit is the same for TX and RX */

	val = MREGBIT_START_TX_COUNTER_READ | cnt;
	emac_wr(priv, control_reg, val);
	/* NOTE(review): read-back presumably posts the write before
	 * polling begins — confirm against hardware documentation.
	 */
	val = emac_rd(priv, control_reg);

	ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
					!(val & MREGBIT_START_TX_COUNTER_READ),
					100, 10000);

	if (ret) {
		/*
		 * This could be caused by the PHY stopping its refclk even when
		 * the link is up, for power saving. See also comments in
		 * emac_stats_update().
		 */
		dev_err_ratelimited(&priv->ndev->dev,
				    "Read stat timeout. PHY clock stopped?\n");
		return ret;
	}

	high = emac_rd(priv, high_reg);
	low = emac_rd(priv, low_reg);
	/* Counter value is split 16/16 across the two data registers */
	*res = high << 16 | lower_16_bits(low);

	return 0;
}
1066 
/* Read one TX statistics counter; see emac_read_stat_cnt() */
static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
				  MAC_TX_STATCTR_DATA_HIGH,
				  MAC_TX_STATCTR_DATA_LOW);
}
1073 
/* Read one RX statistics counter; see emac_read_stat_cnt() */
static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
				  MAC_RX_STATCTR_DATA_HIGH,
				  MAC_RX_STATCTR_DATA_LOW);
}
1080 
/*
 * Fold a fresh 32-bit hardware reading into a software-maintained 64-bit
 * counter.  If the new reading is smaller than the stored low half, the
 * hardware counter must have wrapped since the last read, so carry one
 * into the high half.
 */
static void emac_update_counter(u64 *counter, u32 new_low)
{
	u64 high = upper_32_bits(*counter);

	if (lower_32_bits(*counter) > new_low)
		high++;		/* 32-bit hardware counter wrapped */

	*counter = (high << 32) | new_low;
}
1093 
/*
 * Refresh the cached 64-bit statistics from the 32-bit hardware counters
 * and reschedule the periodic refresh.  Caller must hold priv->stats_lock.
 *
 * On any failure to read a counter the update is abandoned and the timer is
 * not rescheduled; emac_adjust_link() restarts it when the link comes up.
 */
static void emac_stats_update(struct emac_priv *priv)
{
	u64 *tx_stats_off = priv->tx_stats_off.array;
	u64 *rx_stats_off = priv->rx_stats_off.array;
	u64 *tx_stats = priv->tx_stats.array;
	u64 *rx_stats = priv->rx_stats.array;
	u32 i, res, offset;

	assert_spin_locked(&priv->stats_lock);

	/*
	 * We can't read statistics if the interface is not up. Also, some PHYs
	 * stop their reference clocks for link down power saving, which also
	 * causes reading statistics to time out. Don't update and don't
	 * reschedule in these cases.
	 */
	if (!netif_running(priv->ndev) ||
	    !netif_carrier_ok(priv->ndev) ||
	    !netif_device_present(priv->ndev)) {
		return;
	}

	for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
		/*
		 * If reading stats times out anyway, the stat registers will be
		 * stuck, and we can't really recover from that.
		 *
		 * Reading statistics also can't return an error, so just return
		 * without updating and without rescheduling.
		 */
		if (emac_tx_read_stat_cnt(priv, i, &res))
			return;

		/*
		 * Re-initializing while bringing interface up resets counters
		 * to zero, so to provide continuity, we add the values saved
		 * last time we did emac_down() to the new hardware-provided
		 * value.
		 */
		offset = lower_32_bits(tx_stats_off[i]);
		emac_update_counter(&tx_stats[i], res + offset);
	}

	/* Similar remarks as TX stats */
	for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
		if (emac_rx_read_stat_cnt(priv, i, &res))
			return;
		offset = lower_32_bits(rx_stats_off[i]);
		emac_update_counter(&rx_stats[i], res + offset);
	}

	/* Reschedule the periodic overflow-catching refresh */
	mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
}
1147 
/* Periodic statistics refresh; catches counter overflows between queries */
static void emac_stats_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, stats_timer);

	spin_lock(&priv->stats_lock);

	emac_stats_update(priv);

	spin_unlock(&priv->stats_lock);
}
1158 
/* RX frame-size histogram buckets reported through get_rmon_stats() */
static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
	{   64,   64 },
	{   65,  127 },
	{  128,  255 },
	{  256,  511 },
	{  512, 1023 },
	{ 1024, 1518 },
	{ 1519, 4096 },
	{ /* sentinel */ },
};
1169 
1170 /* Like dev_fetch_dstats(), but we only use tx_drops */
emac_get_stat_tx_drops(struct emac_priv * priv)1171 static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
1172 {
1173 	const struct pcpu_dstats *stats;
1174 	u64 tx_drops, total = 0;
1175 	unsigned int start;
1176 	int cpu;
1177 
1178 	for_each_possible_cpu(cpu) {
1179 		stats = per_cpu_ptr(priv->ndev->dstats, cpu);
1180 		do {
1181 			start = u64_stats_fetch_begin(&stats->syncp);
1182 			tx_drops = u64_stats_read(&stats->tx_drops);
1183 		} while (u64_stats_fetch_retry(&stats->syncp, start));
1184 
1185 		total += tx_drops;
1186 	}
1187 
1188 	return total;
1189 }
1190 
emac_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * storage)1191 static void emac_get_stats64(struct net_device *dev,
1192 			     struct rtnl_link_stats64 *storage)
1193 {
1194 	struct emac_priv *priv = netdev_priv(dev);
1195 	union emac_hw_tx_stats *tx_stats;
1196 	union emac_hw_rx_stats *rx_stats;
1197 
1198 	tx_stats = &priv->tx_stats;
1199 	rx_stats = &priv->rx_stats;
1200 
1201 	/* This is the only software counter */
1202 	storage->tx_dropped = emac_get_stat_tx_drops(priv);
1203 
1204 	spin_lock_bh(&priv->stats_lock);
1205 
1206 	emac_stats_update(priv);
1207 
1208 	storage->tx_packets = tx_stats->stats.tx_ok_pkts;
1209 	storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
1210 	storage->tx_errors = tx_stats->stats.tx_err_pkts;
1211 
1212 	storage->rx_packets = rx_stats->stats.rx_ok_pkts;
1213 	storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
1214 	storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
1215 	storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
1216 	storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
1217 	storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;
1218 
1219 	storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
1220 	storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
1221 	storage->collisions += tx_stats->stats.tx_excessclsn_pkts;
1222 
1223 	storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
1224 	storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;
1225 
1226 	spin_unlock_bh(&priv->stats_lock);
1227 }
1228 
emac_get_rmon_stats(struct net_device * dev,struct ethtool_rmon_stats * rmon_stats,const struct ethtool_rmon_hist_range ** ranges)1229 static void emac_get_rmon_stats(struct net_device *dev,
1230 				struct ethtool_rmon_stats *rmon_stats,
1231 				const struct ethtool_rmon_hist_range **ranges)
1232 {
1233 	struct emac_priv *priv = netdev_priv(dev);
1234 	union emac_hw_rx_stats *rx_stats;
1235 
1236 	rx_stats = &priv->rx_stats;
1237 
1238 	*ranges = emac_rmon_hist_ranges;
1239 
1240 	spin_lock_bh(&priv->stats_lock);
1241 
1242 	emac_stats_update(priv);
1243 
1244 	rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
1245 	rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
1246 	rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
1247 	rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;
1248 
1249 	/* Only RX has histogram stats */
1250 
1251 	rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
1252 	rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
1253 	rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
1254 	rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
1255 	rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
1256 	rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
1257 	rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;
1258 
1259 	spin_unlock_bh(&priv->stats_lock);
1260 }
1261 
emac_get_eth_mac_stats(struct net_device * dev,struct ethtool_eth_mac_stats * mac_stats)1262 static void emac_get_eth_mac_stats(struct net_device *dev,
1263 				   struct ethtool_eth_mac_stats *mac_stats)
1264 {
1265 	struct emac_priv *priv = netdev_priv(dev);
1266 	union emac_hw_tx_stats *tx_stats;
1267 	union emac_hw_rx_stats *rx_stats;
1268 
1269 	tx_stats = &priv->tx_stats;
1270 	rx_stats = &priv->rx_stats;
1271 
1272 	spin_lock_bh(&priv->stats_lock);
1273 
1274 	emac_stats_update(priv);
1275 
1276 	mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
1277 	mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;
1278 
1279 	mac_stats->MulticastFramesReceivedOK =
1280 		rx_stats->stats.rx_multicast_pkts;
1281 	mac_stats->BroadcastFramesReceivedOK =
1282 		rx_stats->stats.rx_broadcast_pkts;
1283 
1284 	mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
1285 	mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
1286 	mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
1287 	mac_stats->FramesAbortedDueToXSColls =
1288 		tx_stats->stats.tx_excessclsn_pkts;
1289 
1290 	spin_unlock_bh(&priv->stats_lock);
1291 }
1292 
emac_get_pause_stats(struct net_device * dev,struct ethtool_pause_stats * pause_stats)1293 static void emac_get_pause_stats(struct net_device *dev,
1294 				 struct ethtool_pause_stats *pause_stats)
1295 {
1296 	struct emac_priv *priv = netdev_priv(dev);
1297 	union emac_hw_tx_stats *tx_stats;
1298 	union emac_hw_rx_stats *rx_stats;
1299 
1300 	tx_stats = &priv->tx_stats;
1301 	rx_stats = &priv->rx_stats;
1302 
1303 	spin_lock_bh(&priv->stats_lock);
1304 
1305 	emac_stats_update(priv);
1306 
1307 	pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
1308 	pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;
1309 
1310 	spin_unlock_bh(&priv->stats_lock);
1311 }
1312 
/* Other statistics that are not derivable from standard statistics */

/* Map a named field of the stats union to its u64 index plus its name */
#define EMAC_ETHTOOL_STAT(type, name) \
	{ offsetof(type, stats.name) / sizeof(u64), #name }

static const struct emac_ethtool_stats {
	size_t offset;		/* index into the stats union viewed as u64[] */
	char str[ETH_GSTRING_LEN];
} emac_ethtool_rx_stats[] = {
	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
};
1325 
emac_get_sset_count(struct net_device * dev,int sset)1326 static int emac_get_sset_count(struct net_device *dev, int sset)
1327 {
1328 	switch (sset) {
1329 	case ETH_SS_STATS:
1330 		return ARRAY_SIZE(emac_ethtool_rx_stats);
1331 	default:
1332 		return -EOPNOTSUPP;
1333 	}
1334 }
1335 
/* ethtool get_strings: emit the names of the custom statistics */
static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       emac_ethtool_rx_stats[i].str, ETH_GSTRING_LEN);
}
1350 
/* ethtool get_ethtool_stats: copy out the custom RX statistics */
static void emac_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct emac_priv *priv = netdev_priv(dev);
	const u64 *hw_stats = (u64 *)&priv->rx_stats;
	int idx;

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	/* Each entry records its own u64 index into the stats union */
	for (idx = 0; idx < ARRAY_SIZE(emac_ethtool_rx_stats); idx++)
		data[idx] = hw_stats[emac_ethtool_rx_stats[idx].offset];

	spin_unlock_bh(&priv->stats_lock);
}
1367 
/* Size of the register dump: the DMA bank plus the MAC bank */
static int emac_ethtool_get_regs_len(struct net_device *dev)
{
	return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
}
1372 
emac_ethtool_get_regs(struct net_device * dev,struct ethtool_regs * regs,void * space)1373 static void emac_ethtool_get_regs(struct net_device *dev,
1374 				  struct ethtool_regs *regs, void *space)
1375 {
1376 	struct emac_priv *priv = netdev_priv(dev);
1377 	u32 *reg_space = space;
1378 	int i;
1379 
1380 	regs->version = 1;
1381 
1382 	for (i = 0; i < EMAC_DMA_REG_CNT; i++)
1383 		reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);
1384 
1385 	for (i = 0; i < EMAC_MAC_REG_CNT; i++)
1386 		reg_space[i + EMAC_DMA_REG_CNT] =
1387 			emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
1388 }
1389 
/* ethtool get_drvinfo: driver name and custom statistic count */
static void emac_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
}
1396 
emac_tx_timeout_task(struct work_struct * work)1397 static void emac_tx_timeout_task(struct work_struct *work)
1398 {
1399 	struct net_device *ndev;
1400 	struct emac_priv *priv;
1401 
1402 	priv = container_of(work, struct emac_priv, tx_timeout_task);
1403 	ndev = priv->ndev;
1404 
1405 	rtnl_lock();
1406 
1407 	/* No need to reset if already down */
1408 	if (!netif_running(ndev)) {
1409 		rtnl_unlock();
1410 		return;
1411 	}
1412 
1413 	netdev_err(ndev, "MAC reset due to TX timeout\n");
1414 
1415 	netif_trans_update(ndev); /* prevent tx timeout */
1416 	dev_close(ndev);
1417 	dev_open(ndev, NULL);
1418 
1419 	rtnl_unlock();
1420 }
1421 
/* One-time software state initialization at probe time */
static void emac_sw_init(struct emac_priv *priv)
{
	priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;

	priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
	priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;

	spin_lock_init(&priv->stats_lock);

	INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);

	/* TX interrupt coalescing defaults (tuning values from SpacemiT) */
	priv->tx_coal_frames = EMAC_TX_FRAMES;
	priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;

	timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
	timer_setup(&priv->stats_timer, emac_stats_timer, 0);
}
1439 
emac_interrupt_handler(int irq,void * dev_id)1440 static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
1441 {
1442 	struct net_device *ndev = (struct net_device *)dev_id;
1443 	struct emac_priv *priv = netdev_priv(ndev);
1444 	bool should_schedule = false;
1445 	u32 clr = 0;
1446 	u32 status;
1447 
1448 	status = emac_rd(priv, DMA_STATUS_IRQ);
1449 
1450 	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
1451 		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
1452 		should_schedule = true;
1453 	}
1454 
1455 	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
1456 		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
1457 
1458 	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
1459 		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
1460 
1461 	if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
1462 		clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
1463 		should_schedule = true;
1464 	}
1465 
1466 	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
1467 		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
1468 
1469 	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
1470 		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
1471 
1472 	if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
1473 		clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
1474 
1475 	if (should_schedule) {
1476 		if (napi_schedule_prep(&priv->napi)) {
1477 			emac_disable_interrupt(priv);
1478 			__napi_schedule_irqoff(&priv->napi);
1479 		}
1480 	}
1481 
1482 	emac_wr(priv, DMA_STATUS_IRQ, clr);
1483 
1484 	return IRQ_HANDLED;
1485 }
1486 
/* Program the TX ring base, enable the MAC transmitter, and start TX DMA */
static void emac_configure_tx(struct emac_priv *priv)
{
	u32 val;

	/* Set base address */
	val = (u32)priv->tx_ring.desc_dma_addr;
	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);

	/* Set TX inter-frame gap value, enable transmit */
	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
	val &= ~MREGBIT_IFG_LEN;
	val |= MREGBIT_TRANSMIT_ENABLE;
	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);

	/* Disable auto-polling; TX is kicked explicitly */
	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);

	/* Start TX DMA */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}
1509 
/* Program the RX ring base, enable the MAC receiver, and start RX DMA */
static void emac_configure_rx(struct emac_priv *priv)
{
	u32 val;

	/* Set base address */
	val = (u32)priv->rx_ring.desc_dma_addr;
	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);

	/* Enable receive */
	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
	val |= MREGBIT_RECEIVE_ENABLE;
	val |= MREGBIT_STORE_FORWARD;
	emac_wr(priv, MAC_RECEIVE_CONTROL, val);

	/* Start RX DMA */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_RECEIVE_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}
1529 
/*
 * phylib adjust_link callback: mirror the PHY's negotiated duplex and speed
 * into MAC_GLOBAL_CONTROL, and restart the stats timer when the link is up.
 */
static void emac_adjust_link(struct net_device *dev)
{
	struct emac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 ctrl;

	if (phydev->link) {
		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);

		/* Update duplex and speed from PHY */

		FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
			     phydev->duplex == DUPLEX_FULL);

		ctrl &= ~MREGBIT_SPEED;

		switch (phydev->speed) {
		case SPEED_1000:
			ctrl |= MREGBIT_SPEED_1000M;
			break;
		case SPEED_100:
			ctrl |= MREGBIT_SPEED_100M;
			break;
		case SPEED_10:
			ctrl |= MREGBIT_SPEED_10M;
			break;
		default:
			netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
			phydev->speed = SPEED_UNKNOWN;
			break;
		}

		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);

		/*
		 * Reschedule stats updates now that link is up. See comments in
		 * emac_stats_update().
		 */
		mod_timer(&priv->stats_timer, jiffies);
	}

	phy_print_status(phydev);
}
1573 
emac_update_delay_line(struct emac_priv * priv)1574 static void emac_update_delay_line(struct emac_priv *priv)
1575 {
1576 	u32 mask = 0, val = 0;
1577 
1578 	mask |= EMAC_RX_DLINE_EN;
1579 	mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
1580 	mask |= EMAC_TX_DLINE_EN;
1581 	mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;
1582 
1583 	if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
1584 		val |= EMAC_RX_DLINE_EN;
1585 		val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
1586 				  EMAC_DLINE_STEP_15P6);
1587 		val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);
1588 
1589 		val |= EMAC_TX_DLINE_EN;
1590 		val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
1591 				  EMAC_DLINE_STEP_15P6);
1592 		val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
1593 	}
1594 
1595 	regmap_update_bits(priv->regmap_apmu,
1596 			   priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
1597 			   mask, val);
1598 }
1599 
/*
 * Resolve the PHY from the device tree and connect to it.
 *
 * Supports RMII and the RGMII variants, plus fixed-link.  On success the
 * delay lines are programmed for the chosen interface mode.  Returns 0 or
 * a negative errno.
 */
static int emac_phy_connect(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	struct phy_device *phydev;
	struct device_node *np;
	int ret;

	ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
	if (ret) {
		netdev_err(ndev, "No phy-mode found");
		return ret;
	}

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		break;
	default:
		netdev_err(ndev, "Unsupported PHY interface %s",
			   phy_modes(priv->phy_interface));
		return -EINVAL;
	}

	/* For fixed-link the MAC node itself acts as the PHY node */
	np = of_parse_phandle(dev->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(dev->of_node))
		np = of_node_get(dev->of_node);

	if (!np) {
		netdev_err(ndev, "No PHY specified");
		return -ENODEV;
	}

	ret = emac_phy_interface_config(priv);
	if (ret)
		goto err_node_put;

	phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
				priv->phy_interface);
	if (!phydev) {
		netdev_err(ndev, "Could not attach to PHY\n");
		ret = -ENODEV;
		goto err_node_put;
	}

	/* The driver handles PHY PM itself in suspend/resume */
	phydev->mac_managed_pm = true;

	emac_update_delay_line(priv);

	phy_attached_info(phydev);

	/* Success also flows through here, with ret == 0 */
err_node_put:
	of_node_put(np);
	return ret;
}
1658 
/*
 * Bring the hardware up: connect the PHY, initialize the MAC, arm the
 * descriptor rings, request the IRQ, and enable DMA interrupts and NAPI.
 * Resources (rings) must already be allocated by emac_open().
 */
static int emac_up(struct emac_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;
	int ret;

	pm_runtime_get_sync(&pdev->dev);

	ret = emac_phy_connect(ndev);
	if (ret) {
		dev_err(&pdev->dev, "emac_phy_connect failed\n");
		goto err_pm_put;
	}

	emac_init_hw(priv);

	emac_set_mac_addr(priv, ndev->dev_addr);
	emac_configure_tx(priv);
	emac_configure_rx(priv);

	emac_alloc_rx_desc_buffers(priv);

	phy_start(ndev->phydev);

	ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_reset_disconnect_phy;
	}

	/* Don't enable MAC interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);

	/* Enable DMA interrupts */
	emac_wr(priv, DMA_INTERRUPT_ENABLE,
		MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
			MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
			MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
			MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
			MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	/* Start statistics updates now that the interface is up */
	mod_timer(&priv->stats_timer, jiffies);

	return 0;

err_reset_disconnect_phy:
	emac_reset_hw(priv);
	phy_disconnect(ndev->phydev);

err_pm_put:
	pm_runtime_put_sync(&pdev->dev);
	return ret;
}
1717 
/*
 * Tear the hardware down: quiesce the queue, disconnect the PHY, mask and
 * free the IRQ, stop NAPI and timers, reset the MAC, and snapshot the
 * statistics so they survive the counter reset on the next emac_up().
 */
static int emac_down(struct emac_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct net_device *ndev = priv->ndev;

	netif_stop_queue(ndev);

	phy_disconnect(ndev->phydev);

	/* Mask all interrupt sources before freeing the IRQ */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	free_irq(priv->irq, ndev);

	napi_disable(&priv->napi);

	timer_delete_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_delete_sync(&priv->stats_timer);

	emac_reset_hw(priv);

	/* Update and save current stats, see emac_stats_update() for usage */

	spin_lock_bh(&priv->stats_lock);

	emac_stats_update(priv);

	priv->tx_stats_off = priv->tx_stats;
	priv->rx_stats_off = priv->rx_stats;

	spin_unlock_bh(&priv->stats_lock);

	pm_runtime_put_sync(&pdev->dev);
	return 0;
}
1755 
1756 /* Called when net interface is brought up. */
emac_open(struct net_device * ndev)1757 static int emac_open(struct net_device *ndev)
1758 {
1759 	struct emac_priv *priv = netdev_priv(ndev);
1760 	struct device *dev = &priv->pdev->dev;
1761 	int ret;
1762 
1763 	ret = emac_alloc_tx_resources(priv);
1764 	if (ret) {
1765 		dev_err(dev, "Cannot allocate TX resources\n");
1766 		return ret;
1767 	}
1768 
1769 	ret = emac_alloc_rx_resources(priv);
1770 	if (ret) {
1771 		dev_err(dev, "Cannot allocate RX resources\n");
1772 		goto err_free_tx;
1773 	}
1774 
1775 	ret = emac_up(priv);
1776 	if (ret) {
1777 		dev_err(dev, "Error when bringing interface up\n");
1778 		goto err_free_rx;
1779 	}
1780 	return 0;
1781 
1782 err_free_rx:
1783 	emac_free_rx_resources(priv);
1784 err_free_tx:
1785 	emac_free_tx_resources(priv);
1786 
1787 	return ret;
1788 }
1789 
/* Called when interface is brought down. */
static int emac_stop(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);

	/* Reverse of emac_open(): hardware down, then free the rings */
	emac_down(priv);
	emac_free_tx_resources(priv);
	emac_free_rx_resources(priv);

	return 0;
}
1801 
/* ethtool interface; link settings are delegated to phylib helpers */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_drvinfo		= emac_get_drvinfo,
	.get_link		= ethtool_op_get_link,

	.get_regs		= emac_ethtool_get_regs,
	.get_regs_len		= emac_ethtool_get_regs_len,

	.get_rmon_stats		= emac_get_rmon_stats,
	.get_pause_stats	= emac_get_pause_stats,
	.get_eth_mac_stats	= emac_get_eth_mac_stats,

	.get_sset_count		= emac_get_sset_count,
	.get_strings		= emac_get_strings,
	.get_ethtool_stats	= emac_get_ethtool_stats,
};
1820 
/* Network device operations */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open               = emac_open,
	.ndo_stop               = emac_stop,
	.ndo_start_xmit         = emac_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address    = emac_set_mac_address,
	.ndo_eth_ioctl          = phy_do_ioctl_running,
	.ndo_change_mtu         = emac_change_mtu,
	.ndo_tx_timeout         = emac_tx_timeout,
	.ndo_set_rx_mode        = emac_set_rx_mode,
	.ndo_get_stats64	= emac_get_stats64,
};
1833 
/* Currently we always use 15.6 ps/step for the delay line */

/* Convert a delay in picoseconds to delay-line units (15.6 ps each) */
static u32 delay_ps_to_unit(u32 ps)
{
	return DIV_ROUND_CLOSEST(ps * 10, 156);
}
1840 
/* Convert delay-line units (15.6 ps each) back to picoseconds */
static u32 delay_unit_to_ps(u32 unit)
{
	return DIV_ROUND_CLOSEST(unit * 156, 10);
}
1845 
1846 #define EMAC_MAX_DELAY_UNIT	FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)
1847 
1848 /* Minus one just to be safe from rounding errors */
1849 #define EMAC_MAX_DELAY_PS	(delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
1850 
emac_config_dt(struct platform_device * pdev,struct emac_priv * priv)1851 static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
1852 {
1853 	struct device_node *np = pdev->dev.of_node;
1854 	struct device *dev = &pdev->dev;
1855 	u8 mac_addr[ETH_ALEN] = { 0 };
1856 	int ret;
1857 
1858 	priv->iobase = devm_platform_ioremap_resource(pdev, 0);
1859 	if (IS_ERR(priv->iobase))
1860 		return dev_err_probe(dev, PTR_ERR(priv->iobase),
1861 				     "ioremap failed\n");
1862 
1863 	priv->regmap_apmu =
1864 		syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
1865 						     &priv->regmap_apmu_offset);
1866 
1867 	if (IS_ERR(priv->regmap_apmu))
1868 		return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
1869 				     "failed to get syscon\n");
1870 
1871 	priv->irq = platform_get_irq(pdev, 0);
1872 	if (priv->irq < 0)
1873 		return priv->irq;
1874 
1875 	ret = of_get_mac_address(np, mac_addr);
1876 	if (ret) {
1877 		if (ret == -EPROBE_DEFER)
1878 			return dev_err_probe(dev, ret,
1879 					     "Can't get MAC address\n");
1880 
1881 		dev_info(&pdev->dev, "Using random MAC address\n");
1882 		eth_hw_addr_random(priv->ndev);
1883 	} else {
1884 		eth_hw_addr_set(priv->ndev, mac_addr);
1885 	}
1886 
1887 	priv->tx_delay = 0;
1888 	priv->rx_delay = 0;
1889 
1890 	of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
1891 	of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
1892 
1893 	if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
1894 		dev_err(&pdev->dev,
1895 			"tx-internal-delay-ps too large: max %d, got %d",
1896 			EMAC_MAX_DELAY_PS, priv->tx_delay);
1897 		return -EINVAL;
1898 	}
1899 
1900 	if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
1901 		dev_err(&pdev->dev,
1902 			"rx-internal-delay-ps too large: max %d, got %d",
1903 			EMAC_MAX_DELAY_PS, priv->rx_delay);
1904 		return -EINVAL;
1905 	}
1906 
1907 	priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
1908 	priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
1909 
1910 	return 0;
1911 }
1912 
/* devm action callback: undo of_phy_register_fixed_link() on detach */
static void emac_phy_deregister_fixed_link(void *data)
{
	struct device_node *of_node = data;

	of_phy_deregister_fixed_link(of_node);
}
1919 
/*
 * Platform probe: allocate the netdev, parse DT, set up clock/reset,
 * register the fixed-link and MDIO bus, and register the netdev.
 * Most resources are devm-managed and unwind automatically.
 */
static int emac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct reset_control *reset;
	struct net_device *ndev;
	struct emac_priv *priv;
	int ret;

	ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
	if (!ndev)
		return -ENOMEM;

	ndev->hw_features = NETIF_F_SG;
	ndev->features |= ndev->hw_features;

	/* Largest payload that still fits a hardware RX buffer */
	ndev->max_mtu = EMAC_RX_BUF_MAX - (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN);
	ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	platform_set_drvdata(pdev, priv);

	ret = emac_config_dt(pdev, priv);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Configuration failed\n");

	ndev->watchdog_timeo = 5 * HZ;
	ndev->base_addr = (unsigned long)priv->iobase;
	ndev->irq = priv->irq;

	ndev->ethtool_ops = &emac_ethtool_ops;
	ndev->netdev_ops = &emac_netdev_ops;

	devm_pm_runtime_enable(&pdev->dev);

	priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(priv->bus_clk))
		return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
				     "Failed to get clock\n");

	/* Deassert the (optional) reset and keep it deasserted via devm */
	reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
								     NULL);
	if (IS_ERR(reset))
		return dev_err_probe(dev, PTR_ERR(reset),
				     "Failed to get reset\n");

	if (of_phy_is_fixed_link(dev->of_node)) {
		ret = of_phy_register_fixed_link(dev->of_node);
		if (ret)
			return dev_err_probe(dev, ret,
					     "Failed to register fixed-link\n");

		ret = devm_add_action_or_reset(dev,
					       emac_phy_deregister_fixed_link,
					       dev->of_node);

		if (ret) {
			dev_err(dev, "devm_add_action_or_reset failed\n");
			return ret;
		}
	}

	emac_sw_init(priv);

	ret = emac_mdio_init(priv);
	if (ret)
		goto err_timer_delete;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ret = devm_register_netdev(dev, ndev);
	if (ret) {
		dev_err(dev, "devm_register_netdev failed\n");
		goto err_timer_delete;
	}

	/*
	 * NOTE(review): NAPI is added after the netdev is registered;
	 * confirm emac_open() cannot run before this point, as it would
	 * enable a not-yet-initialized NAPI instance.
	 */
	netif_napi_add(ndev, &priv->napi, emac_rx_poll);
	netif_carrier_off(ndev);

	return 0;

err_timer_delete:
	timer_delete_sync(&priv->txtimer);
	timer_delete_sync(&priv->stats_timer);

	return ret;
}
2008 
/*
 * Platform remove: shut down timers and deferred work, then reset the MAC.
 * Netdev unregistration and the other resources are devm-managed.
 */
static void emac_remove(struct platform_device *pdev)
{
	struct emac_priv *priv = platform_get_drvdata(pdev);

	/* timer_shutdown_sync() also prevents any future rearming */
	timer_shutdown_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_shutdown_sync(&priv->stats_timer);

	emac_reset_hw(priv);
}
2020 
/*
 * System resume: re-enable the bus clock and, if the interface was running
 * at suspend time, bring it back up and restart statistics updates.
 */
static int emac_resume(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable bus clock: %d\n", ret);
		return ret;
	}

	if (!netif_running(ndev))
		return 0;

	ret = emac_open(ndev);
	if (ret) {
		/* Undo the clock enable so suspend/resume stays balanced */
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	netif_device_attach(ndev);

	mod_timer(&priv->stats_timer, jiffies);

	return 0;
}
2048 
/*
 * System suspend: stop the interface if it is running, then gate the bus
 * clock.  Counterpart of emac_resume().
 */
static int emac_suspend(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!ndev || !netif_running(ndev)) {
		clk_disable_unprepare(priv->bus_clk);
		return 0;
	}

	emac_stop(ndev);

	clk_disable_unprepare(priv->bus_clk);
	netif_device_detach(ndev);
	return 0;
}
2065 
/* System sleep callbacks only; no runtime PM callbacks are provided */
static const struct dev_pm_ops emac_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
};
2069 
/* Device-tree match table */
static const struct of_device_id emac_of_match[] = {
	{ .compatible = "spacemit,k1-emac" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, emac_of_match);
2075 
2076 static struct platform_driver emac_driver = {
2077 	.probe = emac_probe,
2078 	.remove = emac_remove,
2079 	.driver = {
2080 		.name = DRIVER_NAME,
2081 		.of_match_table = of_match_ptr(emac_of_match),
2082 		.pm = &emac_pm_ops,
2083 	},
2084 };
2085 module_platform_driver(emac_driver);
2086 
2087 MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
2088 MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
2089 MODULE_LICENSE("GPL");
2090