xref: /linux/drivers/net/ethernet/spacemit/k1_emac.c (revision 1cac38910ecb881b09f61f57545a771bbe57ba68)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SpacemiT K1 Ethernet driver
4  *
5  * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
6  * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
7  */
8 
9 #include <linux/bitfield.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/ethtool.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/iopoll.h>
18 #include <linux/kernel.h>
19 #include <linux/mfd/syscon.h>
20 #include <linux/module.h>
21 #include <linux/of.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_mdio.h>
24 #include <linux/of_net.h>
25 #include <linux/phy.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/pm.h>
29 #include <linux/regmap.h>
30 #include <linux/reset.h>
31 #include <linux/rtnetlink.h>
32 #include <linux/timer.h>
33 #include <linux/types.h>
34 
35 #include "k1_emac.h"
36 
37 #define DRIVER_NAME "k1_emac"
38 
39 #define EMAC_DEFAULT_BUFSIZE		1536
40 #define EMAC_RX_BUF_2K			2048
41 #define EMAC_RX_BUF_4K			4096
42 
43 /* Tuning parameters from SpacemiT */
44 #define EMAC_TX_FRAMES			64
45 #define EMAC_TX_COAL_TIMEOUT		40000
46 #define EMAC_RX_FRAMES			64
47 #define EMAC_RX_COAL_TIMEOUT		(600 * 312)
48 
49 #define DEFAULT_FC_PAUSE_TIME		0xffff
50 #define DEFAULT_FC_FIFO_HIGH		1600
51 #define DEFAULT_TX_ALMOST_FULL		0x1f8
52 #define DEFAULT_TX_THRESHOLD		1518
53 #define DEFAULT_RX_THRESHOLD		12
54 #define DEFAULT_TX_RING_NUM		1024
55 #define DEFAULT_RX_RING_NUM		1024
56 #define DEFAULT_DMA_BURST		MREGBIT_BURST_16WORD
57 #define HASH_TABLE_SIZE			64
58 
59 struct desc_buf {
60 	u64 dma_addr;
61 	void *buff_addr;
62 	u16 dma_len;
63 	u8 map_as_page;
64 };
65 
66 struct emac_tx_desc_buffer {
67 	struct sk_buff *skb;
68 	struct desc_buf buf[2];
69 };
70 
71 struct emac_rx_desc_buffer {
72 	struct sk_buff *skb;
73 	u64 dma_addr;
74 	void *buff_addr;
75 	u16 dma_len;
76 	u8 map_as_page;
77 };
78 
79 /**
80  * struct emac_desc_ring - Software-side information for one descriptor ring
81  * Same structure used for both RX and TX
82  * @desc_addr: Virtual address to the descriptor ring memory
83  * @desc_dma_addr: DMA address of the descriptor ring
84  * @total_size: Size of ring in bytes
85  * @total_cnt: Number of descriptors
86  * @head: Next descriptor to associate a buffer with
87  * @tail: Next descriptor to check status bit
88  * @rx_desc_buf: Array of buffer bookkeeping entries for RX
89  * @tx_desc_buf: Array of buffer bookkeeping entries for TX, max two buffers each
90  */
91 struct emac_desc_ring {
92 	void *desc_addr;
93 	dma_addr_t desc_dma_addr;
94 	u32 total_size;
95 	u32 total_cnt;
96 	u32 head;
97 	u32 tail;
98 	union {
99 		struct emac_rx_desc_buffer *rx_desc_buf;
100 		struct emac_tx_desc_buffer *tx_desc_buf;
101 	};
102 };
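
/*
 * The ring is empty when head == tail. One slot is always left unused, so a
 * full ring (head one slot behind tail) is distinguishable from an empty one;
 * see emac_tx_avail().
 */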
103 
104 struct emac_priv {
105 	void __iomem *iobase;
106 	u32 dma_buf_sz;
107 	struct emac_desc_ring tx_ring;
108 	struct emac_desc_ring rx_ring;
109 
110 	struct net_device *ndev;
111 	struct napi_struct napi;
112 	struct platform_device *pdev;
113 	struct clk *bus_clk;
114 	struct clk *ref_clk;
115 	struct regmap *regmap_apmu;
116 	u32 regmap_apmu_offset;
117 	int irq;
118 
119 	phy_interface_t phy_interface;
120 
121 	union emac_hw_tx_stats tx_stats, tx_stats_off;
122 	union emac_hw_rx_stats rx_stats, rx_stats_off;
123 
124 	u32 tx_count_frames;
125 	u32 tx_coal_frames;
126 	u32 tx_coal_timeout;
127 	struct work_struct tx_timeout_task;
128 
129 	struct timer_list txtimer;
130 	struct timer_list stats_timer;
131 
132 	u32 tx_delay;
133 	u32 rx_delay;
134 
135 	bool flow_control_autoneg;
136 	u8 flow_control;
137 
138 	/* Softirq-safe, hold while touching hardware statistics */
139 	spinlock_t stats_lock;
140 };
141 
142 static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
143 {
144 	writel(val, priv->iobase + reg);
145 }
146 
147 static u32 emac_rd(struct emac_priv *priv, u32 reg)
148 {
149 	return readl(priv->iobase + reg);
150 }
151 
152 static int emac_phy_interface_config(struct emac_priv *priv)
153 {
154 	u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
155 
156 	if (phy_interface_mode_is_rgmii(priv->phy_interface))
157 		val |= PHY_INTF_RGMII;
158 
159 	regmap_update_bits(priv->regmap_apmu,
160 			   priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
161 			   mask, val);
162 
163 	return 0;
164 }
165 
166 /*
167  * Where the hardware expects a MAC address, it is laid out across three
168  * consecutive registers, in the high/mid/low order and byte format below.
169  */
170 
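/*
 * For example (an illustrative address, not anything the driver assumes),
 * 00:11:22:33:44:55 is written as 0x1100 at reg + 0x0, 0x3322 at reg + 0x4
 * and 0x5544 at reg + 0x8.
 */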
171 static void emac_set_mac_addr_reg(struct emac_priv *priv,
172 				  const unsigned char *addr,
173 				  u32 reg)
174 {
175 	emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
176 	emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
177 	emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
178 }
179 
180 static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
181 {
182 	/* We use only one address, so set the same for flow control as well */
183 	emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
184 	emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
185 }
186 
187 static void emac_reset_hw(struct emac_priv *priv)
188 {
189 	/* Disable all interrupts */
190 	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
191 	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
192 
193 	/* Disable transmit and receive units */
194 	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
195 	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
196 
197 	/* Disable DMA */
198 	emac_wr(priv, DMA_CONTROL, 0x0);
199 }
200 
201 static void emac_init_hw(struct emac_priv *priv)
202 {
203 	/* Destination address for 802.3x Ethernet flow control */
204 	u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
205 
206 	u32 rxirq = 0, dma = 0;
207 
208 	regmap_set_bits(priv->regmap_apmu,
209 			priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
210 			AXI_SINGLE_ID);
211 
212 	/* Disable transmit and receive units */
213 	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
214 	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
215 
216 	/* Enable MAC address 1 filtering */
217 	emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);
218 
219 	/* Zero initialize the multicast hash table */
220 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
221 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
222 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
223 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
224 
225 	/* Configure thresholds */
226 	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
227 	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
228 		DEFAULT_TX_THRESHOLD);
229 	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);
230 
231 	/* Configure flow control (enabled in emac_adjust_link() later) */
232 	emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
233 	emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
234 	emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
235 	emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);
236 
237 	/* RX IRQ mitigation */
238 	rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
239 			   EMAC_RX_FRAMES);
240 	rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
241 			    EMAC_RX_COAL_TIMEOUT);
242 	rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
243 	emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);
244 
245 	/* Disable and set DMA config */
246 	emac_wr(priv, DMA_CONTROL, 0x0);
247 
248 	emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
249 	usleep_range(9000, 10000);
250 	emac_wr(priv, DMA_CONFIGURATION, 0x0);
251 	usleep_range(9000, 10000);
252 
253 	dma |= MREGBIT_STRICT_BURST;
254 	dma |= MREGBIT_DMA_64BIT_MODE;
255 	dma |= DEFAULT_DMA_BURST;
256 
257 	emac_wr(priv, DMA_CONFIGURATION, dma);
258 }
259 
260 static void emac_dma_start_transmit(struct emac_priv *priv)
261 {
262 	/* The actual value written does not matter */
263 	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
264 }
265 
266 static void emac_enable_interrupt(struct emac_priv *priv)
267 {
268 	u32 val;
269 
270 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
271 	val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
272 	val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
273 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
274 }
275 
276 static void emac_disable_interrupt(struct emac_priv *priv)
277 {
278 	u32 val;
279 
280 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
281 	val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
282 	val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
283 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
284 }
285 
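/*
 * Worked example: with total_cnt = 8, head = 6 and tail = 2, descriptors 2..5
 * are in flight, and avail = 8 - 6 + 2 - 1 = 3 (one slot stays unused, see
 * the note after struct emac_desc_ring).
 */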
286 static u32 emac_tx_avail(struct emac_priv *priv)
287 {
288 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
289 	u32 avail;
290 
291 	if (tx_ring->tail > tx_ring->head)
292 		avail = tx_ring->tail - tx_ring->head - 1;
293 	else
294 		avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
295 
296 	return avail;
297 }
298 
299 static void emac_tx_coal_timer_resched(struct emac_priv *priv)
300 {
301 	mod_timer(&priv->txtimer,
302 		  jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
303 }
304 
305 static void emac_tx_coal_timer(struct timer_list *t)
306 {
307 	struct emac_priv *priv = timer_container_of(priv, t, txtimer);
308 
309 	napi_schedule(&priv->napi);
310 }
311 
312 static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
313 {
314 	priv->tx_count_frames += pkt_num;
315 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
316 		emac_tx_coal_timer_resched(priv);
317 		return false;
318 	}
319 
320 	priv->tx_count_frames = 0;
321 	return true;
322 }
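
/*
 * With the defaults set in emac_sw_init() (EMAC_TX_FRAMES = 64,
 * EMAC_TX_COAL_TIMEOUT = 40000 us), a completion interrupt is requested at
 * most once per 64 frames; the 40 ms timer ensures completions are still
 * reaped on a quiet queue.
 */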
323 
324 static void emac_free_tx_buf(struct emac_priv *priv, int i)
325 {
326 	struct emac_tx_desc_buffer *tx_buf;
327 	struct emac_desc_ring *tx_ring;
328 	struct desc_buf *buf;
329 	int j;
330 
331 	tx_ring = &priv->tx_ring;
332 	tx_buf = &tx_ring->tx_desc_buf[i];
333 
334 	for (j = 0; j < 2; j++) {
335 		buf = &tx_buf->buf[j];
336 		if (!buf->dma_addr)
337 			continue;
338 
339 		if (buf->map_as_page)
340 			dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
341 				       buf->dma_len, DMA_TO_DEVICE);
342 		else
343 			dma_unmap_single(&priv->pdev->dev,
344 					 buf->dma_addr, buf->dma_len,
345 					 DMA_TO_DEVICE);
346 
347 		buf->dma_addr = 0;
348 		buf->map_as_page = false;
349 		buf->buff_addr = NULL;
350 	}
351 
352 	if (tx_buf->skb) {
353 		dev_kfree_skb_any(tx_buf->skb);
354 		tx_buf->skb = NULL;
355 	}
356 }
357 
358 static void emac_clean_tx_desc_ring(struct emac_priv *priv)
359 {
360 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
361 	u32 i;
362 
363 	for (i = 0; i < tx_ring->total_cnt; i++)
364 		emac_free_tx_buf(priv, i);
365 
366 	tx_ring->head = 0;
367 	tx_ring->tail = 0;
368 }
369 
370 static void emac_clean_rx_desc_ring(struct emac_priv *priv)
371 {
372 	struct emac_rx_desc_buffer *rx_buf;
373 	struct emac_desc_ring *rx_ring;
374 	u32 i;
375 
376 	rx_ring = &priv->rx_ring;
377 
378 	for (i = 0; i < rx_ring->total_cnt; i++) {
379 		rx_buf = &rx_ring->rx_desc_buf[i];
380 
381 		if (!rx_buf->skb)
382 			continue;
383 
384 		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
385 				 rx_buf->dma_len, DMA_FROM_DEVICE);
386 
387 		dev_kfree_skb(rx_buf->skb);
388 		rx_buf->skb = NULL;
389 	}
390 
391 	rx_ring->tail = 0;
392 	rx_ring->head = 0;
393 }
394 
395 static int emac_alloc_tx_resources(struct emac_priv *priv)
396 {
397 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
398 	struct platform_device *pdev = priv->pdev;
399 
400 	tx_ring->tx_desc_buf = kcalloc(tx_ring->total_cnt,
401 				       sizeof(*tx_ring->tx_desc_buf),
402 				       GFP_KERNEL);
403 
404 	if (!tx_ring->tx_desc_buf)
405 		return -ENOMEM;
406 
407 	tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
408 	tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);
409 
410 	tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
411 						&tx_ring->desc_dma_addr,
412 						GFP_KERNEL);
413 	if (!tx_ring->desc_addr) {
414 		kfree(tx_ring->tx_desc_buf);
415 		return -ENOMEM;
416 	}
417 
418 	tx_ring->head = 0;
419 	tx_ring->tail = 0;
420 
421 	return 0;
422 }
423 
424 static int emac_alloc_rx_resources(struct emac_priv *priv)
425 {
426 	struct emac_desc_ring *rx_ring = &priv->rx_ring;
427 	struct platform_device *pdev = priv->pdev;
428 
429 	rx_ring->rx_desc_buf = kcalloc(rx_ring->total_cnt,
430 				       sizeof(*rx_ring->rx_desc_buf),
431 				       GFP_KERNEL);
432 	if (!rx_ring->rx_desc_buf)
433 		return -ENOMEM;
434 
435 	rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);
436 
437 	rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);
438 
439 	rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
440 						&rx_ring->desc_dma_addr,
441 						GFP_KERNEL);
442 	if (!rx_ring->desc_addr) {
443 		kfree(rx_ring->rx_desc_buf);
444 		return -ENOMEM;
445 	}
446 
447 	rx_ring->head = 0;
448 	rx_ring->tail = 0;
449 
450 	return 0;
451 }
452 
453 static void emac_free_tx_resources(struct emac_priv *priv)
454 {
455 	struct emac_desc_ring *tr = &priv->tx_ring;
456 	struct device *dev = &priv->pdev->dev;
457 
458 	emac_clean_tx_desc_ring(priv);
459 
460 	kfree(tr->tx_desc_buf);
461 	tr->tx_desc_buf = NULL;
462 
463 	dma_free_coherent(dev, tr->total_size, tr->desc_addr,
464 			  tr->desc_dma_addr);
465 	tr->desc_addr = NULL;
466 }
467 
468 static void emac_free_rx_resources(struct emac_priv *priv)
469 {
470 	struct emac_desc_ring *rr = &priv->rx_ring;
471 	struct device *dev = &priv->pdev->dev;
472 
473 	emac_clean_rx_desc_ring(priv);
474 
475 	kfree(rr->rx_desc_buf);
476 	rr->rx_desc_buf = NULL;
477 
478 	dma_free_coherent(dev, rr->total_size, rr->desc_addr,
479 			  rr->desc_dma_addr);
480 	rr->desc_addr = NULL;
481 }
482 
483 static int emac_tx_clean_desc(struct emac_priv *priv)
484 {
485 	struct net_device *ndev = priv->ndev;
486 	struct emac_desc_ring *tx_ring;
487 	struct emac_desc *tx_desc;
488 	u32 i;
489 
490 	netif_tx_lock(ndev);
491 
492 	tx_ring = &priv->tx_ring;
493 
494 	i = tx_ring->tail;
495 
496 	while (i != tx_ring->head) {
497 		tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];
498 
499 		/* Stop checking if desc is still owned by DMA */
500 		if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
501 			break;
502 
503 		emac_free_tx_buf(priv, i);
504 		memset(tx_desc, 0, sizeof(struct emac_desc));
505 
506 		if (++i == tx_ring->total_cnt)
507 			i = 0;
508 	}
509 
510 	tx_ring->tail = i;
511 
512 	if (unlikely(netif_queue_stopped(ndev) &&
513 		     emac_tx_avail(priv) > tx_ring->total_cnt / 4))
514 		netif_wake_queue(ndev);
515 
516 	netif_tx_unlock(ndev);
517 
518 	return 0;
519 }
520 
521 static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
522 {
523 	const char *msg;
524 	u32 len;
525 
526 	len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);
527 
528 	if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
529 		msg = "Not last descriptor"; /* This would be a bug */
530 	else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
531 		msg = "Runt frame";
532 	else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
533 		msg = "Frame CRC error";
534 	else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
535 		msg = "Frame exceeds max length";
536 	else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
537 		msg = "Frame jabber error";
538 	else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
539 		msg = "Frame length error";
540 	else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
541 		msg = "Frame length unacceptable";
542 	else
543 		return true; /* All good */
544 
545 	dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);
546 
547 	return false;
548 }
549 
550 static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
551 {
552 	struct emac_desc_ring *rx_ring = &priv->rx_ring;
553 	struct emac_desc rx_desc, *rx_desc_addr;
554 	struct net_device *ndev = priv->ndev;
555 	struct emac_rx_desc_buffer *rx_buf;
556 	struct sk_buff *skb;
557 	u32 i;
558 
559 	i = rx_ring->head;
560 	rx_buf = &rx_ring->rx_desc_buf[i];
561 
562 	while (!rx_buf->skb) {
563 		skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
564 		if (!skb)
565 			break;
566 
567 		skb->dev = ndev;
568 
569 		rx_buf->skb = skb;
570 		rx_buf->dma_len = priv->dma_buf_sz;
571 		rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
572 						  priv->dma_buf_sz,
573 						  DMA_FROM_DEVICE);
574 		if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
575 			dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
576 			goto err_free_skb;
577 		}
578 
579 		rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];
580 
581 		memset(&rx_desc, 0, sizeof(rx_desc));
582 
583 		rx_desc.buffer_addr_1 = rx_buf->dma_addr;
584 		rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
585 					   rx_buf->dma_len);
586 
587 		if (++i == rx_ring->total_cnt) {
588 			rx_desc.desc1 |= RX_DESC_1_END_RING;
589 			i = 0;
590 		}
591 
592 		*rx_desc_addr = rx_desc;
593 		dma_wmb();
594 		WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);
595 
596 		rx_buf = &rx_ring->rx_desc_buf[i];
597 	}
598 
599 	rx_ring->head = i;
600 	return;
601 
602 err_free_skb:
603 	dev_kfree_skb_any(skb);
604 	rx_buf->skb = NULL;
605 }
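
/*
 * Note the handover ordering above: the descriptor body is written first, and
 * dma_wmb() makes it visible before the OWN bit is set, so the DMA engine can
 * never see a half-initialized descriptor. emac_tx_mem_map() does the same
 * on the TX side.
 */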
606 
607 /* Returns number of packets received */
608 static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
609 {
610 	struct net_device *ndev = priv->ndev;
611 	struct emac_rx_desc_buffer *rx_buf;
612 	struct emac_desc_ring *rx_ring;
613 	struct sk_buff *skb = NULL;
614 	struct emac_desc *rx_desc;
615 	u32 got = 0, skb_len, i;
616 
617 	rx_ring = &priv->rx_ring;
618 
619 	i = rx_ring->tail;
620 
621 	while (budget--) {
622 		rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];
623 
624 		/* Stop checking if rx_desc still owned by DMA */
625 		if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
626 			break;
627 
628 		dma_rmb();
629 
630 		rx_buf = &rx_ring->rx_desc_buf[i];
631 
632 		if (!rx_buf->skb)
633 			break;
634 
635 		got++;
636 
637 		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
638 				 rx_buf->dma_len, DMA_FROM_DEVICE);
639 
640 		if (likely(emac_rx_frame_good(priv, rx_desc))) {
641 			skb = rx_buf->skb;
642 
643 			skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
644 					    rx_desc->desc0);
645 			skb_len -= ETH_FCS_LEN;
646 
647 			skb_put(skb, skb_len);
648 			skb->dev = ndev;
649 			ndev->hard_header_len = ETH_HLEN;
650 
651 			skb->protocol = eth_type_trans(skb, ndev);
652 
653 			skb->ip_summed = CHECKSUM_NONE;
654 
655 			napi_gro_receive(&priv->napi, skb);
656 
657 			memset(rx_desc, 0, sizeof(struct emac_desc));
658 			rx_buf->skb = NULL;
659 		} else {
660 			dev_kfree_skb_irq(rx_buf->skb);
661 			rx_buf->skb = NULL;
662 		}
663 
664 		if (++i == rx_ring->total_cnt)
665 			i = 0;
666 	}
667 
668 	rx_ring->tail = i;
669 
670 	emac_alloc_rx_desc_buffers(priv);
671 
672 	return got;
673 }
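
/*
 * The dma_rmb() above pairs with the device's descriptor write-back: once OWN
 * is observed clear, it ensures the subsequent reads of the length and status
 * bits see the device's final values.
 */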
674 
675 static int emac_rx_poll(struct napi_struct *napi, int budget)
676 {
677 	struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
678 	int work_done;
679 
680 	emac_tx_clean_desc(priv);
681 
682 	work_done = emac_rx_clean_desc(priv, budget);
683 	if (work_done < budget && napi_complete_done(napi, work_done))
684 		emac_enable_interrupt(priv);
685 
686 	return work_done;
687 }
688 
689 /*
690  * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
691  *
692  * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
693  * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
694  */
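
/*
 * For example, an skb with a linear part and three page fragments occupies
 * two descriptors: desc N holds skb->data (buffer 1) and frags[0] (buffer 2),
 * and desc N+1 holds frags[1] (buffer 1) and frags[2] (buffer 2).
 */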
695 
696 static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
697 			    struct emac_tx_desc_buffer *tx_buf,
698 			    struct sk_buff *skb, u32 frag_idx)
699 {
700 	bool map_as_page, buf_idx;
701 	const skb_frag_t *frag;
702 	phys_addr_t addr;
703 	u32 len;
704 	int ret;
705 
706 	buf_idx = frag_idx % 2;
707 
708 	if (frag_idx == 0) {
709 		/* Non-fragmented part */
710 		len = skb_headlen(skb);
711 		addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
712 		map_as_page = false;
713 	} else {
714 		/* Fragment */
715 		frag = &skb_shinfo(skb)->frags[frag_idx - 1];
716 		len = skb_frag_size(frag);
717 		addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
718 		map_as_page = true;
719 	}
720 
721 	ret = dma_mapping_error(dev, addr);
722 	if (ret)
723 		return ret;
724 
725 	tx_buf->buf[buf_idx].dma_addr = addr;
726 	tx_buf->buf[buf_idx].dma_len = len;
727 	tx_buf->buf[buf_idx].map_as_page = map_as_page;
728 
729 	if (buf_idx == 0) {
730 		tx_desc->buffer_addr_1 = addr;
731 		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
732 	} else {
733 		tx_desc->buffer_addr_2 = addr;
734 		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
735 	}
736 
737 	return 0;
738 }
739 
740 static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
741 {
742 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
743 	struct emac_desc tx_desc, *tx_desc_addr;
744 	struct device *dev = &priv->pdev->dev;
745 	struct emac_tx_desc_buffer *tx_buf;
746 	u32 head, old_head, frag_num, f;
747 	bool buf_idx;
748 
749 	frag_num = skb_shinfo(skb)->nr_frags;
750 	head = tx_ring->head;
751 	old_head = head;
752 
753 	for (f = 0; f < frag_num + 1; f++) {
754 		buf_idx = f % 2;
755 
756 		/*
757 		 * If using buffer 1, initialize a new desc. Otherwise, use
758 		 * buffer 2 of previous fragment's desc.
759 		 */
760 		if (!buf_idx) {
761 			tx_buf = &tx_ring->tx_desc_buf[head];
762 			tx_desc_addr =
763 				&((struct emac_desc *)tx_ring->desc_addr)[head];
764 			memset(&tx_desc, 0, sizeof(tx_desc));
765 
766 			/*
767 			 * Give ownership for all but first desc initially. For
768 			 * first desc, give at the end so DMA cannot start
769 			 * reading uninitialized descs.
770 			 */
771 			if (head != old_head)
772 				tx_desc.desc0 |= TX_DESC_0_OWN;
773 
774 			if (++head == tx_ring->total_cnt) {
775 				/* Just used last desc in ring */
776 				tx_desc.desc1 |= TX_DESC_1_END_RING;
777 				head = 0;
778 			}
779 		}
780 
781 		if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
782 			dev_err_ratelimited(&priv->ndev->dev,
783 					    "Map TX frag %d failed\n", f);
784 			goto err_free_skb;
785 		}
786 
787 		if (f == 0)
788 			tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;
789 
790 		if (f == frag_num) {
791 			tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
792 			tx_buf->skb = skb;
793 			if (emac_tx_should_interrupt(priv, frag_num + 1))
794 				tx_desc.desc1 |=
795 					TX_DESC_1_INTERRUPT_ON_COMPLETION;
796 		}
797 
798 		*tx_desc_addr = tx_desc;
799 	}
800 
801 	/* All descriptors are ready, give ownership for first desc */
802 	tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
803 	dma_wmb();
804 	WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);
805 
806 	emac_dma_start_transmit(priv);
807 
808 	tx_ring->head = head;
809 
810 	return;
811 
812 err_free_skb:
813 	dev_dstats_tx_dropped(priv->ndev);
814 	dev_kfree_skb_any(skb);
815 }
816 
817 static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
818 {
819 	struct emac_priv *priv = netdev_priv(ndev);
820 	int nfrags = skb_shinfo(skb)->nr_frags;
821 	struct device *dev = &priv->pdev->dev;
822 
823 	if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
824 		if (!netif_queue_stopped(ndev)) {
825 			netif_stop_queue(ndev);
826 			dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
827 		}
828 		return NETDEV_TX_BUSY;
829 	}
830 
831 	emac_tx_mem_map(priv, skb);
832 
833 	/* Make sure there is space in the ring for the next TX. */
834 	if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
835 		netif_stop_queue(ndev);
836 
837 	return NETDEV_TX_OK;
838 }
839 
840 static int emac_set_mac_address(struct net_device *ndev, void *addr)
841 {
842 	struct emac_priv *priv = netdev_priv(ndev);
843 	int ret = eth_mac_addr(ndev, addr);
844 
845 	if (ret)
846 		return ret;
847 
848 	/* If running, set now; if not running it will be set in emac_up. */
849 	if (netif_running(ndev))
850 		emac_set_mac_addr(priv, ndev->dev_addr);
851 
852 	return 0;
853 }
854 
855 static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
856 {
857 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
858 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
859 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
860 	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
861 }
862 
863 /*
864  * The upper 6 bits of the Ethernet CRC of the MAC address are used as the hash
865  * when matching multicast addresses.
866  */
867 static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
868 {
869 	u32 crc32 = ether_crc(ETH_ALEN, addr);
870 
871 	return crc32 >> 26;
872 }
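
/*
 * The 6-bit hash indexes the 64-bit table held in four 16-bit registers in
 * emac_set_rx_mode(): for example, a hash of 42 selects register index
 * 42 / 16 = 2 (MAC_MULTICAST_HASH_TABLE3) and bit 42 % 16 = 10.
 */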
873 
874 /* Configure Multicast and Promiscuous modes */
875 static void emac_set_rx_mode(struct net_device *ndev)
876 {
877 	struct emac_priv *priv = netdev_priv(ndev);
878 	struct netdev_hw_addr *ha;
879 	u32 mc_filter[4] = { 0 };
880 	u32 hash, reg, bit, val;
881 
882 	val = emac_rd(priv, MAC_ADDRESS_CONTROL);
883 
884 	val &= ~MREGBIT_PROMISCUOUS_MODE;
885 
886 	if (ndev->flags & IFF_PROMISC) {
887 		/* Enable promisc mode */
888 		val |= MREGBIT_PROMISCUOUS_MODE;
889 	} else if ((ndev->flags & IFF_ALLMULTI) ||
890 		   (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
891 		/* Accept all multicast frames by setting every bit */
892 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
893 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
894 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
895 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
896 	} else if (!netdev_mc_empty(ndev)) {
897 		emac_mac_multicast_filter_clear(priv);
898 		netdev_for_each_mc_addr(ha, ndev) {
899 			/*
900 			 * The hash table is an array of 4 16-bit registers. It
901 			 * is treated like an array of 64 bits (bits[hash]).
902 			 */
903 			hash = emac_ether_addr_hash(ha->addr);
904 			reg = hash / 16;
905 			bit = hash % 16;
906 			mc_filter[reg] |= BIT(bit);
907 		}
908 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
909 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
910 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
911 		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
912 	}
913 
914 	emac_wr(priv, MAC_ADDRESS_CONTROL, val);
915 }
916 
917 static int emac_change_mtu(struct net_device *ndev, int mtu)
918 {
919 	struct emac_priv *priv = netdev_priv(ndev);
920 	u32 frame_len;
921 
922 	if (netif_running(ndev)) {
923 		netdev_err(ndev, "must be stopped to change MTU\n");
924 		return -EBUSY;
925 	}
926 
927 	frame_len = mtu + ETH_HLEN + ETH_FCS_LEN;
928 
929 	if (frame_len <= EMAC_DEFAULT_BUFSIZE)
930 		priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
931 	else if (frame_len <= EMAC_RX_BUF_2K)
932 		priv->dma_buf_sz = EMAC_RX_BUF_2K;
933 	else
934 		priv->dma_buf_sz = EMAC_RX_BUF_4K;
935 
936 	ndev->mtu = mtu;
937 
938 	return 0;
939 }
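
/*
 * For example, the default MTU of 1500 gives a frame length of
 * 1500 + 14 + 4 = 1518, which fits the default 1536-byte buffer; an MTU of
 * 2000 (frame length 2018) selects the 2 KiB buffer instead.
 */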
940 
941 static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
942 {
943 	struct emac_priv *priv = netdev_priv(ndev);
944 
945 	schedule_work(&priv->tx_timeout_task);
946 }
947 
948 static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
949 {
950 	struct emac_priv *priv = bus->priv;
951 	u32 cmd = 0, val;
952 	int ret;
953 
954 	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
955 	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
956 	cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
957 
958 	emac_wr(priv, MAC_MDIO_DATA, 0x0);
959 	emac_wr(priv, MAC_MDIO_CONTROL, cmd);
960 
961 	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
962 				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
963 
964 	if (ret)
965 		return ret;
966 
967 	val = emac_rd(priv, MAC_MDIO_DATA);
968 	return FIELD_GET(MREGBIT_MDIO_DATA, val);
969 }
970 
971 static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
972 			  u16 value)
973 {
974 	struct emac_priv *priv = bus->priv;
975 	u32 cmd = 0, val;
976 	int ret;
977 
978 	emac_wr(priv, MAC_MDIO_DATA, value);
979 
980 	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
981 	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
982 	cmd |= MREGBIT_START_MDIO_TRANS;
983 
984 	emac_wr(priv, MAC_MDIO_CONTROL, cmd);
985 
986 	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
987 				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
988 
989 	return ret;
990 }
991 
992 static int emac_mdio_init(struct emac_priv *priv)
993 {
994 	struct device *dev = &priv->pdev->dev;
995 	struct device_node *mii_np;
996 	struct mii_bus *mii;
997 	int ret;
998 
999 	mii = devm_mdiobus_alloc(dev);
1000 	if (!mii)
1001 		return -ENOMEM;
1002 
1003 	mii->priv = priv;
1004 	mii->name = "k1_emac_mii";
1005 	mii->read = emac_mii_read;
1006 	mii->write = emac_mii_write;
1007 	mii->parent = dev;
1008 	mii->phy_mask = ~0;
1009 	snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);
1010 
1011 	mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");
1012 
1013 	ret = devm_of_mdiobus_register(dev, mii, mii_np);
1014 	if (ret)
1015 		dev_err_probe(dev, ret, "Failed to register mdio bus\n");
1016 
1017 	of_node_put(mii_np);
1018 	return ret;
1019 }
1020 
1021 static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
1022 {
1023 	u32 val;
1024 
1025 	val = emac_rd(priv, MAC_FC_CONTROL);
1026 
1027 	FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
1028 	FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);
1029 
1030 	emac_wr(priv, MAC_FC_CONTROL, val);
1031 }
1032 
1033 static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
1034 {
1035 	u32 val = emac_rd(priv, MAC_FC_CONTROL);
1036 
1037 	FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);
1038 
1039 	emac_wr(priv, MAC_FC_CONTROL, val);
1040 }
1041 
1042 static void emac_set_fc(struct emac_priv *priv, u8 fc)
1043 {
1044 	emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
1045 	emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
1046 	priv->flow_control = fc;
1047 }
1048 
1049 static void emac_set_fc_autoneg(struct emac_priv *priv)
1050 {
1051 	struct phy_device *phydev = priv->ndev->phydev;
1052 	u32 local_adv, remote_adv;
1053 	u8 fc;
1054 
1055 	local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1056 
1057 	remote_adv = 0;
1058 
1059 	if (phydev->pause)
1060 		remote_adv |= LPA_PAUSE_CAP;
1061 
1062 	if (phydev->asym_pause)
1063 		remote_adv |= LPA_PAUSE_ASYM;
1064 
1065 	fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
1066 
1067 	priv->flow_control_autoneg = true;
1068 
1069 	emac_set_fc(priv, fc);
1070 }
1071 
1072 /*
1073  * Even though this MAC supports gigabit operation, it only provides 32-bit
1074  * statistics counters. The most overflow-prone counters are the "bytes" ones,
1075  * which at gigabit overflow about twice a minute.
1076  *
1077  * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
1078  * every time statistics seem to go backwards. Also, update periodically to
1079  * catch overflows when we are not otherwise checking the statistics often
1080  * enough.
1081  */
1082 
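/*
 * Sanity check on "about twice a minute": 2^32 bytes at gigabit line rate
 * (125 MB/s) takes roughly 34 seconds to accumulate, so updating every 20
 * seconds comfortably catches overflows of the byte counters.
 */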
1083 #define EMAC_STATS_TIMER_PERIOD		20
1084 
1085 static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
1086 			      u32 control_reg, u32 high_reg, u32 low_reg)
1087 {
1088 	u32 val, high, low;
1089 	int ret;
1090 
1091 	/* The "read" bit is the same for TX and RX */
1092 
1093 	val = MREGBIT_START_TX_COUNTER_READ | cnt;
1094 	emac_wr(priv, control_reg, val);
1095 	val = emac_rd(priv, control_reg);
1096 
1097 	ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
1098 					!(val & MREGBIT_START_TX_COUNTER_READ),
1099 					100, 10000);
1100 
1101 	if (ret) {
1102 		/*
1103 		 * This could be caused by the PHY stopping its refclk even when
1104 		 * the link is up, for power saving. See also comments in
1105 		 * emac_stats_update().
1106 		 */
1107 		dev_err_ratelimited(&priv->ndev->dev,
1108 				    "Read stat timeout. PHY clock stopped?\n");
1109 		return ret;
1110 	}
1111 
1112 	high = emac_rd(priv, high_reg);
1113 	low = emac_rd(priv, low_reg);
1114 	*res = high << 16 | lower_16_bits(low);
1115 
1116 	return 0;
1117 }
1118 
1119 static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
1120 {
1121 	return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
1122 				  MAC_TX_STATCTR_DATA_HIGH,
1123 				  MAC_TX_STATCTR_DATA_LOW);
1124 }
1125 
1126 static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
1127 {
1128 	return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
1129 				  MAC_RX_STATCTR_DATA_HIGH,
1130 				  MAC_RX_STATCTR_DATA_LOW);
1131 }
1132 
1133 static void emac_update_counter(u64 *counter, u32 new_low)
1134 {
1135 	u32 old_low = lower_32_bits(*counter);
1136 	u64 high = upper_32_bits(*counter);
1137 
1138 	if (old_low > new_low) {
1139 		/* Overflowed, increment high 32 bits */
1140 		high++;
1141 	}
1142 
1143 	*counter = (high << 32) | new_low;
1144 }
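
/*
 * Worked example: if *counter is 0x1_fffffff0 and the hardware now reports
 * new_low = 0x10, then old_low (0xfffffff0) > new_low, so high becomes 2 and
 * the counter is updated to 0x2_00000010.
 */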
1145 
1146 static void emac_stats_update(struct emac_priv *priv)
1147 {
1148 	u64 *tx_stats_off = priv->tx_stats_off.array;
1149 	u64 *rx_stats_off = priv->rx_stats_off.array;
1150 	u64 *tx_stats = priv->tx_stats.array;
1151 	u64 *rx_stats = priv->rx_stats.array;
1152 	u32 i, res, offset;
1153 
1154 	assert_spin_locked(&priv->stats_lock);
1155 
1156 	/*
1157 	 * We can't read statistics if the interface is not up. Also, some PHYs
1158 	 * stop their reference clocks for link down power saving, which also
1159 	 * causes reading statistics to time out. Don't update and don't
1160 	 * reschedule in these cases.
1161 	 */
1162 	if (!netif_running(priv->ndev) ||
1163 	    !netif_carrier_ok(priv->ndev) ||
1164 	    !netif_device_present(priv->ndev)) {
1165 		return;
1166 	}
1167 
1168 	for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
1169 		/*
1170 		 * If reading stats times out anyway, the stat registers will be
1171 		 * stuck, and we can't really recover from that.
1172 		 *
1173 		 * Reading statistics also can't return an error, so just return
1174 		 * without updating and without rescheduling.
1175 		 */
1176 		if (emac_tx_read_stat_cnt(priv, i, &res))
1177 			return;
1178 
1179 		/*
1180 		 * Re-initializing while bringing interface up resets counters
1181 		 * to zero, so to provide continuity, we add the values saved
1182 		 * last time we did emac_down() to the new hardware-provided
1183 		 * value.
1184 		 */
1185 		offset = lower_32_bits(tx_stats_off[i]);
1186 		emac_update_counter(&tx_stats[i], res + offset);
1187 	}
1188 
1189 	/* Similar remarks as TX stats */
1190 	for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
1191 		if (emac_rx_read_stat_cnt(priv, i, &res))
1192 			return;
1193 		offset = lower_32_bits(rx_stats_off[i]);
1194 		emac_update_counter(&rx_stats[i], res + offset);
1195 	}
1196 
1197 	mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
1198 }
1199 
1200 static void emac_stats_timer(struct timer_list *t)
1201 {
1202 	struct emac_priv *priv = timer_container_of(priv, t, stats_timer);
1203 
1204 	spin_lock(&priv->stats_lock);
1205 
1206 	emac_stats_update(priv);
1207 
1208 	spin_unlock(&priv->stats_lock);
1209 }
1210 
1211 static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
1212 	{   64,   64 },
1213 	{   65,  127 },
1214 	{  128,  255 },
1215 	{  256,  511 },
1216 	{  512, 1023 },
1217 	{ 1024, 1518 },
1218 	{ 1519, 4096 },
1219 	{ /* sentinel */ },
1220 };
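
/*
 * The bucket boundaries mirror the hardware rx_*_pkts histogram counters read
 * in emac_get_rmon_stats() below; the 1519-4096 top bucket presumably matches
 * the EMAC_RX_BUF_4K maximum buffer size.
 */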
1221 
1222 /* Like dev_fetch_dstats(), but we only use tx_drops */
1223 static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
1224 {
1225 	const struct pcpu_dstats *stats;
1226 	u64 tx_drops, total = 0;
1227 	unsigned int start;
1228 	int cpu;
1229 
1230 	for_each_possible_cpu(cpu) {
1231 		stats = per_cpu_ptr(priv->ndev->dstats, cpu);
1232 		do {
1233 			start = u64_stats_fetch_begin(&stats->syncp);
1234 			tx_drops = u64_stats_read(&stats->tx_drops);
1235 		} while (u64_stats_fetch_retry(&stats->syncp, start));
1236 
1237 		total += tx_drops;
1238 	}
1239 
1240 	return total;
1241 }
1242 
1243 static void emac_get_stats64(struct net_device *dev,
1244 			     struct rtnl_link_stats64 *storage)
1245 {
1246 	struct emac_priv *priv = netdev_priv(dev);
1247 	union emac_hw_tx_stats *tx_stats;
1248 	union emac_hw_rx_stats *rx_stats;
1249 
1250 	tx_stats = &priv->tx_stats;
1251 	rx_stats = &priv->rx_stats;
1252 
1253 	/* This is the only software counter */
1254 	storage->tx_dropped = emac_get_stat_tx_drops(priv);
1255 
1256 	spin_lock_bh(&priv->stats_lock);
1257 
1258 	emac_stats_update(priv);
1259 
1260 	storage->tx_packets = tx_stats->stats.tx_ok_pkts;
1261 	storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
1262 	storage->tx_errors = tx_stats->stats.tx_err_pkts;
1263 
1264 	storage->rx_packets = rx_stats->stats.rx_ok_pkts;
1265 	storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
1266 	storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
1267 	storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
1268 	storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
1269 	storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;
1270 
1271 	storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
1272 	storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
1273 	storage->collisions += tx_stats->stats.tx_excessclsn_pkts;
1274 
1275 	storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
1276 	storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;
1277 
1278 	spin_unlock_bh(&priv->stats_lock);
1279 }
1280 
1281 static void emac_get_rmon_stats(struct net_device *dev,
1282 				struct ethtool_rmon_stats *rmon_stats,
1283 				const struct ethtool_rmon_hist_range **ranges)
1284 {
1285 	struct emac_priv *priv = netdev_priv(dev);
1286 	union emac_hw_rx_stats *rx_stats;
1287 
1288 	rx_stats = &priv->rx_stats;
1289 
1290 	*ranges = emac_rmon_hist_ranges;
1291 
1292 	spin_lock_bh(&priv->stats_lock);
1293 
1294 	emac_stats_update(priv);
1295 
1296 	rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
1297 	rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
1298 	rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
1299 	rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;
1300 
1301 	/* Only RX has histogram stats */
1302 
1303 	rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
1304 	rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
1305 	rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
1306 	rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
1307 	rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
1308 	rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
1309 	rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;
1310 
1311 	spin_unlock_bh(&priv->stats_lock);
1312 }
1313 
1314 static void emac_get_eth_mac_stats(struct net_device *dev,
1315 				   struct ethtool_eth_mac_stats *mac_stats)
1316 {
1317 	struct emac_priv *priv = netdev_priv(dev);
1318 	union emac_hw_tx_stats *tx_stats;
1319 	union emac_hw_rx_stats *rx_stats;
1320 
1321 	tx_stats = &priv->tx_stats;
1322 	rx_stats = &priv->rx_stats;
1323 
1324 	spin_lock_bh(&priv->stats_lock);
1325 
1326 	emac_stats_update(priv);
1327 
1328 	mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
1329 	mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;
1330 
1331 	mac_stats->MulticastFramesReceivedOK =
1332 		rx_stats->stats.rx_multicast_pkts;
1333 	mac_stats->BroadcastFramesReceivedOK =
1334 		rx_stats->stats.rx_broadcast_pkts;
1335 
1336 	mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
1337 	mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
1338 	mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
1339 	mac_stats->FramesAbortedDueToXSColls =
1340 		tx_stats->stats.tx_excessclsn_pkts;
1341 
1342 	spin_unlock_bh(&priv->stats_lock);
1343 }
1344 
1345 static void emac_get_pause_stats(struct net_device *dev,
1346 				 struct ethtool_pause_stats *pause_stats)
1347 {
1348 	struct emac_priv *priv = netdev_priv(dev);
1349 	union emac_hw_tx_stats *tx_stats;
1350 	union emac_hw_rx_stats *rx_stats;
1351 
1352 	tx_stats = &priv->tx_stats;
1353 	rx_stats = &priv->rx_stats;
1354 
1355 	spin_lock_bh(&priv->stats_lock);
1356 
1357 	emac_stats_update(priv);
1358 
1359 	pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
1360 	pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;
1361 
1362 	spin_unlock_bh(&priv->stats_lock);
1363 }
1364 
1365 /* Other statistics that are not derivable from standard statistics */
1366 
1367 #define EMAC_ETHTOOL_STAT(type, name) \
1368 	{ offsetof(type, stats.name) / sizeof(u64), #name }
1369 
1370 static const struct emac_ethtool_stats {
1371 	size_t offset;
1372 	char str[ETH_GSTRING_LEN];
1373 } emac_ethtool_rx_stats[] = {
1374 	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
1375 	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
1376 };
1377 
1378 static int emac_get_sset_count(struct net_device *dev, int sset)
1379 {
1380 	switch (sset) {
1381 	case ETH_SS_STATS:
1382 		return ARRAY_SIZE(emac_ethtool_rx_stats);
1383 	default:
1384 		return -EOPNOTSUPP;
1385 	}
1386 }
1387 
1388 static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1389 {
1390 	int i;
1391 
1392 	switch (stringset) {
1393 	case ETH_SS_STATS:
1394 		for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
1395 			memcpy(data, emac_ethtool_rx_stats[i].str,
1396 			       ETH_GSTRING_LEN);
1397 			data += ETH_GSTRING_LEN;
1398 		}
1399 		break;
1400 	}
1401 }
1402 
1403 static void emac_get_ethtool_stats(struct net_device *dev,
1404 				   struct ethtool_stats *stats, u64 *data)
1405 {
1406 	struct emac_priv *priv = netdev_priv(dev);
1407 	u64 *rx_stats = (u64 *)&priv->rx_stats;
1408 	int i;
1409 
1410 	spin_lock_bh(&priv->stats_lock);
1411 
1412 	emac_stats_update(priv);
1413 
1414 	for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
1415 		data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];
1416 
1417 	spin_unlock_bh(&priv->stats_lock);
1418 }
1419 
1420 static int emac_ethtool_get_regs_len(struct net_device *dev)
1421 {
1422 	return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
1423 }
1424 
1425 static void emac_ethtool_get_regs(struct net_device *dev,
1426 				  struct ethtool_regs *regs, void *space)
1427 {
1428 	struct emac_priv *priv = netdev_priv(dev);
1429 	u32 *reg_space = space;
1430 	int i;
1431 
1432 	regs->version = 1;
1433 
1434 	for (i = 0; i < EMAC_DMA_REG_CNT; i++)
1435 		reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);
1436 
1437 	for (i = 0; i < EMAC_MAC_REG_CNT; i++)
1438 		reg_space[i + EMAC_DMA_REG_CNT] =
1439 			emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
1440 }
1441 
1442 static void emac_get_pauseparam(struct net_device *dev,
1443 				struct ethtool_pauseparam *pause)
1444 {
1445 	struct emac_priv *priv = netdev_priv(dev);
1446 
1447 	pause->autoneg = priv->flow_control_autoneg;
1448 	pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
1449 	pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
1450 }
1451 
1452 static int emac_set_pauseparam(struct net_device *dev,
1453 			       struct ethtool_pauseparam *pause)
1454 {
1455 	struct emac_priv *priv = netdev_priv(dev);
1456 	u8 fc = 0;
1457 
1458 	if (!netif_running(dev))
1459 		return -ENETDOWN;
1460 
1461 	priv->flow_control_autoneg = pause->autoneg;
1462 
1463 	if (pause->autoneg) {
1464 		emac_set_fc_autoneg(priv);
1465 	} else {
1466 		if (pause->tx_pause)
1467 			fc |= FLOW_CTRL_TX;
1468 
1469 		if (pause->rx_pause)
1470 			fc |= FLOW_CTRL_RX;
1471 
1472 		emac_set_fc(priv, fc);
1473 	}
1474 
1475 	return 0;
1476 }
1477 
1478 static void emac_get_drvinfo(struct net_device *dev,
1479 			     struct ethtool_drvinfo *info)
1480 {
1481 	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1482 	info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
1483 }
1484 
1485 static void emac_tx_timeout_task(struct work_struct *work)
1486 {
1487 	struct net_device *ndev;
1488 	struct emac_priv *priv;
1489 
1490 	priv = container_of(work, struct emac_priv, tx_timeout_task);
1491 	ndev = priv->ndev;
1492 
1493 	rtnl_lock();
1494 
1495 	/* No need to reset if already down */
1496 	if (!netif_running(ndev)) {
1497 		rtnl_unlock();
1498 		return;
1499 	}
1500 
1501 	netdev_err(ndev, "MAC reset due to TX timeout\n");
1502 
1503 	netif_trans_update(ndev); /* prevent tx timeout */
1504 	dev_close(ndev);
1505 	dev_open(ndev, NULL);
1506 
1507 	rtnl_unlock();
1508 }
1509 
1510 static void emac_sw_init(struct emac_priv *priv)
1511 {
1512 	priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
1513 
1514 	priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
1515 	priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;
1516 
1517 	spin_lock_init(&priv->stats_lock);
1518 
1519 	INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
1520 
1521 	priv->tx_coal_frames = EMAC_TX_FRAMES;
1522 	priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;
1523 
1524 	timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
1525 	timer_setup(&priv->stats_timer, emac_stats_timer, 0);
1526 }
1527 
1528 static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
1529 {
1530 	struct net_device *ndev = (struct net_device *)dev_id;
1531 	struct emac_priv *priv = netdev_priv(ndev);
1532 	bool should_schedule = false;
1533 	u32 clr = 0;
1534 	u32 status;
1535 
1536 	status = emac_rd(priv, DMA_STATUS_IRQ);
1537 
1538 	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
1539 		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
1540 		should_schedule = true;
1541 	}
1542 
1543 	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
1544 		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
1545 
1546 	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
1547 		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
1548 
1549 	if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
1550 		clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
1551 		should_schedule = true;
1552 	}
1553 
1554 	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
1555 		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
1556 
1557 	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
1558 		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
1559 
1560 	if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
1561 		clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
1562 
1563 	if (should_schedule) {
1564 		if (napi_schedule_prep(&priv->napi)) {
1565 			emac_disable_interrupt(priv);
1566 			__napi_schedule_irqoff(&priv->napi);
1567 		}
1568 	}
1569 
1570 	emac_wr(priv, DMA_STATUS_IRQ, clr);
1571 
1572 	return IRQ_HANDLED;
1573 }
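
/*
 * Every cause seen is acknowledged by writing the same bits back to
 * DMA_STATUS_IRQ (write-one-to-clear, going by how the handler above uses
 * it); the actual TX/RX work is deferred to NAPI with interrupts masked.
 */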
1574 
1575 static void emac_configure_tx(struct emac_priv *priv)
1576 {
1577 	u32 val;
1578 
1579 	/* Set base address */
1580 	val = (u32)priv->tx_ring.desc_dma_addr;
1581 	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
1582 
1583 	/* Set TX inter-frame gap value, enable transmit */
1584 	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
1585 	val &= ~MREGBIT_IFG_LEN;
1586 	val |= MREGBIT_TRANSMIT_ENABLE;
1587 	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
1588 	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
1589 
1590 	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);
1591 
1592 	/* Start TX DMA */
1593 	val = emac_rd(priv, DMA_CONTROL);
1594 	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
1595 	emac_wr(priv, DMA_CONTROL, val);
1596 }
1597 
1598 static void emac_configure_rx(struct emac_priv *priv)
1599 {
1600 	u32 val;
1601 
1602 	/* Set base address */
1603 	val = (u32)priv->rx_ring.desc_dma_addr;
1604 	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
1605 
1606 	/* Enable receive */
1607 	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
1608 	val |= MREGBIT_RECEIVE_ENABLE;
1609 	val |= MREGBIT_STORE_FORWARD;
1610 	emac_wr(priv, MAC_RECEIVE_CONTROL, val);
1611 
1612 	/* Start RX DMA */
1613 	val = emac_rd(priv, DMA_CONTROL);
1614 	val |= MREGBIT_START_STOP_RECEIVE_DMA;
1615 	emac_wr(priv, DMA_CONTROL, val);
1616 }
1617 
1618 static void emac_adjust_link(struct net_device *dev)
1619 {
1620 	struct emac_priv *priv = netdev_priv(dev);
1621 	struct phy_device *phydev = dev->phydev;
1622 	u32 ctrl;
1623 
1624 	if (phydev->link) {
1625 		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
1626 
1627 		/* Update duplex and speed from PHY */
1628 
1629 		FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
1630 			     phydev->duplex == DUPLEX_FULL);
1631 
1632 		ctrl &= ~MREGBIT_SPEED;
1633 
1634 		switch (phydev->speed) {
1635 		case SPEED_1000:
1636 			ctrl |= MREGBIT_SPEED_1000M;
1637 			break;
1638 		case SPEED_100:
1639 			ctrl |= MREGBIT_SPEED_100M;
1640 			break;
1641 		case SPEED_10:
1642 			ctrl |= MREGBIT_SPEED_10M;
1643 			break;
1644 		default:
1645 			netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
1646 			phydev->speed = SPEED_UNKNOWN;
1647 			break;
1648 		}
1649 
1650 		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
1651 
1652 		emac_set_fc_autoneg(priv);
1653 
1654 		/*
1655 		 * Reschedule stats updates now that link is up. See comments in
1656 		 * emac_stats_update().
1657 		 */
1658 		mod_timer(&priv->stats_timer, jiffies);
1659 	}
1660 
1661 	phy_print_status(phydev);
1662 }
1663 
1664 static void emac_update_delay_line(struct emac_priv *priv)
1665 {
1666 	u32 mask = 0, val = 0;
1667 
1668 	mask |= EMAC_RX_DLINE_EN;
1669 	mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
1670 	mask |= EMAC_TX_DLINE_EN;
1671 	mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;
1672 
1673 	if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
1674 		val |= EMAC_RX_DLINE_EN;
1675 		val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
1676 				  EMAC_DLINE_STEP_15P6);
1677 		val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);
1678 
1679 		val |= EMAC_TX_DLINE_EN;
1680 		val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
1681 				  EMAC_DLINE_STEP_15P6);
1682 		val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
1683 	}
1684 
1685 	regmap_update_bits(priv->regmap_apmu,
1686 			   priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
1687 			   mask, val);
1688 }
1689 
1690 static int emac_phy_connect(struct net_device *ndev)
1691 {
1692 	struct emac_priv *priv = netdev_priv(ndev);
1693 	struct device *dev = &priv->pdev->dev;
1694 	struct phy_device *phydev;
1695 	struct device_node *np;
1696 	int ret;
1697 
1698 	ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
1699 	if (ret) {
1700 		netdev_err(ndev, "No phy-mode found");
1701 		return ret;
1702 	}
1703 
1704 	switch (priv->phy_interface) {
1705 	case PHY_INTERFACE_MODE_RMII:
1706 	case PHY_INTERFACE_MODE_RGMII:
1707 	case PHY_INTERFACE_MODE_RGMII_ID:
1708 	case PHY_INTERFACE_MODE_RGMII_RXID:
1709 	case PHY_INTERFACE_MODE_RGMII_TXID:
1710 		break;
1711 	default:
1712 		netdev_err(ndev, "Unsupported PHY interface %s",
1713 			   phy_modes(priv->phy_interface));
1714 		return -EINVAL;
1715 	}
1716 
1717 	np = of_parse_phandle(dev->of_node, "phy-handle", 0);
1718 	if (!np && of_phy_is_fixed_link(dev->of_node))
1719 		np = of_node_get(dev->of_node);
1720 
1721 	if (!np) {
1722 		netdev_err(ndev, "No PHY specified");
1723 		return -ENODEV;
1724 	}
1725 
1726 	ret = emac_phy_interface_config(priv);
1727 	if (ret)
1728 		goto err_node_put;
1729 
1730 	phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
1731 				priv->phy_interface);
1732 	if (!phydev) {
1733 		netdev_err(ndev, "Could not attach to PHY\n");
1734 		ret = -ENODEV;
1735 		goto err_node_put;
1736 	}
1737 
1738 	phy_support_asym_pause(phydev);
1739 
1740 	phydev->mac_managed_pm = true;
1741 
1742 	emac_update_delay_line(priv);
1743 
1744 err_node_put:
1745 	of_node_put(np);
1746 	return ret;
1747 }
1748 
1749 static int emac_up(struct emac_priv *priv)
1750 {
1751 	struct platform_device *pdev = priv->pdev;
1752 	struct net_device *ndev = priv->ndev;
1753 	int ret;
1754 
1755 	pm_runtime_get_sync(&pdev->dev);
1756 
1757 	ret = emac_phy_connect(ndev);
1758 	if (ret) {
1759 		dev_err(&pdev->dev, "emac_phy_connect failed\n");
1760 		goto err_pm_put;
1761 	}
1762 
1763 	emac_init_hw(priv);
1764 
1765 	emac_set_mac_addr(priv, ndev->dev_addr);
1766 	emac_configure_tx(priv);
1767 	emac_configure_rx(priv);
1768 
1769 	emac_alloc_rx_desc_buffers(priv);
1770 
1771 	phy_start(ndev->phydev);
1772 
1773 	ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
1774 			  ndev->name, ndev);
1775 	if (ret) {
1776 		dev_err(&pdev->dev, "request_irq failed\n");
1777 		goto err_reset_disconnect_phy;
1778 	}
1779 
1780 	/* Don't enable MAC interrupts */
1781 	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
1782 
1783 	/* Enable DMA interrupts */
1784 	emac_wr(priv, DMA_INTERRUPT_ENABLE,
1785 		MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
1786 			MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
1787 			MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
1788 			MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
1789 			MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
1790 
1791 	napi_enable(&priv->napi);
1792 
1793 	netif_start_queue(ndev);
1794 
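	/* Kick off the periodic stats readout (see emac_stats_update()) */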
1795 	mod_timer(&priv->stats_timer, jiffies);
1796 
1797 	return 0;
1798 
1799 err_reset_disconnect_phy:
1800 	emac_reset_hw(priv);
1801 	phy_disconnect(ndev->phydev);
1802 
1803 err_pm_put:
1804 	pm_runtime_put_sync(&pdev->dev);
1805 	return ret;
1806 }
1807 
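/*
 * Bring the interface down: roughly the reverse of emac_up(), with a final
 * stats snapshot saved so that the counters persist across the hardware
 * reset.
 */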
1808 static int emac_down(struct emac_priv *priv)
1809 {
1810 	struct platform_device *pdev = priv->pdev;
1811 	struct net_device *ndev = priv->ndev;
1812 
1813 	netif_stop_queue(ndev);
1814 
1815 	phy_disconnect(ndev->phydev);
1816 
1817 	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
1818 	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
1819 
1820 	free_irq(priv->irq, ndev);
1821 
1822 	napi_disable(&priv->napi);
1823 
1824 	timer_delete_sync(&priv->txtimer);
1825 	cancel_work_sync(&priv->tx_timeout_task);
1826 
1827 	timer_delete_sync(&priv->stats_timer);
1828 
1829 	emac_reset_hw(priv);
1830 
1831 	/* Update and save current stats, see emac_stats_update() for usage */
1832 
1833 	spin_lock_bh(&priv->stats_lock);
1834 
1835 	emac_stats_update(priv);
1836 
1837 	priv->tx_stats_off = priv->tx_stats;
1838 	priv->rx_stats_off = priv->rx_stats;
1839 
1840 	spin_unlock_bh(&priv->stats_lock);
1841 
1842 	pm_runtime_put_sync(&pdev->dev);
1843 	return 0;
1844 }
1845 
1846 /* Called when net interface is brought up. */
1847 static int emac_open(struct net_device *ndev)
1848 {
1849 	struct emac_priv *priv = netdev_priv(ndev);
1850 	struct device *dev = &priv->pdev->dev;
1851 	int ret;
1852 
1853 	ret = emac_alloc_tx_resources(priv);
1854 	if (ret) {
1855 		dev_err(dev, "Cannot allocate TX resources\n");
1856 		return ret;
1857 	}
1858 
1859 	ret = emac_alloc_rx_resources(priv);
1860 	if (ret) {
1861 		dev_err(dev, "Cannot allocate RX resources\n");
1862 		goto err_free_tx;
1863 	}
1864 
1865 	ret = emac_up(priv);
1866 	if (ret) {
1867 		dev_err(dev, "Error when bringing interface up\n");
1868 		goto err_free_rx;
1869 	}
1870 	return 0;
1871 
1872 err_free_rx:
1873 	emac_free_rx_resources(priv);
1874 err_free_tx:
1875 	emac_free_tx_resources(priv);
1876 
1877 	return ret;
1878 }
1879 
1880 /* Called when interface is brought down. */
1881 static int emac_stop(struct net_device *ndev)
1882 {
1883 	struct emac_priv *priv = netdev_priv(ndev);
1884 
1885 	emac_down(priv);
1886 	emac_free_tx_resources(priv);
1887 	emac_free_rx_resources(priv);
1888 
1889 	return 0;
1890 }
1891 
1892 static const struct ethtool_ops emac_ethtool_ops = {
1893 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
1894 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
1895 	.nway_reset		= phy_ethtool_nway_reset,
1896 	.get_drvinfo		= emac_get_drvinfo,
1897 	.get_link		= ethtool_op_get_link,
1898 
1899 	.get_regs		= emac_ethtool_get_regs,
1900 	.get_regs_len		= emac_ethtool_get_regs_len,
1901 
1902 	.get_rmon_stats		= emac_get_rmon_stats,
1903 	.get_pause_stats	= emac_get_pause_stats,
1904 	.get_eth_mac_stats	= emac_get_eth_mac_stats,
1905 
1906 	.get_sset_count		= emac_get_sset_count,
1907 	.get_strings		= emac_get_strings,
1908 	.get_ethtool_stats	= emac_get_ethtool_stats,
1909 
1910 	.get_pauseparam		= emac_get_pauseparam,
1911 	.set_pauseparam		= emac_set_pauseparam,
1912 };
1913 
1914 static const struct net_device_ops emac_netdev_ops = {
1915 	.ndo_open               = emac_open,
1916 	.ndo_stop               = emac_stop,
1917 	.ndo_start_xmit         = emac_start_xmit,
1918 	.ndo_validate_addr	= eth_validate_addr,
1919 	.ndo_set_mac_address    = emac_set_mac_address,
1920 	.ndo_eth_ioctl          = phy_do_ioctl_running,
1921 	.ndo_change_mtu         = emac_change_mtu,
1922 	.ndo_tx_timeout         = emac_tx_timeout,
1923 	.ndo_set_rx_mode        = emac_set_rx_mode,
1924 	.ndo_get_stats64	= emac_get_stats64,
1925 };
1926 
1927 /* Currently we always use 15.6 ps/step for the delay line */
1928 
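/*
 * Worked example of the round trip at 15.6 ps/step: 1000 ps maps to
 * DIV_ROUND_CLOSEST(1000 * 10, 156) = 64 units, and 64 units maps back to
 * DIV_ROUND_CLOSEST(64 * 156, 10) = 998 ps, i.e. within one step.
 */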
1929 static u32 delay_ps_to_unit(u32 ps)
1930 {
1931 	return DIV_ROUND_CLOSEST(ps * 10, 156);
1932 }
1933 
1934 static u32 delay_unit_to_ps(u32 unit)
1935 {
1936 	return DIV_ROUND_CLOSEST(unit * 156, 10);
1937 }
1938 
1939 #define EMAC_MAX_DELAY_UNIT	FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)
1940 
1941 /* Minus one just to be safe from rounding errors */
1942 #define EMAC_MAX_DELAY_PS	(delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
1943 
1944 static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
1945 {
1946 	struct device_node *np = pdev->dev.of_node;
1947 	struct device *dev = &pdev->dev;
1948 	u8 mac_addr[ETH_ALEN] = { 0 };
1949 	int ret;
1950 
1951 	priv->iobase = devm_platform_ioremap_resource(pdev, 0);
1952 	if (IS_ERR(priv->iobase))
1953 		return dev_err_probe(dev, PTR_ERR(priv->iobase),
1954 				     "ioremap failed\n");
1955 
1956 	priv->regmap_apmu =
1957 		syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
1958 						     &priv->regmap_apmu_offset);
1959 
1960 	if (IS_ERR(priv->regmap_apmu))
1961 		return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
1962 				     "failed to get syscon\n");
1963 
1964 	priv->irq = platform_get_irq(pdev, 0);
1965 	if (priv->irq < 0)
1966 		return priv->irq;
1967 
1968 	ret = of_get_mac_address(np, mac_addr);
1969 	if (ret) {
1970 		if (ret == -EPROBE_DEFER)
1971 			return dev_err_probe(dev, ret,
1972 					     "Can't get MAC address\n");
1973 
1974 		dev_info(&pdev->dev, "Using random MAC address\n");
1975 		eth_hw_addr_random(priv->ndev);
1976 	} else {
1977 		eth_hw_addr_set(priv->ndev, mac_addr);
1978 	}
1979 
1980 	priv->tx_delay = 0;
1981 	priv->rx_delay = 0;
1982 
1983 	of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
1984 	of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
1985 
1986 	if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
1987 		dev_err(&pdev->dev,
1988 			"tx-internal-delay-ps too large: max %d, got %d",
1989 			EMAC_MAX_DELAY_PS, priv->tx_delay);
1990 		return -EINVAL;
1991 	}
1992 
1993 	if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
1994 		dev_err(&pdev->dev,
1995 			"rx-internal-delay-ps too large: max %d, got %d",
1996 			EMAC_MAX_DELAY_PS, priv->rx_delay);
1997 		return -EINVAL;
1998 	}
1999 
2000 	priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
2001 	priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
2002 
2003 	return 0;
2004 }
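/*
 * Illustrative (hypothetical) device tree fragment for the properties parsed
 * above; node names, addresses, and values are placeholders, and the
 * spacemit,k1-emac binding document is authoritative:
 *
 *	ethernet@0 {
 *		compatible = "spacemit,k1-emac";
 *		phy-mode = "rgmii-id";
 *		phy-handle = <&phy0>;
 *		tx-internal-delay-ps = <750>;
 *		rx-internal-delay-ps = <750>;
 *		spacemit,apmu = <&apmu 0x3e4>;
 *	};
 */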
2005 
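/* devm action: undo of_phy_register_fixed_link() on driver detach */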
2006 static void emac_phy_deregister_fixed_link(void *data)
2007 {
2008 	struct device_node *of_node = data;
2009 
2010 	of_phy_deregister_fixed_link(of_node);
2011 }
2012 
2013 static int emac_probe(struct platform_device *pdev)
2014 {
2015 	struct device *dev = &pdev->dev;
2016 	struct reset_control *reset;
2017 	struct net_device *ndev;
2018 	struct emac_priv *priv;
2019 	int ret;
2020 
2021 	ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
2022 	if (!ndev)
2023 		return -ENOMEM;
2024 
2025 	ndev->hw_features = NETIF_F_SG;
2026 	ndev->features |= ndev->hw_features;
2027 
2028 	ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN);
2029 	ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
2030 
2031 	priv = netdev_priv(ndev);
2032 	priv->ndev = ndev;
2033 	priv->pdev = pdev;
2034 	platform_set_drvdata(pdev, priv);
2035 
2036 	ret = emac_config_dt(pdev, priv);
2037 	if (ret < 0)
2038 		return dev_err_probe(dev, ret, "Configuration failed\n");
2039 
2040 	ndev->watchdog_timeo = 5 * HZ;
2041 	ndev->base_addr = (unsigned long)priv->iobase;
2042 	ndev->irq = priv->irq;
2043 
2044 	ndev->ethtool_ops = &emac_ethtool_ops;
2045 	ndev->netdev_ops = &emac_netdev_ops;
2046 
2047 	devm_pm_runtime_enable(&pdev->dev);
2048 
2049 	priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
2050 	if (IS_ERR(priv->bus_clk))
2051 		return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
2052 				     "Failed to get clock\n");
2053 
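	/* An optional reset line is deasserted here; devm reasserts it
	 * automatically when the driver detaches
	 */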
2054 	reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
2055 								     NULL);
2056 	if (IS_ERR(reset))
2057 		return dev_err_probe(dev, PTR_ERR(reset),
2058 				     "Failed to get reset\n");
2059 
2060 	if (of_phy_is_fixed_link(dev->of_node)) {
2061 		ret = of_phy_register_fixed_link(dev->of_node);
2062 		if (ret)
2063 			return dev_err_probe(dev, ret,
2064 					     "Failed to register fixed-link\n");
2065 
2066 		ret = devm_add_action_or_reset(dev,
2067 					       emac_phy_deregister_fixed_link,
2068 					       dev->of_node);
2069 
2070 		if (ret) {
2071 			dev_err(dev, "devm_add_action_or_reset failed\n");
2072 			return ret;
2073 		}
2074 	}
2075 
2076 	emac_sw_init(priv);
2077 
2078 	ret = emac_mdio_init(priv);
2079 	if (ret)
2080 		goto err_timer_delete;
2081 
	/* Add NAPI before registration: the netdev can be opened as soon as
	 * devm_register_netdev() returns
	 */
2082 	netif_napi_add(ndev, &priv->napi, emac_rx_poll);
2083 	netif_carrier_off(ndev);
2084 
2085 	SET_NETDEV_DEV(ndev, &pdev->dev);
2086 
2087 	ret = devm_register_netdev(dev, ndev);
2088 	if (ret) {
2089 		dev_err(dev, "devm_register_netdev failed\n");
2090 		goto err_timer_delete;
2091 	}
2092 
2093 	return 0;
2094 
2095 err_timer_delete:
2096 	timer_delete_sync(&priv->txtimer);
2097 	timer_delete_sync(&priv->stats_timer);
2098 
2099 	return ret;
2100 }
2101 
2102 static void emac_remove(struct platform_device *pdev)
2103 {
2104 	struct emac_priv *priv = platform_get_drvdata(pdev);
2105 
2106 	timer_shutdown_sync(&priv->txtimer);
2107 	cancel_work_sync(&priv->tx_timeout_task);
2108 
2109 	timer_shutdown_sync(&priv->stats_timer);
2110 
2111 	emac_reset_hw(priv);
2112 }
2113 
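/*
 * System resume: re-enable the bus clock and, if the interface was running
 * when we suspended, reopen it and restart the stats timer.
 */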
2114 static int emac_resume(struct device *dev)
2115 {
2116 	struct emac_priv *priv = dev_get_drvdata(dev);
2117 	struct net_device *ndev = priv->ndev;
2118 	int ret;
2119 
2120 	ret = clk_prepare_enable(priv->bus_clk);
2121 	if (ret < 0) {
2122 		dev_err(dev, "Failed to enable bus clock: %d\n", ret);
2123 		return ret;
2124 	}
2125 
2126 	if (!netif_running(ndev))
2127 		return 0;
2128 
2129 	ret = emac_open(ndev);
2130 	if (ret) {
2131 		clk_disable_unprepare(priv->bus_clk);
2132 		return ret;
2133 	}
2134 
2135 	netif_device_attach(ndev);
2136 
2137 	mod_timer(&priv->stats_timer, jiffies);
2138 
2139 	return 0;
2140 }
2141 
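/*
 * System suspend: stop a running interface before gating the bus clock; a
 * closed interface only needs the clock disabled.
 */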
2142 static int emac_suspend(struct device *dev)
2143 {
2144 	struct emac_priv *priv = dev_get_drvdata(dev);
2145 	struct net_device *ndev = priv->ndev;
2146 
2147 	if (!ndev || !netif_running(ndev)) {
2148 		clk_disable_unprepare(priv->bus_clk);
2149 		return 0;
2150 	}
2151 
2152 	emac_stop(ndev);
2153 
2154 	clk_disable_unprepare(priv->bus_clk);
2155 	netif_device_detach(ndev);
2156 	return 0;
2157 }
2158 
2159 static const struct dev_pm_ops emac_pm_ops = {
2160 	SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
2161 };
2162 
2163 static const struct of_device_id emac_of_match[] = {
2164 	{ .compatible = "spacemit,k1-emac" },
2165 	{ /* sentinel */ },
2166 };
2167 MODULE_DEVICE_TABLE(of, emac_of_match);
2168 
2169 static struct platform_driver emac_driver = {
2170 	.probe = emac_probe,
2171 	.remove = emac_remove,
2172 	.driver = {
2173 		.name = DRIVER_NAME,
2174 		.of_match_table = of_match_ptr(emac_of_match),
2175 		.pm = &emac_pm_ops,
2176 	},
2177 };
2178 module_platform_driver(emac_driver);
2179 
2180 MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
2181 MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
2182 MODULE_LICENSE("GPL");
2183