// SPDX-License-Identifier: GPL-2.0
/*
 * SpacemiT K1 Ethernet driver
 *
 * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
 * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/rtnetlink.h>
#include <linux/timer.h>
#include <linux/types.h>

#include "k1_emac.h"

#define DRIVER_NAME "k1_emac"

#define EMAC_DEFAULT_BUFSIZE 1536
#define EMAC_RX_BUF_2K 2048
#define EMAC_RX_BUF_4K 4096

/* Tuning parameters from SpacemiT */
#define EMAC_TX_FRAMES 64
#define EMAC_TX_COAL_TIMEOUT 40000
#define EMAC_RX_FRAMES 64
#define EMAC_RX_COAL_TIMEOUT (600 * 312)

#define DEFAULT_FC_PAUSE_TIME 0xffff
#define DEFAULT_FC_FIFO_HIGH 1600
#define DEFAULT_TX_ALMOST_FULL 0x1f8
#define DEFAULT_TX_THRESHOLD 1518
#define DEFAULT_RX_THRESHOLD 12
#define DEFAULT_TX_RING_NUM 1024
#define DEFAULT_RX_RING_NUM 1024
#define DEFAULT_DMA_BURST MREGBIT_BURST_16WORD
#define HASH_TABLE_SIZE 64

struct desc_buf {
        u64 dma_addr;
        void *buff_addr;
        u16 dma_len;
        u8 map_as_page;
};

struct emac_tx_desc_buffer {
        struct sk_buff *skb;
        struct desc_buf buf[2];
};

struct emac_rx_desc_buffer {
        struct sk_buff *skb;
        u64 dma_addr;
        void *buff_addr;
        u16 dma_len;
        u8 map_as_page;
};

/**
 * struct emac_desc_ring - Software-side information for one descriptor ring
 * Same structure used for both RX and TX
 * @desc_addr: Virtual address to the descriptor ring memory
 * @desc_dma_addr: DMA address of the descriptor ring
 * @total_size: Size of ring in bytes
 * @total_cnt: Number of descriptors
 * @head: Next descriptor to associate a buffer with
 * @tail: Next descriptor to check status bit
 * @rx_desc_buf: Array of descriptors for RX
 * @tx_desc_buf: Array of descriptors for TX, with max of two buffers each
 */
struct emac_desc_ring {
        void *desc_addr;
        dma_addr_t desc_dma_addr;
        u32 total_size;
        u32 total_cnt;
        u32 head;
        u32 tail;
        union {
                struct emac_rx_desc_buffer *rx_desc_buf;
                struct emac_tx_desc_buffer *tx_desc_buf;
        };
};

struct emac_priv {
        void __iomem *iobase;
        u32 dma_buf_sz;
        struct emac_desc_ring tx_ring;
        struct emac_desc_ring rx_ring;

        struct net_device *ndev;
        struct napi_struct napi;
        struct platform_device *pdev;
        struct clk *bus_clk;
        struct clk *ref_clk;
        struct regmap *regmap_apmu;
        u32 regmap_apmu_offset;
        int irq;

        phy_interface_t phy_interface;

        union emac_hw_tx_stats tx_stats, tx_stats_off;
        union emac_hw_rx_stats rx_stats, rx_stats_off;

        u32 tx_count_frames;
        u32 tx_coal_frames;
        u32 tx_coal_timeout;
        struct work_struct tx_timeout_task;

        struct timer_list txtimer;
        struct timer_list stats_timer;

        u32 tx_delay;
        u32 rx_delay;

        bool flow_control_autoneg;
        u8 flow_control;

        /* Softirq-safe, hold while touching hardware statistics */
        spinlock_t stats_lock;
};

static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
{
        writel(val, priv->iobase + reg);
}

static u32 emac_rd(struct emac_priv *priv, u32 reg)
{
        return readl(priv->iobase + reg);
}

static int emac_phy_interface_config(struct emac_priv *priv)
{
        u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;

        if (phy_interface_mode_is_rgmii(priv->phy_interface))
                val |= PHY_INTF_RGMII;

        regmap_update_bits(priv->regmap_apmu,
                           priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
                           mask, val);

        return 0;
}

/*
 * Where the hardware expects a MAC address, it is laid out in this high, med,
 * low order in three consecutive registers and in this format.
 */
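/*
 * Worked example (illustrative values only): for the address
 * 00:11:22:33:44:55, the three writes below are reg + 0 = 0x1100,
 * reg + 4 = 0x3322 and reg + 8 = 0x5544, i.e. each register holds two
 * bytes, low byte in the low half.
 */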

static void emac_set_mac_addr_reg(struct emac_priv *priv,
                                  const unsigned char *addr,
                                  u32 reg)
{
        emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
        emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
        emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
}

static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
{
        /* We use only one address, so set the same for flow control as well */
        emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
        emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
}

static void emac_reset_hw(struct emac_priv *priv)
{
        /* Disable all interrupts */
        emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
        emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

        /* Disable transmit and receive units */
        emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
        emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

        /* Disable DMA */
        emac_wr(priv, DMA_CONTROL, 0x0);
}

static void emac_init_hw(struct emac_priv *priv)
{
        /* Destination address for 802.3x Ethernet flow control */
        u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };

        u32 rxirq = 0, dma = 0;

        regmap_set_bits(priv->regmap_apmu,
                        priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
                        AXI_SINGLE_ID);

        /* Disable transmit and receive units */
        emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
        emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

        /* Enable MAC address 1 filtering */
        emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);

        /* Zero initialize the multicast hash table */
        emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
        emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
        emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
        emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);

        /* Configure thresholds */
        emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
        emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
                DEFAULT_TX_THRESHOLD);
        emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);

        /* Configure flow control (enabled in emac_adjust_link() later) */
        emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
        emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
        emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
        emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);

        /* RX IRQ mitigation */
        rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
                           EMAC_RX_FRAMES);
        rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
                            EMAC_RX_COAL_TIMEOUT);
        rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
        emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);

        /* Disable and set DMA config */
        emac_wr(priv, DMA_CONTROL, 0x0);

        emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
        usleep_range(9000, 10000);
        emac_wr(priv, DMA_CONFIGURATION, 0x0);
        usleep_range(9000, 10000);

        dma |= MREGBIT_STRICT_BURST;
        dma |= MREGBIT_DMA_64BIT_MODE;
        dma |= DEFAULT_DMA_BURST;

        emac_wr(priv, DMA_CONFIGURATION, dma);
}

static void emac_dma_start_transmit(struct emac_priv *priv)
{
        /* The actual value written does not matter */
        emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
}

static void emac_enable_interrupt(struct emac_priv *priv)
{
        u32 val;

        val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
        val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
        val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
        emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}

static void emac_disable_interrupt(struct emac_priv *priv)
{
        u32 val;

        val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
        val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
        val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
        emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}

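/*
 * Ring occupancy example (illustrative): with total_cnt = 8, head = 6 and
 * tail = 2, descriptors 2..5 are in flight, so 8 - 6 + 2 - 1 = 3 further
 * descriptors may be queued; one slot is always kept free so that a full
 * ring can be told apart from an empty one.
 */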
static u32 emac_tx_avail(struct emac_priv *priv)
{
        struct emac_desc_ring *tx_ring = &priv->tx_ring;
        u32 avail;

        if (tx_ring->tail > tx_ring->head)
                avail = tx_ring->tail - tx_ring->head - 1;
        else
                avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;

        return avail;
}

static void emac_tx_coal_timer_resched(struct emac_priv *priv)
{
        mod_timer(&priv->txtimer,
                  jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
}

static void emac_tx_coal_timer(struct timer_list *t)
{
        struct emac_priv *priv = timer_container_of(priv, t, txtimer);

        napi_schedule(&priv->napi);
}

static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
{
        priv->tx_count_frames += pkt_num;
        if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
                emac_tx_coal_timer_resched(priv);
                return false;
        }

        priv->tx_count_frames = 0;
        return true;
}

static void emac_free_tx_buf(struct emac_priv *priv, int i)
{
        struct emac_tx_desc_buffer *tx_buf;
        struct emac_desc_ring *tx_ring;
        struct desc_buf *buf;
        int j;

        tx_ring = &priv->tx_ring;
        tx_buf = &tx_ring->tx_desc_buf[i];

        for (j = 0; j < 2; j++) {
                buf = &tx_buf->buf[j];
                if (!buf->dma_addr)
                        continue;

                if (buf->map_as_page)
                        dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
                                       buf->dma_len, DMA_TO_DEVICE);
                else
                        dma_unmap_single(&priv->pdev->dev,
                                         buf->dma_addr, buf->dma_len,
                                         DMA_TO_DEVICE);

                buf->dma_addr = 0;
                buf->map_as_page = false;
                buf->buff_addr = NULL;
        }

        if (tx_buf->skb) {
                dev_kfree_skb_any(tx_buf->skb);
                tx_buf->skb = NULL;
        }
}

static void emac_clean_tx_desc_ring(struct emac_priv *priv)
{
        struct emac_desc_ring *tx_ring = &priv->tx_ring;
        u32 i;

        for (i = 0; i < tx_ring->total_cnt; i++)
                emac_free_tx_buf(priv, i);

        tx_ring->head = 0;
        tx_ring->tail = 0;
}

static void emac_clean_rx_desc_ring(struct emac_priv *priv)
{
        struct emac_rx_desc_buffer *rx_buf;
        struct emac_desc_ring *rx_ring;
        u32 i;

        rx_ring = &priv->rx_ring;

        for (i = 0; i < rx_ring->total_cnt; i++) {
                rx_buf = &rx_ring->rx_desc_buf[i];

                if (!rx_buf->skb)
                        continue;

                dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
                                 rx_buf->dma_len, DMA_FROM_DEVICE);

                dev_kfree_skb(rx_buf->skb);
                rx_buf->skb = NULL;
        }

        rx_ring->tail = 0;
        rx_ring->head = 0;
}

static int emac_alloc_tx_resources(struct emac_priv *priv)
{
        struct emac_desc_ring *tx_ring = &priv->tx_ring;
        struct platform_device *pdev = priv->pdev;

        tx_ring->tx_desc_buf = kcalloc(tx_ring->total_cnt,
                                       sizeof(*tx_ring->tx_desc_buf),
                                       GFP_KERNEL);

        if (!tx_ring->tx_desc_buf)
                return -ENOMEM;

        tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
        tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);

        tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
                                                &tx_ring->desc_dma_addr,
                                                GFP_KERNEL);
        if (!tx_ring->desc_addr) {
                kfree(tx_ring->tx_desc_buf);
                return -ENOMEM;
        }

        tx_ring->head = 0;
        tx_ring->tail = 0;

        return 0;
}

static int emac_alloc_rx_resources(struct emac_priv *priv)
{
        struct emac_desc_ring *rx_ring = &priv->rx_ring;
        struct platform_device *pdev = priv->pdev;

        rx_ring->rx_desc_buf = kcalloc(rx_ring->total_cnt,
                                       sizeof(*rx_ring->rx_desc_buf),
                                       GFP_KERNEL);
        if (!rx_ring->rx_desc_buf)
                return -ENOMEM;

        rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);

        rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);

        rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
                                                &rx_ring->desc_dma_addr,
                                                GFP_KERNEL);
        if (!rx_ring->desc_addr) {
                kfree(rx_ring->rx_desc_buf);
                return -ENOMEM;
        }

        rx_ring->head = 0;
        rx_ring->tail = 0;

        return 0;
}

static void emac_free_tx_resources(struct emac_priv *priv)
{
        struct emac_desc_ring *tr = &priv->tx_ring;
        struct device *dev = &priv->pdev->dev;

        emac_clean_tx_desc_ring(priv);

        kfree(tr->tx_desc_buf);
        tr->tx_desc_buf = NULL;

        dma_free_coherent(dev, tr->total_size, tr->desc_addr,
                          tr->desc_dma_addr);
        tr->desc_addr = NULL;
}

static void emac_free_rx_resources(struct emac_priv *priv)
{
        struct emac_desc_ring *rr = &priv->rx_ring;
        struct device *dev = &priv->pdev->dev;

        emac_clean_rx_desc_ring(priv);

        kfree(rr->rx_desc_buf);
        rr->rx_desc_buf = NULL;

        dma_free_coherent(dev, rr->total_size, rr->desc_addr,
                          rr->desc_dma_addr);
        rr->desc_addr = NULL;
}

static int emac_tx_clean_desc(struct emac_priv *priv)
{
        struct net_device *ndev = priv->ndev;
        struct emac_desc_ring *tx_ring;
        struct emac_desc *tx_desc;
        u32 i;

        netif_tx_lock(ndev);

        tx_ring = &priv->tx_ring;

        i = tx_ring->tail;

        while (i != tx_ring->head) {
                tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];

                /* Stop checking if desc is still owned by DMA */
                if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
                        break;

                emac_free_tx_buf(priv, i);
                memset(tx_desc, 0, sizeof(struct emac_desc));

                if (++i == tx_ring->total_cnt)
                        i = 0;
        }

        tx_ring->tail = i;

        if (unlikely(netif_queue_stopped(ndev) &&
                     emac_tx_avail(priv) > tx_ring->total_cnt / 4))
                netif_wake_queue(ndev);

        netif_tx_unlock(ndev);

        return 0;
}

static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
{
        const char *msg;
        u32 len;

        len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);

        if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
                msg = "Not last descriptor"; /* This would be a bug */
        else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
                msg = "Runt frame";
        else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
                msg = "Frame CRC error";
        else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
                msg = "Frame exceeds max length";
        else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
                msg = "Frame jabber error";
        else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
                msg = "Frame length error";
        else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
                msg = "Frame length unacceptable";
        else
                return true; /* All good */

        dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);

        return false;
}

static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
{
        struct emac_desc_ring *rx_ring = &priv->rx_ring;
        struct emac_desc rx_desc, *rx_desc_addr;
        struct net_device *ndev = priv->ndev;
        struct emac_rx_desc_buffer *rx_buf;
        struct sk_buff *skb;
        u32 i;

        i = rx_ring->head;
        rx_buf = &rx_ring->rx_desc_buf[i];

        while (!rx_buf->skb) {
                skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
                if (!skb)
                        break;

                skb->dev = ndev;

                rx_buf->skb = skb;
                rx_buf->dma_len = priv->dma_buf_sz;
                rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
                                                  priv->dma_buf_sz,
                                                  DMA_FROM_DEVICE);
                if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
                        dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
                        goto err_free_skb;
                }

                rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];

                memset(&rx_desc, 0, sizeof(rx_desc));

                rx_desc.buffer_addr_1 = rx_buf->dma_addr;
                rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
                                           rx_buf->dma_len);

                if (++i == rx_ring->total_cnt) {
                        rx_desc.desc1 |= RX_DESC_1_END_RING;
                        i = 0;
                }

                *rx_desc_addr = rx_desc;
                dma_wmb();
                WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);

                rx_buf = &rx_ring->rx_desc_buf[i];
        }

        rx_ring->head = i;
        return;

err_free_skb:
        dev_kfree_skb_any(skb);
        rx_buf->skb = NULL;
}

/* Returns number of packets received */
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
{
        struct net_device *ndev = priv->ndev;
        struct emac_rx_desc_buffer *rx_buf;
        struct emac_desc_ring *rx_ring;
        struct sk_buff *skb = NULL;
        struct emac_desc *rx_desc;
        u32 got = 0, skb_len, i;

        rx_ring = &priv->rx_ring;

        i = rx_ring->tail;

        while (budget--) {
                rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];

                /* Stop checking if rx_desc still owned by DMA */
                if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
                        break;

                dma_rmb();

                rx_buf = &rx_ring->rx_desc_buf[i];

                if (!rx_buf->skb)
                        break;

                got++;

                dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
                                 rx_buf->dma_len, DMA_FROM_DEVICE);

                if (likely(emac_rx_frame_good(priv, rx_desc))) {
                        skb = rx_buf->skb;

                        skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
                                            rx_desc->desc0);
                        skb_len -= ETH_FCS_LEN;

                        skb_put(skb, skb_len);
                        skb->dev = ndev;
                        ndev->hard_header_len = ETH_HLEN;

                        skb->protocol = eth_type_trans(skb, ndev);

                        skb->ip_summed = CHECKSUM_NONE;

                        napi_gro_receive(&priv->napi, skb);

                        memset(rx_desc, 0, sizeof(struct emac_desc));
                        rx_buf->skb = NULL;
                } else {
                        dev_kfree_skb_irq(rx_buf->skb);
                        rx_buf->skb = NULL;
                }

                if (++i == rx_ring->total_cnt)
                        i = 0;
        }

        rx_ring->tail = i;

        emac_alloc_rx_desc_buffers(priv);

        return got;
}

static int emac_rx_poll(struct napi_struct *napi, int budget)
{
        struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
        int work_done;

        emac_tx_clean_desc(priv);

        work_done = emac_rx_clean_desc(priv, budget);
        if (work_done < budget && napi_complete_done(napi, work_done))
                emac_enable_interrupt(priv);

        return work_done;
}

/*
 * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
 *
 * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
 * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
 */
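/*
 * For instance (illustrative): an skb with skb->data plus three page
 * fragments has four DMA fragments in total, so descriptor N carries
 * skb->data (buffer 1) and frags[0] (buffer 2), and descriptor N + 1
 * carries frags[1] (buffer 1) and frags[2] (buffer 2).
 */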

static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
                            struct emac_tx_desc_buffer *tx_buf,
                            struct sk_buff *skb, u32 frag_idx)
{
        bool map_as_page, buf_idx;
        const skb_frag_t *frag;
        phys_addr_t addr;
        u32 len;
        int ret;

        buf_idx = frag_idx % 2;

        if (frag_idx == 0) {
                /* Non-fragmented part */
                len = skb_headlen(skb);
                addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                map_as_page = false;
        } else {
                /* Fragment */
                frag = &skb_shinfo(skb)->frags[frag_idx - 1];
                len = skb_frag_size(frag);
                addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
                map_as_page = true;
        }

        ret = dma_mapping_error(dev, addr);
        if (ret)
                return ret;

        tx_buf->buf[buf_idx].dma_addr = addr;
        tx_buf->buf[buf_idx].dma_len = len;
        tx_buf->buf[buf_idx].map_as_page = map_as_page;

        if (buf_idx == 0) {
                tx_desc->buffer_addr_1 = addr;
                tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
        } else {
                tx_desc->buffer_addr_2 = addr;
                tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
        }

        return 0;
}

static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
{
        struct emac_desc_ring *tx_ring = &priv->tx_ring;
        struct emac_desc tx_desc, *tx_desc_addr;
        struct device *dev = &priv->pdev->dev;
        struct emac_tx_desc_buffer *tx_buf;
        u32 head, old_head, frag_num, f;
        bool buf_idx;

        frag_num = skb_shinfo(skb)->nr_frags;
        head = tx_ring->head;
        old_head = head;

        for (f = 0; f < frag_num + 1; f++) {
                buf_idx = f % 2;

                /*
                 * If using buffer 1, initialize a new desc. Otherwise, use
                 * buffer 2 of previous fragment's desc.
                 */
                if (!buf_idx) {
                        tx_buf = &tx_ring->tx_desc_buf[head];
                        tx_desc_addr =
                                &((struct emac_desc *)tx_ring->desc_addr)[head];
                        memset(&tx_desc, 0, sizeof(tx_desc));

                        /*
                         * Give ownership for all but first desc initially. For
                         * first desc, give at the end so DMA cannot start
                         * reading uninitialized descs.
                         */
                        if (head != old_head)
                                tx_desc.desc0 |= TX_DESC_0_OWN;

                        if (++head == tx_ring->total_cnt) {
                                /* Just used last desc in ring */
                                tx_desc.desc1 |= TX_DESC_1_END_RING;
                                head = 0;
                        }
                }

                if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
                        dev_err_ratelimited(&priv->ndev->dev,
                                            "Map TX frag %d failed\n", f);
                        goto err_free_skb;
                }

                if (f == 0)
                        tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;

                if (f == frag_num) {
                        tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
                        tx_buf->skb = skb;
                        if (emac_tx_should_interrupt(priv, frag_num + 1))
                                tx_desc.desc1 |=
                                        TX_DESC_1_INTERRUPT_ON_COMPLETION;
                }

                *tx_desc_addr = tx_desc;
        }

        /* All descriptors are ready, give ownership for first desc */
        tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
        dma_wmb();
        WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);

        emac_dma_start_transmit(priv);

        tx_ring->head = head;

        return;

err_free_skb:
        dev_dstats_tx_dropped(priv->ndev);
        dev_kfree_skb_any(skb);
}

static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct emac_priv *priv = netdev_priv(ndev);
        int nfrags = skb_shinfo(skb)->nr_frags;
        struct device *dev = &priv->pdev->dev;

        if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
                if (!netif_queue_stopped(ndev)) {
                        netif_stop_queue(ndev);
                        dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
                }
                return NETDEV_TX_BUSY;
        }

        emac_tx_mem_map(priv, skb);

        /* Make sure there is space in the ring for the next TX. */
        if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
                netif_stop_queue(ndev);

        return NETDEV_TX_OK;
}

static int emac_set_mac_address(struct net_device *ndev, void *addr)
{
        struct emac_priv *priv = netdev_priv(ndev);
        int ret = eth_mac_addr(ndev, addr);

        if (ret)
                return ret;

        /* If running, set now; if not running it will be set in emac_up. */
        if (netif_running(ndev))
                emac_set_mac_addr(priv, ndev->dev_addr);

        return 0;
}

static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
{
        emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
        emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
        emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
        emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
}

/*
 * The upper 6 bits of the Ethernet CRC of the MAC address are used as the hash
 * when matching multicast addresses.
 */
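/*
 * Illustrative example: a hash value of 45 (0b101101) selects bit
 * 45 % 16 = 13 of register 45 / 16 = 2, i.e. BIT(13) of
 * MAC_MULTICAST_HASH_TABLE3, matching the bits[hash] layout used in
 * emac_set_rx_mode() below.
 */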
static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
{
        u32 crc32 = ether_crc(ETH_ALEN, addr);

        return crc32 >> 26;
}

/* Configure Multicast and Promiscuous modes */
static void emac_set_rx_mode(struct net_device *ndev)
{
        struct emac_priv *priv = netdev_priv(ndev);
        struct netdev_hw_addr *ha;
        u32 mc_filter[4] = { 0 };
        u32 hash, reg, bit, val;

        val = emac_rd(priv, MAC_ADDRESS_CONTROL);

        val &= ~MREGBIT_PROMISCUOUS_MODE;

        if (ndev->flags & IFF_PROMISC) {
                /* Enable promisc mode */
                val |= MREGBIT_PROMISCUOUS_MODE;
        } else if ((ndev->flags & IFF_ALLMULTI) ||
                   (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
                /* Accept all multicast frames by setting every bit */
                emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
                emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
                emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
                emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
        } else if (!netdev_mc_empty(ndev)) {
                emac_mac_multicast_filter_clear(priv);
                netdev_for_each_mc_addr(ha, ndev) {
                        /*
                         * The hash table is an array of 4 16-bit registers. It
                         * is treated like an array of 64 bits (bits[hash]).
                         */
                        hash = emac_ether_addr_hash(ha->addr);
                        reg = hash / 16;
                        bit = hash % 16;
                        mc_filter[reg] |= BIT(bit);
                }
                emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
                emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
                emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
                emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
        }

        emac_wr(priv, MAC_ADDRESS_CONTROL, val);
}

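/*
 * Illustrative example: the default MTU of 1500 gives a frame length of
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518, which fits the default
 * 1536-byte buffers, while e.g. an MTU of 1600 would select the 2 KiB RX
 * buffers.
 */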
static int emac_change_mtu(struct net_device *ndev, int mtu)
{
        struct emac_priv *priv = netdev_priv(ndev);
        u32 frame_len;

        if (netif_running(ndev)) {
                netdev_err(ndev, "must be stopped to change MTU\n");
                return -EBUSY;
        }

        frame_len = mtu + ETH_HLEN + ETH_FCS_LEN;

        if (frame_len <= EMAC_DEFAULT_BUFSIZE)
                priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
        else if (frame_len <= EMAC_RX_BUF_2K)
                priv->dma_buf_sz = EMAC_RX_BUF_2K;
        else
                priv->dma_buf_sz = EMAC_RX_BUF_4K;

        ndev->mtu = mtu;

        return 0;
}

static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
        struct emac_priv *priv = netdev_priv(ndev);

        schedule_work(&priv->tx_timeout_task);
}

static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
{
        struct emac_priv *priv = bus->priv;
        u32 cmd = 0, val;
        int ret;

        cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
        cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
        cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;

        emac_wr(priv, MAC_MDIO_DATA, 0x0);
        emac_wr(priv, MAC_MDIO_CONTROL, cmd);

        ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
                                 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

        if (ret)
                return ret;

        val = emac_rd(priv, MAC_MDIO_DATA);
        return FIELD_GET(MREGBIT_MDIO_DATA, val);
}

static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
                          u16 value)
{
        struct emac_priv *priv = bus->priv;
        u32 cmd = 0, val;
        int ret;

        emac_wr(priv, MAC_MDIO_DATA, value);

        cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
        cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
        cmd |= MREGBIT_START_MDIO_TRANS;

        emac_wr(priv, MAC_MDIO_CONTROL, cmd);

        ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
                                 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

        return ret;
}

static int emac_mdio_init(struct emac_priv *priv)
{
        struct device *dev = &priv->pdev->dev;
        struct device_node *mii_np;
        struct mii_bus *mii;
        int ret;

        mii = devm_mdiobus_alloc(dev);
        if (!mii)
                return -ENOMEM;

        mii->priv = priv;
        mii->name = "k1_emac_mii";
        mii->read = emac_mii_read;
        mii->write = emac_mii_write;
        mii->parent = dev;
        mii->phy_mask = ~0;
        snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);

        mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");

        ret = devm_of_mdiobus_register(dev, mii, mii_np);
        if (ret)
                dev_err_probe(dev, ret, "Failed to register mdio bus\n");

        of_node_put(mii_np);
        return ret;
}

static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
{
        u32 val;

        val = emac_rd(priv, MAC_FC_CONTROL);

        FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
        FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);

        emac_wr(priv, MAC_FC_CONTROL, val);
}

static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
{
        u32 val = emac_rd(priv, MAC_FC_CONTROL);

        FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);

        emac_wr(priv, MAC_FC_CONTROL, val);
}

static void emac_set_fc(struct emac_priv *priv, u8 fc)
{
        emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
        emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
        priv->flow_control = fc;
}

static void emac_set_fc_autoneg(struct emac_priv *priv)
{
        struct phy_device *phydev = priv->ndev->phydev;
        u32 local_adv, remote_adv;
        u8 fc;

        local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);

        remote_adv = 0;

        if (phydev->pause)
                remote_adv |= LPA_PAUSE_CAP;

        if (phydev->asym_pause)
                remote_adv |= LPA_PAUSE_ASYM;

        fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);

        priv->flow_control_autoneg = true;

        emac_set_fc(priv, fc);
}

/*
 * Even though this MAC supports gigabit operation, it only provides 32-bit
 * statistics counters. The most overflow-prone counters are the "bytes" ones,
 * which at gigabit overflow about twice a minute.
 *
 * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
 * every time statistics seem to go backwards. Also, update periodically to
 * catch overflows when we are not otherwise checking the statistics often
 * enough.
 */
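/*
 * Back-of-the-envelope check: at line rate, gigabit moves ~125 MB/s, so a
 * 32-bit byte counter (2^32 bytes) wraps after about
 * 4294967296 / 125000000 ~= 34 seconds, i.e. roughly twice a minute as
 * noted above.
 */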

#define EMAC_STATS_TIMER_PERIOD 20

static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
                              u32 control_reg, u32 high_reg, u32 low_reg)
{
        u32 val, high, low;
        int ret;

        /* The "read" bit is the same for TX and RX */

        val = MREGBIT_START_TX_COUNTER_READ | cnt;
        emac_wr(priv, control_reg, val);
        val = emac_rd(priv, control_reg);

        ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
                                        !(val & MREGBIT_START_TX_COUNTER_READ),
                                        100, 10000);

        if (ret) {
                netdev_err(priv->ndev, "Read stat timeout\n");
                return ret;
        }

        high = emac_rd(priv, high_reg);
        low = emac_rd(priv, low_reg);
        *res = high << 16 | lower_16_bits(low);

        return 0;
}

static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
        return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
                                  MAC_TX_STATCTR_DATA_HIGH,
                                  MAC_TX_STATCTR_DATA_LOW);
}

static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
        return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
                                  MAC_RX_STATCTR_DATA_HIGH,
                                  MAC_RX_STATCTR_DATA_LOW);
}

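/*
 * Illustrative example: if *counter was 0x1fffffff0 (old_low 0xfffffff0)
 * and the hardware now reports new_low = 0x10, then old_low > new_low,
 * the high half is bumped from 1 to 2, and the result is 0x200000010.
 */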
static void emac_update_counter(u64 *counter, u32 new_low)
{
        u32 old_low = lower_32_bits(*counter);
        u64 high = upper_32_bits(*counter);

        if (old_low > new_low) {
                /* Overflowed, increment high 32 bits */
                high++;
        }

        *counter = (high << 32) | new_low;
}

static void emac_stats_update(struct emac_priv *priv)
{
        u64 *tx_stats_off = priv->tx_stats_off.array;
        u64 *rx_stats_off = priv->rx_stats_off.array;
        u64 *tx_stats = priv->tx_stats.array;
        u64 *rx_stats = priv->rx_stats.array;
        u32 i, res, offset;

        assert_spin_locked(&priv->stats_lock);

        if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) {
                /* Not up, don't try to update */
                return;
        }

        for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
                /*
                 * If reading stats times out, everything is broken and there's
                 * nothing we can do. Reading statistics also can't return an
                 * error, so just return without updating and without
                 * rescheduling.
                 */
                if (emac_tx_read_stat_cnt(priv, i, &res))
                        return;

                /*
                 * Re-initializing while bringing interface up resets counters
                 * to zero, so to provide continuity, we add the values saved
                 * last time we did emac_down() to the new hardware-provided
                 * value.
                 */
                offset = lower_32_bits(tx_stats_off[i]);
                emac_update_counter(&tx_stats[i], res + offset);
        }

        /* Similar remarks as TX stats */
        for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
                if (emac_rx_read_stat_cnt(priv, i, &res))
                        return;
                offset = lower_32_bits(rx_stats_off[i]);
                emac_update_counter(&rx_stats[i], res + offset);
        }

        mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
}

static void emac_stats_timer(struct timer_list *t)
{
        struct emac_priv *priv = timer_container_of(priv, t, stats_timer);

        spin_lock(&priv->stats_lock);

        emac_stats_update(priv);

        spin_unlock(&priv->stats_lock);
}

static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
        { 64, 64 },
        { 65, 127 },
        { 128, 255 },
        { 256, 511 },
        { 512, 1023 },
        { 1024, 1518 },
        { 1519, 4096 },
        { /* sentinel */ },
};

/* Like dev_fetch_dstats(), but we only use tx_drops */
static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
{
        const struct pcpu_dstats *stats;
        u64 tx_drops, total = 0;
        unsigned int start;
        int cpu;

        for_each_possible_cpu(cpu) {
                stats = per_cpu_ptr(priv->ndev->dstats, cpu);
                do {
                        start = u64_stats_fetch_begin(&stats->syncp);
                        tx_drops = u64_stats_read(&stats->tx_drops);
                } while (u64_stats_fetch_retry(&stats->syncp, start));

                total += tx_drops;
        }

        return total;
}

static void emac_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *storage)
{
        struct emac_priv *priv = netdev_priv(dev);
        union emac_hw_tx_stats *tx_stats;
        union emac_hw_rx_stats *rx_stats;

        tx_stats = &priv->tx_stats;
        rx_stats = &priv->rx_stats;

        /* This is the only software counter */
        storage->tx_dropped = emac_get_stat_tx_drops(priv);

        spin_lock_bh(&priv->stats_lock);

        emac_stats_update(priv);

        storage->tx_packets = tx_stats->stats.tx_ok_pkts;
        storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
        storage->tx_errors = tx_stats->stats.tx_err_pkts;

        storage->rx_packets = rx_stats->stats.rx_ok_pkts;
        storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
        storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
        storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
        storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
        storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;

        storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
        storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
        storage->collisions += tx_stats->stats.tx_excessclsn_pkts;

        storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
        storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;

        spin_unlock_bh(&priv->stats_lock);
}

static void emac_get_rmon_stats(struct net_device *dev,
                                struct ethtool_rmon_stats *rmon_stats,
                                const struct ethtool_rmon_hist_range **ranges)
{
        struct emac_priv *priv = netdev_priv(dev);
        union emac_hw_rx_stats *rx_stats;

        rx_stats = &priv->rx_stats;

        *ranges = emac_rmon_hist_ranges;

        spin_lock_bh(&priv->stats_lock);

        emac_stats_update(priv);

        rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
        rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
        rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
        rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;

        /* Only RX has histogram stats */

        rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
        rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
        rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
        rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
        rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
        rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
        rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;

        spin_unlock_bh(&priv->stats_lock);
}

static void emac_get_eth_mac_stats(struct net_device *dev,
                                   struct ethtool_eth_mac_stats *mac_stats)
{
        struct emac_priv *priv = netdev_priv(dev);
        union emac_hw_tx_stats *tx_stats;
        union emac_hw_rx_stats *rx_stats;

        tx_stats = &priv->tx_stats;
        rx_stats = &priv->rx_stats;

        spin_lock_bh(&priv->stats_lock);

        emac_stats_update(priv);

        mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
        mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;

        mac_stats->MulticastFramesReceivedOK =
                rx_stats->stats.rx_multicast_pkts;
        mac_stats->BroadcastFramesReceivedOK =
                rx_stats->stats.rx_broadcast_pkts;

        mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
        mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
        mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
        mac_stats->FramesAbortedDueToXSColls =
                tx_stats->stats.tx_excessclsn_pkts;

        spin_unlock_bh(&priv->stats_lock);
}

static void emac_get_pause_stats(struct net_device *dev,
                                 struct ethtool_pause_stats *pause_stats)
{
        struct emac_priv *priv = netdev_priv(dev);
        union emac_hw_tx_stats *tx_stats;
        union emac_hw_rx_stats *rx_stats;

        tx_stats = &priv->tx_stats;
        rx_stats = &priv->rx_stats;

        spin_lock_bh(&priv->stats_lock);

        emac_stats_update(priv);

        pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
        pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;

        spin_unlock_bh(&priv->stats_lock);
}

/* Other statistics that are not derivable from standard statistics */

#define EMAC_ETHTOOL_STAT(type, name) \
        { offsetof(type, stats.name) / sizeof(u64), #name }

static const struct emac_ethtool_stats {
        size_t offset;
        char str[ETH_GSTRING_LEN];
} emac_ethtool_rx_stats[] = {
        EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
        EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
};

static int emac_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(emac_ethtool_rx_stats);
        default:
                return -EOPNOTSUPP;
        }
}

static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
                        memcpy(data, emac_ethtool_rx_stats[i].str,
                               ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                break;
        }
}

static void emac_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *stats, u64 *data)
{
        struct emac_priv *priv = netdev_priv(dev);
        u64 *rx_stats = (u64 *)&priv->rx_stats;
        int i;

        spin_lock_bh(&priv->stats_lock);

        emac_stats_update(priv);

        for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
                data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];

        spin_unlock_bh(&priv->stats_lock);
}

static int emac_ethtool_get_regs_len(struct net_device *dev)
{
        return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
}

static void emac_ethtool_get_regs(struct net_device *dev,
                                  struct ethtool_regs *regs, void *space)
{
        struct emac_priv *priv = netdev_priv(dev);
        u32 *reg_space = space;
        int i;

        regs->version = 1;

        for (i = 0; i < EMAC_DMA_REG_CNT; i++)
                reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);

        for (i = 0; i < EMAC_MAC_REG_CNT; i++)
                reg_space[i + EMAC_DMA_REG_CNT] =
                        emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
}

static void emac_get_pauseparam(struct net_device *dev,
                                struct ethtool_pauseparam *pause)
{
        struct emac_priv *priv = netdev_priv(dev);

        pause->autoneg = priv->flow_control_autoneg;
        pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
        pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
}

static int emac_set_pauseparam(struct net_device *dev,
                               struct ethtool_pauseparam *pause)
{
        struct emac_priv *priv = netdev_priv(dev);
        u8 fc = 0;

        if (!netif_running(dev))
                return -ENETDOWN;

        priv->flow_control_autoneg = pause->autoneg;

        if (pause->autoneg) {
                emac_set_fc_autoneg(priv);
        } else {
                if (pause->tx_pause)
                        fc |= FLOW_CTRL_TX;

                if (pause->rx_pause)
                        fc |= FLOW_CTRL_RX;

                emac_set_fc(priv, fc);
        }

        return 0;
}

static void emac_get_drvinfo(struct net_device *dev,
                             struct ethtool_drvinfo *info)
{
        strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
        info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
}

static void emac_tx_timeout_task(struct work_struct *work)
{
        struct net_device *ndev;
        struct emac_priv *priv;

        priv = container_of(work, struct emac_priv, tx_timeout_task);
        ndev = priv->ndev;

        rtnl_lock();

        /* No need to reset if already down */
        if (!netif_running(ndev)) {
                rtnl_unlock();
                return;
        }

        netdev_err(ndev, "MAC reset due to TX timeout\n");

        netif_trans_update(ndev); /* prevent tx timeout */
        dev_close(ndev);
        dev_open(ndev, NULL);

        rtnl_unlock();
}

static void emac_sw_init(struct emac_priv *priv)
{
        priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;

        priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
        priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;

        spin_lock_init(&priv->stats_lock);

        INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);

        priv->tx_coal_frames = EMAC_TX_FRAMES;
        priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;

        timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
        timer_setup(&priv->stats_timer, emac_stats_timer, 0);
}

static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
{
        struct net_device *ndev = (struct net_device *)dev_id;
        struct emac_priv *priv = netdev_priv(ndev);
        bool should_schedule = false;
        u32 clr = 0;
        u32 status;

        status = emac_rd(priv, DMA_STATUS_IRQ);

        if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
                clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
                should_schedule = true;
        }

        if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
                clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;

        if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
                clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;

        if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
                clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
                should_schedule = true;
        }

        if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
                clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;

        if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
                clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;

        if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
                clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;

        if (should_schedule) {
                if (napi_schedule_prep(&priv->napi)) {
                        emac_disable_interrupt(priv);
                        __napi_schedule_irqoff(&priv->napi);
                }
        }

        emac_wr(priv, DMA_STATUS_IRQ, clr);

        return IRQ_HANDLED;
}

static void emac_configure_tx(struct emac_priv *priv)
{
        u32 val;

        /* Set base address */
        val = (u32)priv->tx_ring.desc_dma_addr;
        emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);

        /* Set TX inter-frame gap value, enable transmit */
        val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
        val &= ~MREGBIT_IFG_LEN;
        val |= MREGBIT_TRANSMIT_ENABLE;
        val |= MREGBIT_TRANSMIT_AUTO_RETRY;
        emac_wr(priv, MAC_TRANSMIT_CONTROL, val);

        emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);

        /* Start TX DMA */
        val = emac_rd(priv, DMA_CONTROL);
        val |= MREGBIT_START_STOP_TRANSMIT_DMA;
        emac_wr(priv, DMA_CONTROL, val);
}

static void emac_configure_rx(struct emac_priv *priv)
{
        u32 val;

        /* Set base address */
        val = (u32)priv->rx_ring.desc_dma_addr;
        emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);

        /* Enable receive */
        val = emac_rd(priv, MAC_RECEIVE_CONTROL);
        val |= MREGBIT_RECEIVE_ENABLE;
        val |= MREGBIT_STORE_FORWARD;
        emac_wr(priv, MAC_RECEIVE_CONTROL, val);

        /* Start RX DMA */
        val = emac_rd(priv, DMA_CONTROL);
        val |= MREGBIT_START_STOP_RECEIVE_DMA;
        emac_wr(priv, DMA_CONTROL, val);
}

static void emac_adjust_link(struct net_device *dev)
{
        struct emac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        u32 ctrl;

        if (phydev->link) {
                ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);

                /* Update duplex and speed from PHY */

                FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
                             phydev->duplex == DUPLEX_FULL);

                ctrl &= ~MREGBIT_SPEED;

                switch (phydev->speed) {
                case SPEED_1000:
                        ctrl |= MREGBIT_SPEED_1000M;
                        break;
                case SPEED_100:
                        ctrl |= MREGBIT_SPEED_100M;
                        break;
                case SPEED_10:
                        ctrl |= MREGBIT_SPEED_10M;
                        break;
                default:
                        netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
                        phydev->speed = SPEED_UNKNOWN;
                        break;
                }

                emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);

                emac_set_fc_autoneg(priv);
        }

        phy_print_status(phydev);
}

static void emac_update_delay_line(struct emac_priv *priv)
{
        u32 mask = 0, val = 0;

        mask |= EMAC_RX_DLINE_EN;
        mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
        mask |= EMAC_TX_DLINE_EN;
        mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;

        if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
                val |= EMAC_RX_DLINE_EN;
                val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
                                  EMAC_DLINE_STEP_15P6);
                val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);

                val |= EMAC_TX_DLINE_EN;
                val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
                                  EMAC_DLINE_STEP_15P6);
                val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
        }

        regmap_update_bits(priv->regmap_apmu,
                           priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
                           mask, val);
}

static int emac_phy_connect(struct net_device *ndev)
{
        struct emac_priv *priv = netdev_priv(ndev);
        struct device *dev = &priv->pdev->dev;
        struct phy_device *phydev;
        struct device_node *np;
        int ret;

        ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
        if (ret) {
                netdev_err(ndev, "No phy-mode found");
                return ret;
        }

        switch (priv->phy_interface) {
        case PHY_INTERFACE_MODE_RMII:
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII_RXID:
        case PHY_INTERFACE_MODE_RGMII_TXID:
                break;
        default:
                netdev_err(ndev, "Unsupported PHY interface %s",
                           phy_modes(priv->phy_interface));
                return -EINVAL;
        }

        np = of_parse_phandle(dev->of_node, "phy-handle", 0);
        if (!np && of_phy_is_fixed_link(dev->of_node))
                np = of_node_get(dev->of_node);

        if (!np) {
                netdev_err(ndev, "No PHY specified");
                return -ENODEV;
        }

        ret = emac_phy_interface_config(priv);
        if (ret)
                goto err_node_put;

        phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
                                priv->phy_interface);
        if (!phydev) {
                netdev_err(ndev, "Could not attach to PHY\n");
                ret = -ENODEV;
                goto err_node_put;
        }

        phy_support_asym_pause(phydev);

        phydev->mac_managed_pm = true;

        emac_update_delay_line(priv);

err_node_put:
        of_node_put(np);
        return ret;
}

static int emac_up(struct emac_priv *priv)
{
        struct platform_device *pdev = priv->pdev;
        struct net_device *ndev = priv->ndev;
        int ret;

        pm_runtime_get_sync(&pdev->dev);

        ret = emac_phy_connect(ndev);
        if (ret) {
                dev_err(&pdev->dev, "emac_phy_connect failed\n");
                goto err_pm_put;
        }

        emac_init_hw(priv);

        emac_set_mac_addr(priv, ndev->dev_addr);
        emac_configure_tx(priv);
        emac_configure_rx(priv);

        emac_alloc_rx_desc_buffers(priv);

        phy_start(ndev->phydev);

        ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
                          ndev->name, ndev);
        if (ret) {
                dev_err(&pdev->dev, "request_irq failed\n");
                goto err_reset_disconnect_phy;
        }

        /* Don't enable MAC interrupts */
        emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);

        /* Enable DMA interrupts */
        emac_wr(priv, DMA_INTERRUPT_ENABLE,
                MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
                MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
                MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
                MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
                MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);

        napi_enable(&priv->napi);

        netif_start_queue(ndev);

        mod_timer(&priv->stats_timer, jiffies);

        return 0;

err_reset_disconnect_phy:
        emac_reset_hw(priv);
        phy_disconnect(ndev->phydev);

err_pm_put:
        pm_runtime_put_sync(&pdev->dev);
        return ret;
}

static int emac_down(struct emac_priv *priv)
{
        struct platform_device *pdev = priv->pdev;
        struct net_device *ndev = priv->ndev;

        netif_stop_queue(ndev);

        phy_disconnect(ndev->phydev);

        emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
        emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

        free_irq(priv->irq, ndev);

        napi_disable(&priv->napi);

        timer_delete_sync(&priv->txtimer);
        cancel_work_sync(&priv->tx_timeout_task);

        timer_delete_sync(&priv->stats_timer);

        emac_reset_hw(priv);

        /* Update and save current stats, see emac_stats_update() for usage */

        spin_lock_bh(&priv->stats_lock);

        emac_stats_update(priv);

        priv->tx_stats_off = priv->tx_stats;
        priv->rx_stats_off = priv->rx_stats;

        spin_unlock_bh(&priv->stats_lock);

        pm_runtime_put_sync(&pdev->dev);
        return 0;
}
1825
1826 /* Called when net interface is brought up. */
emac_open(struct net_device * ndev)1827 static int emac_open(struct net_device *ndev)
1828 {
1829 struct emac_priv *priv = netdev_priv(ndev);
1830 struct device *dev = &priv->pdev->dev;
1831 int ret;
1832
1833 ret = emac_alloc_tx_resources(priv);
1834 if (ret) {
1835 dev_err(dev, "Cannot allocate TX resources\n");
1836 return ret;
1837 }
1838
1839 ret = emac_alloc_rx_resources(priv);
1840 if (ret) {
1841 dev_err(dev, "Cannot allocate RX resources\n");
1842 goto err_free_tx;
1843 }
1844
1845 ret = emac_up(priv);
1846 if (ret) {
1847 dev_err(dev, "Error when bringing interface up\n");
1848 goto err_free_rx;
1849 }
1850 return 0;
1851
1852 err_free_rx:
1853 emac_free_rx_resources(priv);
1854 err_free_tx:
1855 emac_free_tx_resources(priv);
1856
1857 return ret;
1858 }
1859
1860 /* Called when interface is brought down. */
emac_stop(struct net_device * ndev)1861 static int emac_stop(struct net_device *ndev)
1862 {
1863 struct emac_priv *priv = netdev_priv(ndev);
1864
1865 emac_down(priv);
1866 emac_free_tx_resources(priv);
1867 emac_free_rx_resources(priv);
1868
1869 return 0;
1870 }
1871
1872 static const struct ethtool_ops emac_ethtool_ops = {
1873 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1874 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1875 .nway_reset = phy_ethtool_nway_reset,
1876 .get_drvinfo = emac_get_drvinfo,
1877 .get_link = ethtool_op_get_link,
1878
1879 .get_regs = emac_ethtool_get_regs,
1880 .get_regs_len = emac_ethtool_get_regs_len,
1881
1882 .get_rmon_stats = emac_get_rmon_stats,
1883 .get_pause_stats = emac_get_pause_stats,
1884 .get_eth_mac_stats = emac_get_eth_mac_stats,
1885
1886 .get_sset_count = emac_get_sset_count,
1887 .get_strings = emac_get_strings,
1888 .get_ethtool_stats = emac_get_ethtool_stats,
1889
1890 .get_pauseparam = emac_get_pauseparam,
1891 .set_pauseparam = emac_set_pauseparam,
1892 };
1893
1894 static const struct net_device_ops emac_netdev_ops = {
1895 .ndo_open = emac_open,
1896 .ndo_stop = emac_stop,
1897 .ndo_start_xmit = emac_start_xmit,
1898 .ndo_validate_addr = eth_validate_addr,
1899 .ndo_set_mac_address = emac_set_mac_address,
1900 .ndo_eth_ioctl = phy_do_ioctl_running,
1901 .ndo_change_mtu = emac_change_mtu,
1902 .ndo_tx_timeout = emac_tx_timeout,
1903 .ndo_set_rx_mode = emac_set_rx_mode,
1904 .ndo_get_stats64 = emac_get_stats64,
1905 };
1906
1907 /* Currently we always use 15.6 ps/step for the delay line */
1908
delay_ps_to_unit(u32 ps)1909 static u32 delay_ps_to_unit(u32 ps)
1910 {
1911 return DIV_ROUND_CLOSEST(ps * 10, 156);
1912 }
1913
delay_unit_to_ps(u32 unit)1914 static u32 delay_unit_to_ps(u32 unit)
1915 {
1916 return DIV_ROUND_CLOSEST(unit * 156, 10);
1917 }
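
/*
 * Worked example: with the 15.6 ps/step delay line, a DT value of 1000 ps
 * maps to DIV_ROUND_CLOSEST(1000 * 10, 156) = 64 units, and 64 units map
 * back to DIV_ROUND_CLOSEST(64 * 156, 10) = 998 ps. A round trip may thus
 * differ from the requested delay by up to half a step (~8 ps).
 */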

#define EMAC_MAX_DELAY_UNIT	FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)

/* Minus one just to be safe from rounding errors */
#define EMAC_MAX_DELAY_PS	(delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))

static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	u8 mac_addr[ETH_ALEN] = { 0 };
	int ret;

	priv->iobase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->iobase))
		return dev_err_probe(dev, PTR_ERR(priv->iobase),
				     "ioremap failed\n");

	priv->regmap_apmu =
		syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
						     &priv->regmap_apmu_offset);

	if (IS_ERR(priv->regmap_apmu))
		return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
				     "failed to get syscon\n");

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	ret = of_get_mac_address(np, mac_addr);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			return dev_err_probe(dev, ret,
					     "Can't get MAC address\n");

		dev_info(dev, "Using random MAC address\n");
		eth_hw_addr_random(priv->ndev);
	} else {
		eth_hw_addr_set(priv->ndev, mac_addr);
	}

	priv->tx_delay = 0;
	priv->rx_delay = 0;

	of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
	of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);

	if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
		dev_err(dev,
			"tx-internal-delay-ps too large: max %u, got %u\n",
			EMAC_MAX_DELAY_PS, priv->tx_delay);
		return -EINVAL;
	}

	if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
		dev_err(dev,
			"rx-internal-delay-ps too large: max %u, got %u\n",
			EMAC_MAX_DELAY_PS, priv->rx_delay);
		return -EINVAL;
	}

	priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
	priv->rx_delay = delay_ps_to_unit(priv->rx_delay);

	return 0;
}

static void emac_phy_deregister_fixed_link(void *data)
{
	struct device_node *of_node = data;

	of_phy_deregister_fixed_link(of_node);
}

static int emac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct reset_control *reset;
	struct net_device *ndev;
	struct emac_priv *priv;
	int ret;

	ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
	if (!ndev)
		return -ENOMEM;

	ndev->hw_features = NETIF_F_SG;
	ndev->features |= ndev->hw_features;

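	/*
	 * Largest MTU that still fits an EMAC_RX_BUF_4K receive buffer once
	 * the Ethernet header and FCS are accounted for.
	 */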
	ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN);
	ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	platform_set_drvdata(pdev, priv);

	ret = emac_config_dt(pdev, priv);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Configuration failed\n");

	ndev->watchdog_timeo = 5 * HZ;
	ndev->base_addr = (unsigned long)priv->iobase;
	ndev->irq = priv->irq;

	ndev->ethtool_ops = &emac_ethtool_ops;
	ndev->netdev_ops = &emac_netdev_ops;

	devm_pm_runtime_enable(dev);

	priv->bus_clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(priv->bus_clk))
		return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
				     "Failed to get clock\n");

	reset = devm_reset_control_get_optional_exclusive_deasserted(dev,
								     NULL);
	if (IS_ERR(reset))
		return dev_err_probe(dev, PTR_ERR(reset),
				     "Failed to get reset\n");

	if (of_phy_is_fixed_link(dev->of_node)) {
		ret = of_phy_register_fixed_link(dev->of_node);
		if (ret)
			return dev_err_probe(dev, ret,
					     "Failed to register fixed-link\n");

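		/*
		 * Make sure the fixed link is deregistered again on probe
		 * failure or driver unbind.
		 */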
		ret = devm_add_action_or_reset(dev,
					       emac_phy_deregister_fixed_link,
					       dev->of_node);

		if (ret) {
			dev_err(dev, "devm_add_action_or_reset failed\n");
			return ret;
		}
	}

	emac_sw_init(priv);

	ret = emac_mdio_init(priv);
	if (ret)
		goto err_timer_delete;

	SET_NETDEV_DEV(ndev, dev);

	ret = devm_register_netdev(dev, ndev);
	if (ret) {
		dev_err(dev, "devm_register_netdev failed\n");
		goto err_timer_delete;
	}

	netif_napi_add(ndev, &priv->napi, emac_rx_poll);
	netif_carrier_off(ndev);

	return 0;

err_timer_delete:
	timer_delete_sync(&priv->txtimer);
	timer_delete_sync(&priv->stats_timer);

	return ret;
}

static void emac_remove(struct platform_device *pdev)
{
	struct emac_priv *priv = platform_get_drvdata(pdev);

	timer_shutdown_sync(&priv->txtimer);
	cancel_work_sync(&priv->tx_timeout_task);

	timer_shutdown_sync(&priv->stats_timer);

	emac_reset_hw(priv);
}

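/*
 * System sleep handlers. On resume, the descriptor rings and MAC state are
 * rebuilt from scratch via emac_open() rather than restored from saved
 * registers, so the suspend path can simply tear everything down with
 * emac_stop() and gate the bus clock.
 */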
static int emac_resume(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable bus clock: %d\n", ret);
		return ret;
	}

	if (!netif_running(ndev))
		return 0;

	ret = emac_open(ndev);
	if (ret) {
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	netif_device_attach(ndev);

	mod_timer(&priv->stats_timer, jiffies);

	return 0;
}

static int emac_suspend(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!ndev || !netif_running(ndev)) {
		clk_disable_unprepare(priv->bus_clk);
		return 0;
	}

	emac_stop(ndev);

	clk_disable_unprepare(priv->bus_clk);
	netif_device_detach(ndev);
	return 0;
}

static const struct dev_pm_ops emac_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
};

static const struct of_device_id emac_of_match[] = {
	{ .compatible = "spacemit,k1-emac" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, emac_of_match);

static struct platform_driver emac_driver = {
	.probe = emac_probe,
	.remove = emac_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(emac_of_match),
		.pm = &emac_pm_ops,
	},
};
module_platform_driver(emac_driver);

MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
MODULE_LICENSE("GPL");