// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * rtase is the Linux device driver for Realtek Automotive Switch
 * controllers with a PCI-Express interface.
 *
 * Copyright(c) 2024 Realtek Semiconductor Corp.
 *
8 * Below is a simplified block diagram of the chip and its relevant interfaces.
9 *
10 * *************************
11 * * *
12 * * CPU network device *
13 * * *
14 * * +-------------+ *
15 * * | PCIE Host | *
16 * ***********++************
17 * ||
18 * PCIE
19 * ||
20 * ********************++**********************
21 * * | PCIE Endpoint | *
22 * * +---------------+ *
23 * * | GMAC | *
24 * * +--++--+ Realtek *
25 * * || RTL90xx Series *
26 * * || *
27 * * +-------------++----------------+ *
28 * * | | MAC | | *
29 * * | +-----+ | *
30 * * | | *
31 * * | Ethernet Switch Core | *
32 * * | | *
33 * * | +-----+ +-----+ | *
34 * * | | MAC |...........| MAC | | *
35 * * +---+-----+-----------+-----+---+ *
36 * * | PHY |...........| PHY | *
37 * * +--++-+ +--++-+ *
38 * *************||****************||***********
39 *
 * The Realtek RTL90xx series block above is the entire chip: the GMAC is
 * connected directly to the switch core, with no PHY in between. This
 * driver controls only the GMAC, not the switch core, so it is not a DSA
 * driver; in this model Linux simply plays the role of a normal leaf node.
 */
46
47 #include <linux/crc32.h>
48 #include <linux/dma-mapping.h>
49 #include <linux/etherdevice.h>
50 #include <linux/if_vlan.h>
51 #include <linux/in.h>
52 #include <linux/init.h>
53 #include <linux/interrupt.h>
54 #include <linux/io.h>
55 #include <linux/iopoll.h>
56 #include <linux/ip.h>
57 #include <linux/ipv6.h>
58 #include <linux/mdio.h>
59 #include <linux/module.h>
60 #include <linux/netdevice.h>
61 #include <linux/pci.h>
62 #include <linux/pm_runtime.h>
63 #include <linux/prefetch.h>
64 #include <linux/rtnetlink.h>
65 #include <linux/tcp.h>
66 #include <asm/irq.h>
67 #include <net/ip6_checksum.h>
68 #include <net/netdev_queues.h>
69 #include <net/page_pool/helpers.h>
70 #include <net/pkt_cls.h>
71
72 #include "rtase.h"
73
74 #define RTK_OPTS1_DEBUG_VALUE 0x0BADBEEF
75 #define RTK_MAGIC_NUMBER 0x0BADBADBADBADBAD
76
77 static const struct pci_device_id rtase_pci_tbl[] = {
78 {PCI_VDEVICE(REALTEK, 0x906A)},
79 {}
80 };
81
82 MODULE_DEVICE_TABLE(pci, rtase_pci_tbl);
83
84 MODULE_AUTHOR("Realtek ARD Software Team");
85 MODULE_DESCRIPTION("Network Driver for the PCIe interface of Realtek Automotive Ethernet Switch");
86 MODULE_LICENSE("Dual BSD/GPL");
87
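/* Hardware tally (statistics) counter block. The hardware dumps the
 * counters into a coherent DMA buffer laid out like this structure
 * (see rtase_dump_tally_counter()), hence the fixed field widths and the
 * __packed attribute.
 */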
88 struct rtase_counters {
89 __le64 tx_packets;
90 __le64 rx_packets;
91 __le64 tx_errors;
92 __le32 rx_errors;
93 __le16 rx_missed;
94 __le16 align_errors;
95 __le32 tx_one_collision;
96 __le32 tx_multi_collision;
97 __le64 rx_unicast;
98 __le64 rx_broadcast;
99 __le32 rx_multicast;
100 __le16 tx_aborted;
101 __le16 tx_underrun;
102 } __packed;
103
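/* MMIO register accessors: thin wrappers around write{b,w,l}() and
 * read{b,w,l}() on the device registers mapped at tp->mmio_addr.
 */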
static void rtase_w8(const struct rtase_private *tp, u16 reg, u8 val8)
105 {
106 writeb(val8, tp->mmio_addr + reg);
107 }
108
static void rtase_w16(const struct rtase_private *tp, u16 reg, u16 val16)
110 {
111 writew(val16, tp->mmio_addr + reg);
112 }
113
static void rtase_w32(const struct rtase_private *tp, u16 reg, u32 val32)
115 {
116 writel(val32, tp->mmio_addr + reg);
117 }
118
static u8 rtase_r8(const struct rtase_private *tp, u16 reg)
120 {
121 return readb(tp->mmio_addr + reg);
122 }
123
static u16 rtase_r16(const struct rtase_private *tp, u16 reg)
125 {
126 return readw(tp->mmio_addr + reg);
127 }
128
static u32 rtase_r32(const struct rtase_private *tp, u16 reg)
130 {
131 return readl(tp->mmio_addr + reg);
132 }
133
static void rtase_free_desc(struct rtase_private *tp)
135 {
136 struct pci_dev *pdev = tp->pdev;
137 u32 i;
138
139 for (i = 0; i < tp->func_tx_queue_num; i++) {
140 if (!tp->tx_ring[i].desc)
141 continue;
142
143 dma_free_coherent(&pdev->dev, RTASE_TX_RING_DESC_SIZE,
144 tp->tx_ring[i].desc,
145 tp->tx_ring[i].phy_addr);
146 tp->tx_ring[i].desc = NULL;
147 }
148
149 for (i = 0; i < tp->func_rx_queue_num; i++) {
150 if (!tp->rx_ring[i].desc)
151 continue;
152
153 dma_free_coherent(&pdev->dev, RTASE_RX_RING_DESC_SIZE,
154 tp->rx_ring[i].desc,
155 tp->rx_ring[i].phy_addr);
156 tp->rx_ring[i].desc = NULL;
157 }
158 }
159
static int rtase_alloc_desc(struct rtase_private *tp)
161 {
162 struct pci_dev *pdev = tp->pdev;
163 u32 i;
164
	/* rx and tx descriptors need 256-byte alignment.
	 * dma_alloc_coherent() provides more than that.
	 */
168 for (i = 0; i < tp->func_tx_queue_num; i++) {
169 tp->tx_ring[i].desc =
170 dma_alloc_coherent(&pdev->dev,
171 RTASE_TX_RING_DESC_SIZE,
172 &tp->tx_ring[i].phy_addr,
173 GFP_KERNEL);
174 if (!tp->tx_ring[i].desc)
175 goto err_out;
176 }
177
178 for (i = 0; i < tp->func_rx_queue_num; i++) {
179 tp->rx_ring[i].desc =
180 dma_alloc_coherent(&pdev->dev,
181 RTASE_RX_RING_DESC_SIZE,
182 &tp->rx_ring[i].phy_addr,
183 GFP_KERNEL);
184 if (!tp->rx_ring[i].desc)
185 goto err_out;
186 }
187
188 return 0;
189
190 err_out:
191 rtase_free_desc(tp);
192 return -ENOMEM;
193 }
194
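/* Unmap a transmitted buffer and poison the descriptor fields with debug
 * markers so a stale descriptor is easy to recognize.
 */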
static void rtase_unmap_tx_skb(struct pci_dev *pdev, u32 len,
			       struct rtase_tx_desc *desc)
197 {
198 dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
199 DMA_TO_DEVICE);
200 desc->opts1 = cpu_to_le32(RTK_OPTS1_DEBUG_VALUE);
201 desc->opts2 = 0x00;
202 desc->addr = cpu_to_le64(RTK_MAGIC_NUMBER);
203 }
204
static void rtase_tx_clear_range(struct rtase_ring *ring, u32 start, u32 n)
206 {
207 struct rtase_tx_desc *desc_base = ring->desc;
208 struct rtase_private *tp = ring->ivec->tp;
209 u32 i;
210
211 for (i = 0; i < n; i++) {
212 u32 entry = (start + i) % RTASE_NUM_DESC;
213 struct rtase_tx_desc *desc = desc_base + entry;
214 u32 len = ring->mis.len[entry];
215 struct sk_buff *skb;
216
217 if (len == 0)
218 continue;
219
220 rtase_unmap_tx_skb(tp->pdev, len, desc);
221 ring->mis.len[entry] = 0;
222 skb = ring->skbuff[entry];
223 if (!skb)
224 continue;
225
226 tp->stats.tx_dropped++;
227 dev_kfree_skb_any(skb);
228 ring->skbuff[entry] = NULL;
229 }
230 }
231
static void rtase_tx_clear(struct rtase_private *tp)
233 {
234 struct rtase_ring *ring;
235 u16 i;
236
237 for (i = 0; i < tp->func_tx_queue_num; i++) {
238 ring = &tp->tx_ring[i];
239 rtase_tx_clear_range(ring, ring->dirty_idx, RTASE_NUM_DESC);
240 ring->cur_idx = 0;
241 ring->dirty_idx = 0;
242 }
243 }
244
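/* Hand an rx descriptor back to the hardware: clear opts2, then set
 * RTASE_DESC_OWN (preserving the ring-end bit) only after a dma_wmb() so
 * the device never sees a partially written descriptor.
 */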
static void rtase_mark_to_asic(union rtase_rx_desc *desc, u32 rx_buf_sz)
246 {
247 u32 eor = le32_to_cpu(desc->desc_cmd.opts1) & RTASE_RING_END;
248
249 desc->desc_status.opts2 = 0;
250 /* force memory writes to complete before releasing descriptor */
251 dma_wmb();
252 WRITE_ONCE(desc->desc_cmd.opts1,
253 cpu_to_le32(RTASE_DESC_OWN | eor | rx_buf_sz));
254 }
255
static u32 rtase_tx_avail(struct rtase_ring *ring)
257 {
258 return READ_ONCE(ring->dirty_idx) + RTASE_NUM_DESC -
259 READ_ONCE(ring->cur_idx);
260 }
261
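/* Reclaim completed tx descriptors on one ring. Processing stops at the
 * first descriptor still owned by the hardware or after
 * RTASE_TX_BUDGET_DEFAULT entries; dirty_idx is then advanced and the
 * subqueue is woken once enough descriptors are free again.
 */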
static int tx_handler(struct rtase_ring *ring, int budget)
263 {
264 const struct rtase_private *tp = ring->ivec->tp;
265 struct net_device *dev = tp->dev;
266 u32 dirty_tx, tx_left;
267 u32 bytes_compl = 0;
268 u32 pkts_compl = 0;
269 int workdone = 0;
270
271 dirty_tx = ring->dirty_idx;
272 tx_left = READ_ONCE(ring->cur_idx) - dirty_tx;
273
274 while (tx_left > 0) {
275 u32 entry = dirty_tx % RTASE_NUM_DESC;
276 struct rtase_tx_desc *desc = ring->desc +
277 sizeof(struct rtase_tx_desc) * entry;
278 u32 status;
279
280 status = le32_to_cpu(desc->opts1);
281
282 if (status & RTASE_DESC_OWN)
283 break;
284
285 rtase_unmap_tx_skb(tp->pdev, ring->mis.len[entry], desc);
286 ring->mis.len[entry] = 0;
287 if (ring->skbuff[entry]) {
288 pkts_compl++;
289 bytes_compl += ring->skbuff[entry]->len;
290 napi_consume_skb(ring->skbuff[entry], budget);
291 ring->skbuff[entry] = NULL;
292 }
293
294 dirty_tx++;
295 tx_left--;
296 workdone++;
297
298 if (workdone == RTASE_TX_BUDGET_DEFAULT)
299 break;
300 }
301
302 if (ring->dirty_idx != dirty_tx) {
303 dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
304 WRITE_ONCE(ring->dirty_idx, dirty_tx);
305
306 netif_subqueue_completed_wake(dev, ring->index, pkts_compl,
307 bytes_compl,
308 rtase_tx_avail(ring),
309 RTASE_TX_START_THRS);
310
311 if (ring->cur_idx != dirty_tx)
312 rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index));
313 }
314
315 return 0;
316 }
317
static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
319 {
320 struct rtase_ring *ring = &tp->tx_ring[idx];
321 struct rtase_tx_desc *desc;
322 u32 i;
323
324 memset(ring->desc, 0x0, RTASE_TX_RING_DESC_SIZE);
325 memset(ring->skbuff, 0x0, sizeof(ring->skbuff));
326 ring->cur_idx = 0;
327 ring->dirty_idx = 0;
328 ring->index = idx;
329 ring->alloc_fail = 0;
330
331 for (i = 0; i < RTASE_NUM_DESC; i++) {
332 ring->mis.len[i] = 0;
333 if ((RTASE_NUM_DESC - 1) == i) {
334 desc = ring->desc + sizeof(struct rtase_tx_desc) * i;
335 desc->opts1 = cpu_to_le32(RTASE_RING_END);
336 }
337 }
338
339 ring->ring_handler = tx_handler;
340 if (idx < 4) {
341 ring->ivec = &tp->int_vector[idx];
342 list_add_tail(&ring->ring_entry,
343 &tp->int_vector[idx].ring_list);
344 } else {
345 ring->ivec = &tp->int_vector[0];
346 list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
347 }
348 }
349
static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
			      u32 rx_buf_sz)
352 {
353 desc->desc_cmd.addr = cpu_to_le64(mapping);
354
355 rtase_mark_to_asic(desc, rx_buf_sz);
356 }
357
static void rtase_make_unusable_by_asic(union rtase_rx_desc *desc)
359 {
360 desc->desc_cmd.addr = cpu_to_le64(RTK_MAGIC_NUMBER);
361 desc->desc_cmd.opts1 &= ~cpu_to_le32(RTASE_DESC_OWN | RSVD_MASK);
362 }
363
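/* Allocate a page from the page pool for an rx descriptor and publish its
 * DMA address to the hardware. On failure the descriptor is made unusable
 * so the device will not DMA into it.
 */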
static int rtase_alloc_rx_data_buf(struct rtase_ring *ring,
				   void **p_data_buf,
				   union rtase_rx_desc *desc,
				   dma_addr_t *rx_phy_addr)
368 {
369 struct rtase_int_vector *ivec = ring->ivec;
370 const struct rtase_private *tp = ivec->tp;
371 dma_addr_t mapping;
372 struct page *page;
373
374 page = page_pool_dev_alloc_pages(tp->page_pool);
375 if (!page) {
376 ring->alloc_fail++;
377 goto err_out;
378 }
379
380 *p_data_buf = page_address(page);
381 mapping = page_pool_get_dma_addr(page);
382 *rx_phy_addr = mapping;
383 rtase_map_to_asic(desc, mapping, tp->rx_buf_sz);
384
385 return 0;
386
387 err_out:
388 rtase_make_unusable_by_asic(desc);
389
390 return -ENOMEM;
391 }
392
static u32 rtase_rx_ring_fill(struct rtase_ring *ring, u32 ring_start,
			      u32 ring_end)
395 {
396 union rtase_rx_desc *desc_base = ring->desc;
397 u32 cur;
398
399 for (cur = ring_start; ring_end - cur > 0; cur++) {
400 u32 i = cur % RTASE_NUM_DESC;
401 union rtase_rx_desc *desc = desc_base + i;
402 int ret;
403
404 if (ring->data_buf[i])
405 continue;
406
407 ret = rtase_alloc_rx_data_buf(ring, &ring->data_buf[i], desc,
408 &ring->mis.data_phy_addr[i]);
409 if (ret)
410 break;
411 }
412
413 return cur - ring_start;
414 }
415
static void rtase_mark_as_last_descriptor(union rtase_rx_desc *desc)
417 {
418 desc->desc_cmd.opts1 |= cpu_to_le32(RTASE_RING_END);
419 }
420
static void rtase_rx_ring_clear(struct page_pool *page_pool,
				struct rtase_ring *ring)
423 {
424 union rtase_rx_desc *desc;
425 struct page *page;
426 u32 i;
427
428 for (i = 0; i < RTASE_NUM_DESC; i++) {
429 desc = ring->desc + sizeof(union rtase_rx_desc) * i;
430 page = virt_to_head_page(ring->data_buf[i]);
431
432 if (ring->data_buf[i])
433 page_pool_put_full_page(page_pool, page, true);
434
435 rtase_make_unusable_by_asic(desc);
436 }
437 }
438
static int rtase_fragmented_frame(u32 status)
440 {
441 return (status & (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG)) !=
442 (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG);
443 }
444
static void rtase_rx_csum(const struct rtase_private *tp, struct sk_buff *skb,
			  const union rtase_rx_desc *desc)
447 {
448 u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
449
450 /* rx csum offload */
451 if (((opts2 & RTASE_RX_V4F) && !(opts2 & RTASE_RX_IPF)) ||
452 (opts2 & RTASE_RX_V6F)) {
453 if (((opts2 & RTASE_RX_TCPT) && !(opts2 & RTASE_RX_TCPF)) ||
454 ((opts2 & RTASE_RX_UDPT) && !(opts2 & RTASE_RX_UDPF)))
455 skb->ip_summed = CHECKSUM_UNNECESSARY;
456 else
457 skb->ip_summed = CHECKSUM_NONE;
458 } else {
459 skb->ip_summed = CHECKSUM_NONE;
460 }
461 }
462
static void rtase_rx_vlan_skb(union rtase_rx_desc *desc, struct sk_buff *skb)
464 {
465 u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
466
467 if (!(opts2 & RTASE_RX_VLAN_TAG))
468 return;
469
470 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
471 swab16(opts2 & RTASE_VLAN_TAG_MASK));
472 }
473
static void rtase_rx_skb(const struct rtase_ring *ring, struct sk_buff *skb)
475 {
476 struct rtase_int_vector *ivec = ring->ivec;
477
478 napi_gro_receive(&ivec->napi, skb);
479 }
480
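/* NAPI rx handler for one ring: process up to @budget descriptors, build
 * skbs around the page-pool buffers, apply the hardware checksum and VLAN
 * offload results, then refill the ring with fresh buffers.
 */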
static int rx_handler(struct rtase_ring *ring, int budget)
482 {
483 union rtase_rx_desc *desc_base = ring->desc;
484 u32 pkt_size, cur_rx, delta, entry, status;
485 struct rtase_private *tp = ring->ivec->tp;
486 struct net_device *dev = tp->dev;
487 union rtase_rx_desc *desc;
488 struct sk_buff *skb;
489 int workdone = 0;
490
491 cur_rx = ring->cur_idx;
492 entry = cur_rx % RTASE_NUM_DESC;
493 desc = &desc_base[entry];
494
495 while (workdone < budget) {
496 status = le32_to_cpu(desc->desc_status.opts1);
497
498 if (status & RTASE_DESC_OWN)
499 break;
500
501 /* This barrier is needed to keep us from reading
502 * any other fields out of the rx descriptor until
503 * we know the status of RTASE_DESC_OWN
504 */
505 dma_rmb();
506
507 if (unlikely(status & RTASE_RX_RES)) {
508 if (net_ratelimit())
509 netdev_warn(dev, "Rx ERROR. status = %08x\n",
510 status);
511
512 tp->stats.rx_errors++;
513
514 if (status & (RTASE_RX_RWT | RTASE_RX_RUNT))
515 tp->stats.rx_length_errors++;
516
517 if (status & RTASE_RX_CRC)
518 tp->stats.rx_crc_errors++;
519
520 if (dev->features & NETIF_F_RXALL)
521 goto process_pkt;
522
523 rtase_mark_to_asic(desc, tp->rx_buf_sz);
524 goto skip_process_pkt;
525 }
526
527 process_pkt:
528 pkt_size = status & RTASE_RX_PKT_SIZE_MASK;
529 if (likely(!(dev->features & NETIF_F_RXFCS)))
530 pkt_size -= ETH_FCS_LEN;
531
		/* The driver does not support incoming fragmented frames.
		 * They are treated as a symptom of over-MTU-sized frames.
		 */
535 if (unlikely(rtase_fragmented_frame(status))) {
536 tp->stats.rx_dropped++;
537 tp->stats.rx_length_errors++;
538 rtase_mark_to_asic(desc, tp->rx_buf_sz);
539 goto skip_process_pkt;
540 }
541
542 dma_sync_single_for_cpu(&tp->pdev->dev,
543 ring->mis.data_phy_addr[entry],
544 tp->rx_buf_sz, DMA_FROM_DEVICE);
545
546 skb = build_skb(ring->data_buf[entry], PAGE_SIZE);
547 if (!skb) {
548 tp->stats.rx_dropped++;
549 rtase_mark_to_asic(desc, tp->rx_buf_sz);
550 goto skip_process_pkt;
551 }
552 ring->data_buf[entry] = NULL;
553
554 if (dev->features & NETIF_F_RXCSUM)
555 rtase_rx_csum(tp, skb, desc);
556
557 skb_put(skb, pkt_size);
558 skb_mark_for_recycle(skb);
559 skb->protocol = eth_type_trans(skb, dev);
560
561 if (skb->pkt_type == PACKET_MULTICAST)
562 tp->stats.multicast++;
563
564 rtase_rx_vlan_skb(desc, skb);
565 rtase_rx_skb(ring, skb);
566
567 dev_sw_netstats_rx_add(dev, pkt_size);
568
569 skip_process_pkt:
570 workdone++;
571 cur_rx++;
572 entry = cur_rx % RTASE_NUM_DESC;
573 desc = ring->desc + sizeof(union rtase_rx_desc) * entry;
574 }
575
576 ring->cur_idx = cur_rx;
577 delta = rtase_rx_ring_fill(ring, ring->dirty_idx, ring->cur_idx);
578 ring->dirty_idx += delta;
579
580 return workdone;
581 }
582
static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
584 {
585 struct rtase_ring *ring = &tp->rx_ring[idx];
586 u16 i;
587
588 memset(ring->desc, 0x0, RTASE_RX_RING_DESC_SIZE);
589 memset(ring->data_buf, 0x0, sizeof(ring->data_buf));
590 ring->cur_idx = 0;
591 ring->dirty_idx = 0;
592 ring->index = idx;
593 ring->alloc_fail = 0;
594
595 for (i = 0; i < RTASE_NUM_DESC; i++)
596 ring->mis.data_phy_addr[i] = 0;
597
598 ring->ring_handler = rx_handler;
599 ring->ivec = &tp->int_vector[idx];
600 list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
601 }
602
static void rtase_rx_clear(struct rtase_private *tp)
604 {
605 u32 i;
606
607 for (i = 0; i < tp->func_rx_queue_num; i++)
608 rtase_rx_ring_clear(tp->page_pool, &tp->rx_ring[i]);
609
610 page_pool_destroy(tp->page_pool);
611 tp->page_pool = NULL;
612 }
613
static int rtase_init_ring(const struct net_device *dev)
615 {
616 struct rtase_private *tp = netdev_priv(dev);
617 struct page_pool_params pp_params = { 0 };
618 struct page_pool *page_pool;
619 u32 num;
620 u16 i;
621
622 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
623 pp_params.order = 0;
624 pp_params.pool_size = RTASE_NUM_DESC * tp->func_rx_queue_num;
625 pp_params.nid = dev_to_node(&tp->pdev->dev);
626 pp_params.dev = &tp->pdev->dev;
627 pp_params.dma_dir = DMA_FROM_DEVICE;
628 pp_params.max_len = PAGE_SIZE;
629 pp_params.offset = 0;
630
631 page_pool = page_pool_create(&pp_params);
632 if (IS_ERR(page_pool)) {
633 netdev_err(tp->dev, "failed to create page pool\n");
634 return -ENOMEM;
635 }
636
637 tp->page_pool = page_pool;
638
639 for (i = 0; i < tp->func_tx_queue_num; i++)
640 rtase_tx_desc_init(tp, i);
641
642 for (i = 0; i < tp->func_rx_queue_num; i++) {
643 rtase_rx_desc_init(tp, i);
644
645 num = rtase_rx_ring_fill(&tp->rx_ring[i], 0, RTASE_NUM_DESC);
646 if (num != RTASE_NUM_DESC)
647 goto err_out;
648
649 rtase_mark_as_last_descriptor(tp->rx_ring[i].desc +
650 sizeof(union rtase_rx_desc) *
651 (RTASE_NUM_DESC - 1));
652 }
653
654 return 0;
655
656 err_out:
657 rtase_rx_clear(tp);
658 return -ENOMEM;
659 }
660
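/* Program the per-queue interrupt mitigation registers with the values
 * precomputed in rtase_init_software_variable().
 */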
static void rtase_interrupt_mitigation(const struct rtase_private *tp)
662 {
663 u32 i;
664
665 for (i = 0; i < tp->func_tx_queue_num; i++)
666 rtase_w16(tp, RTASE_INT_MITI_TX + i * 2, tp->tx_int_mit);
667
668 for (i = 0; i < tp->func_rx_queue_num; i++)
669 rtase_w16(tp, RTASE_INT_MITI_RX + i * 2, tp->rx_int_mit);
670 }
671
static void rtase_tally_counter_addr_fill(const struct rtase_private *tp)
673 {
674 rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr));
675 rtase_w32(tp, RTASE_DTCCR0, lower_32_bits(tp->tally_paddr));
676 }
677
static void rtase_tally_counter_clear(const struct rtase_private *tp)
679 {
680 u32 cmd = lower_32_bits(tp->tally_paddr);
681
682 rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr));
683 rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_RESET);
684 }
685
static void rtase_desc_addr_fill(const struct rtase_private *tp)
687 {
688 const struct rtase_ring *ring;
689 u16 i, cmd, val;
690 int err;
691
692 for (i = 0; i < tp->func_tx_queue_num; i++) {
693 ring = &tp->tx_ring[i];
694
695 rtase_w32(tp, RTASE_TX_DESC_ADDR0,
696 lower_32_bits(ring->phy_addr));
697 rtase_w32(tp, RTASE_TX_DESC_ADDR4,
698 upper_32_bits(ring->phy_addr));
699
700 cmd = i | RTASE_TX_DESC_CMD_WE | RTASE_TX_DESC_CMD_CS;
701 rtase_w16(tp, RTASE_TX_DESC_COMMAND, cmd);
702
703 err = read_poll_timeout(rtase_r16, val,
704 !(val & RTASE_TX_DESC_CMD_CS), 10,
705 1000, false, tp,
706 RTASE_TX_DESC_COMMAND);
707
708 if (err == -ETIMEDOUT)
709 netdev_err(tp->dev,
710 "error occurred in fill tx descriptor\n");
711 }
712
713 for (i = 0; i < tp->func_rx_queue_num; i++) {
714 ring = &tp->rx_ring[i];
715
716 if (i == 0) {
717 rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR0,
718 lower_32_bits(ring->phy_addr));
719 rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR4,
720 upper_32_bits(ring->phy_addr));
721 } else {
722 rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR0 + ((i - 1) * 8)),
723 lower_32_bits(ring->phy_addr));
724 rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR4 + ((i - 1) * 8)),
725 upper_32_bits(ring->phy_addr));
726 }
727 }
728 }
729
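/* Apply the netdev feature flags to hardware: error/runt frame acceptance
 * for NETIF_F_RXALL, rx checksum offload and VLAN tag stripping.
 */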
static void rtase_hw_set_features(const struct net_device *dev,
				  netdev_features_t features)
732 {
733 const struct rtase_private *tp = netdev_priv(dev);
734 u16 rx_config, val;
735
736 rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0);
737 if (features & NETIF_F_RXALL)
738 rx_config |= (RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT);
739 else
740 rx_config &= ~(RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT);
741
742 rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config);
743
744 val = rtase_r16(tp, RTASE_CPLUS_CMD);
745 if (features & NETIF_F_RXCSUM)
746 rtase_w16(tp, RTASE_CPLUS_CMD, val | RTASE_RX_CHKSUM);
747 else
748 rtase_w16(tp, RTASE_CPLUS_CMD, val & ~RTASE_RX_CHKSUM);
749
750 rx_config = rtase_r16(tp, RTASE_RX_CONFIG_1);
751 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
752 rx_config |= (RTASE_INNER_VLAN_DETAG_EN |
753 RTASE_OUTER_VLAN_DETAG_EN);
754 else
755 rx_config &= ~(RTASE_INNER_VLAN_DETAG_EN |
756 RTASE_OUTER_VLAN_DETAG_EN);
757
758 rtase_w16(tp, RTASE_RX_CONFIG_1, rx_config);
759 }
760
static void rtase_hw_set_rx_packet_filter(struct net_device *dev)
762 {
763 u32 mc_filter[2] = { 0xFFFFFFFF, 0xFFFFFFFF };
764 struct rtase_private *tp = netdev_priv(dev);
765 u16 rx_mode;
766
767 rx_mode = rtase_r16(tp, RTASE_RX_CONFIG_0) & ~RTASE_ACCEPT_MASK;
768 rx_mode |= RTASE_ACCEPT_BROADCAST | RTASE_ACCEPT_MYPHYS;
769
770 if (dev->flags & IFF_PROMISC) {
771 rx_mode |= RTASE_ACCEPT_MULTICAST | RTASE_ACCEPT_ALLPHYS;
772 } else if (dev->flags & IFF_ALLMULTI) {
773 rx_mode |= RTASE_ACCEPT_MULTICAST;
774 } else {
775 struct netdev_hw_addr *hw_addr;
776
777 mc_filter[0] = 0;
778 mc_filter[1] = 0;
779
780 netdev_for_each_mc_addr(hw_addr, dev) {
781 u32 bit_nr = eth_hw_addr_crc(hw_addr);
782 u32 idx = u32_get_bits(bit_nr, BIT(31));
783 u32 bit = u32_get_bits(bit_nr,
784 RTASE_MULTICAST_FILTER_MASK);
785
786 mc_filter[idx] |= BIT(bit);
787 rx_mode |= RTASE_ACCEPT_MULTICAST;
788 }
789 }
790
791 if (dev->features & NETIF_F_RXALL)
792 rx_mode |= RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT;
793
794 rtase_w32(tp, RTASE_MAR0, swab32(mc_filter[1]));
795 rtase_w32(tp, RTASE_MAR1, swab32(mc_filter[0]));
796 rtase_w16(tp, RTASE_RX_CONFIG_0, rx_mode);
797 }
798
static void rtase_irq_dis_and_clear(const struct rtase_private *tp)
800 {
801 const struct rtase_int_vector *ivec = &tp->int_vector[0];
802 u32 val1;
803 u16 val2;
804 u8 i;
805
806 rtase_w32(tp, ivec->imr_addr, 0);
807 val1 = rtase_r32(tp, ivec->isr_addr);
808 rtase_w32(tp, ivec->isr_addr, val1);
809
810 for (i = 1; i < tp->int_nums; i++) {
811 ivec = &tp->int_vector[i];
812 rtase_w16(tp, ivec->imr_addr, 0);
813 val2 = rtase_r16(tp, ivec->isr_addr);
814 rtase_w16(tp, ivec->isr_addr, val2);
815 }
816 }
817
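/* Poll an 8-bit register until all bits in @cond are set, warning on
 * timeout. Used while quiescing the chip in rtase_nic_reset().
 */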
static void rtase_poll_timeout(const struct rtase_private *tp, u32 cond,
			       u32 sleep_us, u64 timeout_us, u16 reg)
820 {
821 int err;
822 u8 val;
823
824 err = read_poll_timeout(rtase_r8, val, val & cond, sleep_us,
825 timeout_us, false, tp, reg);
826
827 if (err == -ETIMEDOUT)
828 netdev_err(tp->dev, "poll reg 0x00%x timeout\n", reg);
829 }
830
static void rtase_nic_reset(const struct net_device *dev)
832 {
833 const struct rtase_private *tp = netdev_priv(dev);
834 u16 rx_config;
835 u8 val;
836
837 rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0);
838 rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config & ~RTASE_ACCEPT_MASK);
839
840 val = rtase_r8(tp, RTASE_MISC);
841 rtase_w8(tp, RTASE_MISC, val | RTASE_RX_DV_GATE_EN);
842
843 val = rtase_r8(tp, RTASE_CHIP_CMD);
844 rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_STOP_REQ);
845 mdelay(2);
846
847 rtase_poll_timeout(tp, RTASE_STOP_REQ_DONE, 100, 150000,
848 RTASE_CHIP_CMD);
849
850 rtase_poll_timeout(tp, RTASE_TX_FIFO_EMPTY, 100, 100000,
851 RTASE_FIFOR);
852
853 rtase_poll_timeout(tp, RTASE_RX_FIFO_EMPTY, 100, 100000,
854 RTASE_FIFOR);
855
856 val = rtase_r8(tp, RTASE_CHIP_CMD);
857 rtase_w8(tp, RTASE_CHIP_CMD, val & ~(RTASE_TE | RTASE_RE));
858 val = rtase_r8(tp, RTASE_CHIP_CMD);
859 rtase_w8(tp, RTASE_CHIP_CMD, val & ~RTASE_STOP_REQ);
860
861 rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config);
862 }
863
static void rtase_hw_reset(const struct net_device *dev)
865 {
866 const struct rtase_private *tp = netdev_priv(dev);
867
868 rtase_irq_dis_and_clear(tp);
869
870 rtase_nic_reset(dev);
871 }
872
static void rtase_set_rx_queue(const struct rtase_private *tp)
874 {
875 u16 reg_data;
876
877 reg_data = rtase_r16(tp, RTASE_FCR);
878 switch (tp->func_rx_queue_num) {
879 case 1:
		u16p_replace_bits(&reg_data, 0x1, RTASE_FCR_RXQ_MASK);
881 break;
882 case 2:
		u16p_replace_bits(&reg_data, 0x2, RTASE_FCR_RXQ_MASK);
884 break;
885 case 4:
		u16p_replace_bits(&reg_data, 0x3, RTASE_FCR_RXQ_MASK);
887 break;
888 }
889 rtase_w16(tp, RTASE_FCR, reg_data);
890 }
891
static void rtase_set_tx_queue(const struct rtase_private *tp)
893 {
894 u16 reg_data;
895
896 reg_data = rtase_r16(tp, RTASE_TX_CONFIG_1);
897 switch (tp->tx_queue_ctrl) {
898 case 1:
		u16p_replace_bits(&reg_data, 0x0, RTASE_TC_MODE_MASK);
900 break;
901 case 2:
		u16p_replace_bits(&reg_data, 0x1, RTASE_TC_MODE_MASK);
903 break;
904 case 3:
905 case 4:
		u16p_replace_bits(&reg_data, 0x2, RTASE_TC_MODE_MASK);
907 break;
908 default:
		u16p_replace_bits(&reg_data, 0x3, RTASE_TC_MODE_MASK);
910 break;
911 }
912 rtase_w16(tp, RTASE_TX_CONFIG_1, reg_data);
913 }
914
static void rtase_hw_config(struct net_device *dev)
916 {
917 const struct rtase_private *tp = netdev_priv(dev);
918 u32 reg_data32;
919 u16 reg_data16;
920
921 rtase_hw_reset(dev);
922
923 /* set rx dma burst */
924 reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_0);
925 reg_data16 &= ~(RTASE_RX_SINGLE_TAG | RTASE_RX_SINGLE_FETCH);
	u16p_replace_bits(&reg_data16, RTASE_RX_DMA_BURST_256,
			  RTASE_RX_MX_DMA_MASK);
928 rtase_w16(tp, RTASE_RX_CONFIG_0, reg_data16);
929
	/* new rx descriptor */
931 reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_1);
932 reg_data16 |= RTASE_RX_NEW_DESC_FORMAT_EN | RTASE_PCIE_NEW_FLOW;
	u16p_replace_bits(&reg_data16, 0xF, RTASE_RX_MAX_FETCH_DESC_MASK);
934 rtase_w16(tp, RTASE_RX_CONFIG_1, reg_data16);
935
936 rtase_set_rx_queue(tp);
937
938 rtase_interrupt_mitigation(tp);
939
940 /* set tx dma burst size and interframe gap time */
941 reg_data32 = rtase_r32(tp, RTASE_TX_CONFIG_0);
	u32p_replace_bits(&reg_data32, RTASE_TX_DMA_BURST_UNLIMITED,
			  RTASE_TX_DMA_MASK);
	u32p_replace_bits(&reg_data32, RTASE_INTERFRAMEGAP,
			  RTASE_TX_INTER_FRAME_GAP_MASK);
946 rtase_w32(tp, RTASE_TX_CONFIG_0, reg_data32);
947
948 /* new tx descriptor */
949 reg_data16 = rtase_r16(tp, RTASE_TFUN_CTRL);
950 rtase_w16(tp, RTASE_TFUN_CTRL, reg_data16 |
951 RTASE_TX_NEW_DESC_FORMAT_EN);
952
953 /* tx fetch desc number */
954 rtase_w8(tp, RTASE_TDFNR, 0x10);
955
956 /* tag num select */
957 reg_data16 = rtase_r16(tp, RTASE_MTPS);
	u16p_replace_bits(&reg_data16, 0x4, RTASE_TAG_NUM_SEL_MASK);
959 rtase_w16(tp, RTASE_MTPS, reg_data16);
960
961 rtase_set_tx_queue(tp);
962
963 rtase_w16(tp, RTASE_TOKSEL, 0x5555);
964
965 rtase_tally_counter_addr_fill(tp);
966 rtase_desc_addr_fill(tp);
967 rtase_hw_set_features(dev, dev->features);
968
969 /* enable flow control */
970 reg_data16 = rtase_r16(tp, RTASE_CPLUS_CMD);
971 reg_data16 |= (RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN);
972 rtase_w16(tp, RTASE_CPLUS_CMD, reg_data16);
	/* set the rx FIFO near-full threshold to avoid rx missed errors */
974 rtase_w16(tp, RTASE_RFIFONFULL, 0x190);
975
976 rtase_w16(tp, RTASE_RMS, tp->rx_buf_sz);
977
978 rtase_hw_set_rx_packet_filter(dev);
979 }
980
static void rtase_nic_enable(const struct net_device *dev)
982 {
983 const struct rtase_private *tp = netdev_priv(dev);
984 u16 rcr = rtase_r16(tp, RTASE_RX_CONFIG_1);
985 u8 val;
986
987 rtase_w16(tp, RTASE_RX_CONFIG_1, rcr & ~RTASE_PCIE_RELOAD_EN);
988 rtase_w16(tp, RTASE_RX_CONFIG_1, rcr | RTASE_PCIE_RELOAD_EN);
989
990 val = rtase_r8(tp, RTASE_CHIP_CMD);
991 rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_TE | RTASE_RE);
992
993 val = rtase_r8(tp, RTASE_MISC);
994 rtase_w8(tp, RTASE_MISC, val & ~RTASE_RX_DV_GATE_EN);
995 }
996
static void rtase_enable_hw_interrupt(const struct rtase_private *tp)
998 {
999 const struct rtase_int_vector *ivec = &tp->int_vector[0];
1000 u32 i;
1001
1002 rtase_w32(tp, ivec->imr_addr, ivec->imr);
1003
1004 for (i = 1; i < tp->int_nums; i++) {
1005 ivec = &tp->int_vector[i];
1006 rtase_w16(tp, ivec->imr_addr, ivec->imr);
1007 }
1008 }
1009
static void rtase_hw_start(const struct net_device *dev)
1011 {
1012 const struct rtase_private *tp = netdev_priv(dev);
1013
1014 rtase_nic_enable(dev);
1015 rtase_enable_hw_interrupt(tp);
1016 }
1017
/* This interrupt handler handles the interrupt status of RXQ0, TXQ0 and
 * TXQ4~7.
 */
static irqreturn_t rtase_interrupt(int irq, void *dev_instance)
1021 {
1022 const struct rtase_private *tp;
1023 struct rtase_int_vector *ivec;
1024 u32 status;
1025
1026 ivec = dev_instance;
1027 tp = ivec->tp;
1028 status = rtase_r32(tp, ivec->isr_addr);
1029
1030 rtase_w32(tp, ivec->imr_addr, 0x0);
1031 rtase_w32(tp, ivec->isr_addr, status & ~RTASE_FOVW);
1032
1033 if (napi_schedule_prep(&ivec->napi))
1034 __napi_schedule(&ivec->napi);
1035
1036 return IRQ_HANDLED;
1037 }
1038
/* This interrupt handler handles the interrupt status of RXQ1&TXQ1,
 * RXQ2&TXQ2 or RXQ3&TXQ3, depending on the interrupt vector.
 */
static irqreturn_t rtase_q_interrupt(int irq, void *dev_instance)
1043 {
1044 const struct rtase_private *tp;
1045 struct rtase_int_vector *ivec;
1046 u16 status;
1047
1048 ivec = dev_instance;
1049 tp = ivec->tp;
1050 status = rtase_r16(tp, ivec->isr_addr);
1051
1052 rtase_w16(tp, ivec->imr_addr, 0x0);
1053 rtase_w16(tp, ivec->isr_addr, status);
1054
1055 if (napi_schedule_prep(&ivec->napi))
1056 __napi_schedule(&ivec->napi);
1057
1058 return IRQ_HANDLED;
1059 }
1060
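/* Shared NAPI poll routine: run every tx/rx ring attached to this
 * interrupt vector and re-enable the vector's interrupt once the full
 * budget is no longer being consumed.
 */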
static int rtase_poll(struct napi_struct *napi, int budget)
1062 {
1063 const struct rtase_int_vector *ivec;
1064 const struct rtase_private *tp;
1065 struct rtase_ring *ring;
1066 int total_workdone = 0;
1067
1068 ivec = container_of(napi, struct rtase_int_vector, napi);
1069 tp = ivec->tp;
1070
1071 list_for_each_entry(ring, &ivec->ring_list, ring_entry)
1072 total_workdone += ring->ring_handler(ring, budget);
1073
1074 if (total_workdone >= budget)
1075 return budget;
1076
1077 if (napi_complete_done(napi, total_workdone)) {
1078 if (!ivec->index)
1079 rtase_w32(tp, ivec->imr_addr, ivec->imr);
1080 else
1081 rtase_w16(tp, ivec->imr_addr, ivec->imr);
1082 }
1083
1084 return total_workdone;
1085 }
1086
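/* ndo_open: allocate and initialize the descriptor rings, configure the
 * hardware, request the MSI-X (or fallback) interrupts, then enable NAPI
 * and start the queues.
 */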
static int rtase_open(struct net_device *dev)
1088 {
1089 struct rtase_private *tp = netdev_priv(dev);
1090 const struct pci_dev *pdev = tp->pdev;
1091 struct rtase_int_vector *ivec;
1092 u16 i = 0, j;
1093 int ret;
1094
1095 ivec = &tp->int_vector[0];
1096 tp->rx_buf_sz = RTASE_RX_BUF_SIZE;
1097
1098 ret = rtase_alloc_desc(tp);
1099 if (ret)
1100 return ret;
1101
1102 ret = rtase_init_ring(dev);
1103 if (ret)
1104 goto err_free_all_allocated_mem;
1105
1106 rtase_hw_config(dev);
1107
1108 if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
1109 ret = request_irq(ivec->irq, rtase_interrupt, 0,
1110 dev->name, ivec);
1111 if (ret)
1112 goto err_free_all_allocated_irq;
1113
1114 /* request other interrupts to handle multiqueue */
1115 for (i = 1; i < tp->int_nums; i++) {
1116 ivec = &tp->int_vector[i];
1117 snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
1118 tp->dev->name, i);
1119 ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
1120 ivec->name, ivec);
1121 if (ret)
1122 goto err_free_all_allocated_irq;
1123 }
1124 } else {
1125 ret = request_irq(pdev->irq, rtase_interrupt, 0, dev->name,
1126 ivec);
1127 if (ret)
1128 goto err_free_all_allocated_mem;
1129 }
1130
1131 rtase_hw_start(dev);
1132
1133 for (i = 0; i < tp->int_nums; i++) {
1134 ivec = &tp->int_vector[i];
1135 napi_enable(&ivec->napi);
1136 }
1137
1138 netif_carrier_on(dev);
1139 netif_wake_queue(dev);
1140
1141 return 0;
1142
1143 err_free_all_allocated_irq:
1144 for (j = 0; j < i; j++)
1145 free_irq(tp->int_vector[j].irq, &tp->int_vector[j]);
1146
1147 err_free_all_allocated_mem:
1148 rtase_free_desc(tp);
1149
1150 return ret;
1151 }
1152
static void rtase_down(struct net_device *dev)
1154 {
1155 struct rtase_private *tp = netdev_priv(dev);
1156 struct rtase_int_vector *ivec;
1157 struct rtase_ring *ring, *tmp;
1158 u32 i;
1159
1160 for (i = 0; i < tp->int_nums; i++) {
1161 ivec = &tp->int_vector[i];
1162 napi_disable(&ivec->napi);
1163 list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
1164 ring_entry)
1165 list_del(&ring->ring_entry);
1166 }
1167
1168 netif_tx_disable(dev);
1169
1170 netif_carrier_off(dev);
1171
1172 rtase_hw_reset(dev);
1173
1174 rtase_tx_clear(tp);
1175
1176 rtase_rx_clear(tp);
1177 }
1178
static int rtase_close(struct net_device *dev)
1180 {
1181 struct rtase_private *tp = netdev_priv(dev);
1182 const struct pci_dev *pdev = tp->pdev;
1183 u32 i;
1184
1185 rtase_down(dev);
1186
1187 if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
1188 for (i = 0; i < tp->int_nums; i++)
1189 free_irq(tp->int_vector[i].irq, &tp->int_vector[i]);
1190
1191 } else {
1192 free_irq(pdev->irq, &tp->int_vector[0]);
1193 }
1194
1195 rtase_free_desc(tp);
1196
1197 return 0;
1198 }
1199
static u32 rtase_tx_vlan_tag(const struct rtase_private *tp,
			     const struct sk_buff *skb)
1202 {
1203 return (skb_vlan_tag_present(skb)) ?
1204 (RTASE_TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb))) : 0x00;
1205 }
1206
static u32 rtase_tx_csum(struct sk_buff *skb, const struct net_device *dev)
1208 {
1209 u32 csum_cmd = 0;
1210 u8 ip_protocol;
1211
1212 switch (vlan_get_protocol(skb)) {
1213 case htons(ETH_P_IP):
1214 csum_cmd = RTASE_TX_IPCS_C;
1215 ip_protocol = ip_hdr(skb)->protocol;
1216 break;
1217
1218 case htons(ETH_P_IPV6):
1219 csum_cmd = RTASE_TX_IPV6F_C;
1220 ip_protocol = ipv6_hdr(skb)->nexthdr;
1221 break;
1222
1223 default:
1224 ip_protocol = IPPROTO_RAW;
1225 break;
1226 }
1227
1228 if (ip_protocol == IPPROTO_TCP)
1229 csum_cmd |= RTASE_TX_TCPCS_C;
1230 else if (ip_protocol == IPPROTO_UDP)
1231 csum_cmd |= RTASE_TX_UDPCS_C;
1232
1233 csum_cmd |= u32_encode_bits(skb_transport_offset(skb),
1234 RTASE_TCPHO_MASK);
1235
1236 return csum_cmd;
1237 }
1238
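/* Map and queue the paged fragments of an skb. The skb itself is attached
 * to the descriptor of the last fragment; returns the number of fragments
 * queued, or -EIO on a DMA mapping failure.
 */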
static int rtase_xmit_frags(struct rtase_ring *ring, struct sk_buff *skb,
			    u32 opts1, u32 opts2)
1241 {
1242 const struct skb_shared_info *info = skb_shinfo(skb);
1243 const struct rtase_private *tp = ring->ivec->tp;
1244 const u8 nr_frags = info->nr_frags;
1245 struct rtase_tx_desc *txd = NULL;
1246 u32 cur_frag, entry;
1247
1248 entry = ring->cur_idx;
1249 for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1250 const skb_frag_t *frag = &info->frags[cur_frag];
1251 dma_addr_t mapping;
1252 u32 status, len;
1253 void *addr;
1254
1255 entry = (entry + 1) % RTASE_NUM_DESC;
1256
1257 txd = ring->desc + sizeof(struct rtase_tx_desc) * entry;
1258 len = skb_frag_size(frag);
1259 addr = skb_frag_address(frag);
1260 mapping = dma_map_single(&tp->pdev->dev, addr, len,
1261 DMA_TO_DEVICE);
1262
1263 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
1264 if (unlikely(net_ratelimit()))
1265 netdev_err(tp->dev,
1266 "Failed to map TX fragments DMA!\n");
1267
1268 goto err_out;
1269 }
1270
1271 if (((entry + 1) % RTASE_NUM_DESC) == 0)
1272 status = (opts1 | len | RTASE_RING_END);
1273 else
1274 status = opts1 | len;
1275
1276 if (cur_frag == (nr_frags - 1)) {
1277 ring->skbuff[entry] = skb;
1278 status |= RTASE_TX_LAST_FRAG;
1279 }
1280
1281 ring->mis.len[entry] = len;
1282 txd->addr = cpu_to_le64(mapping);
1283 txd->opts2 = cpu_to_le32(opts2);
1284
1285 /* make sure the operating fields have been updated */
1286 dma_wmb();
1287 txd->opts1 = cpu_to_le32(status);
1288 }
1289
1290 return cur_frag;
1291
1292 err_out:
1293 rtase_tx_clear_range(ring, ring->cur_idx + 1, cur_frag);
1294 return -EIO;
1295 }
1296
static netdev_tx_t rtase_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
1299 {
1300 struct skb_shared_info *shinfo = skb_shinfo(skb);
1301 struct rtase_private *tp = netdev_priv(dev);
1302 u32 q_idx, entry, len, opts1, opts2;
1303 struct netdev_queue *tx_queue;
1304 bool stop_queue, door_bell;
1305 u32 mss = shinfo->gso_size;
1306 struct rtase_tx_desc *txd;
1307 struct rtase_ring *ring;
1308 dma_addr_t mapping;
1309 int frags;
1310
1311 /* multiqueues */
1312 q_idx = skb_get_queue_mapping(skb);
1313 ring = &tp->tx_ring[q_idx];
1314 tx_queue = netdev_get_tx_queue(dev, q_idx);
1315
1316 if (unlikely(!rtase_tx_avail(ring))) {
1317 if (net_ratelimit())
1318 netdev_err(dev,
1319 "BUG! Tx Ring full when queue awake!\n");
1320
1321 netif_stop_queue(dev);
1322 return NETDEV_TX_BUSY;
1323 }
1324
1325 entry = ring->cur_idx % RTASE_NUM_DESC;
1326 txd = ring->desc + sizeof(struct rtase_tx_desc) * entry;
1327
1328 opts1 = RTASE_DESC_OWN;
1329 opts2 = rtase_tx_vlan_tag(tp, skb);
1330
1331 /* tcp segmentation offload (or tcp large send) */
1332 if (mss) {
1333 if (shinfo->gso_type & SKB_GSO_TCPV4) {
1334 opts1 |= RTASE_GIANT_SEND_V4;
1335 } else if (shinfo->gso_type & SKB_GSO_TCPV6) {
1336 if (skb_cow_head(skb, 0))
1337 goto err_dma_0;
1338
1339 tcp_v6_gso_csum_prep(skb);
1340 opts1 |= RTASE_GIANT_SEND_V6;
1341 } else {
1342 WARN_ON_ONCE(1);
1343 }
1344
1345 opts1 |= u32_encode_bits(skb_transport_offset(skb),
1346 RTASE_TCPHO_MASK);
1347 opts2 |= u32_encode_bits(mss, RTASE_MSS_MASK);
1348 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1349 opts2 |= rtase_tx_csum(skb, dev);
1350 }
1351
1352 frags = rtase_xmit_frags(ring, skb, opts1, opts2);
1353 if (unlikely(frags < 0))
1354 goto err_dma_0;
1355
1356 if (frags) {
1357 len = skb_headlen(skb);
1358 opts1 |= RTASE_TX_FIRST_FRAG;
1359 } else {
1360 len = skb->len;
1361 ring->skbuff[entry] = skb;
1362 opts1 |= RTASE_TX_FIRST_FRAG | RTASE_TX_LAST_FRAG;
1363 }
1364
1365 if (((entry + 1) % RTASE_NUM_DESC) == 0)
1366 opts1 |= (len | RTASE_RING_END);
1367 else
1368 opts1 |= len;
1369
1370 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
1371 DMA_TO_DEVICE);
1372
1373 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
1374 if (unlikely(net_ratelimit()))
1375 netdev_err(dev, "Failed to map TX DMA!\n");
1376
1377 goto err_dma_1;
1378 }
1379
1380 ring->mis.len[entry] = len;
1381 txd->addr = cpu_to_le64(mapping);
1382 txd->opts2 = cpu_to_le32(opts2);
1383 txd->opts1 = cpu_to_le32(opts1 & ~RTASE_DESC_OWN);
1384
1385 /* make sure the operating fields have been updated */
1386 dma_wmb();
1387
1388 door_bell = __netdev_tx_sent_queue(tx_queue, skb->len,
1389 netdev_xmit_more());
1390
1391 txd->opts1 = cpu_to_le32(opts1);
1392
1393 skb_tx_timestamp(skb);
1394
1395 /* tx needs to see descriptor changes before updated cur_idx */
1396 smp_wmb();
1397
1398 WRITE_ONCE(ring->cur_idx, ring->cur_idx + frags + 1);
1399
1400 stop_queue = !netif_subqueue_maybe_stop(dev, ring->index,
1401 rtase_tx_avail(ring),
1402 RTASE_TX_STOP_THRS,
1403 RTASE_TX_START_THRS);
1404
1405 if (door_bell || stop_queue)
1406 rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index));
1407
1408 return NETDEV_TX_OK;
1409
1410 err_dma_1:
1411 ring->skbuff[entry] = NULL;
1412 rtase_tx_clear_range(ring, ring->cur_idx + 1, frags);
1413
1414 err_dma_0:
1415 tp->stats.tx_dropped++;
1416 dev_kfree_skb_any(skb);
1417 return NETDEV_TX_OK;
1418 }
1419
static void rtase_set_rx_mode(struct net_device *dev)
1421 {
1422 rtase_hw_set_rx_packet_filter(dev);
1423 }
1424
static void rtase_enable_eem_write(const struct rtase_private *tp)
1426 {
1427 u8 val;
1428
1429 val = rtase_r8(tp, RTASE_EEM);
1430 rtase_w8(tp, RTASE_EEM, val | RTASE_EEM_UNLOCK);
1431 }
1432
static void rtase_disable_eem_write(const struct rtase_private *tp)
1434 {
1435 u8 val;
1436
1437 val = rtase_r8(tp, RTASE_EEM);
1438 rtase_w8(tp, RTASE_EEM, val & ~RTASE_EEM_UNLOCK);
1439 }
1440
static void rtase_rar_set(const struct rtase_private *tp, const u8 *addr)
1442 {
1443 u32 rar_low, rar_high;
1444
1445 rar_low = (u32)addr[0] | ((u32)addr[1] << 8) |
1446 ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
1447
1448 rar_high = (u32)addr[4] | ((u32)addr[5] << 8);
1449
1450 rtase_enable_eem_write(tp);
1451 rtase_w32(tp, RTASE_MAC0, rar_low);
1452 rtase_w32(tp, RTASE_MAC4, rar_high);
1453 rtase_disable_eem_write(tp);
1454 rtase_w16(tp, RTASE_LBK_CTRL, RTASE_LBK_ATLD | RTASE_LBK_CLR);
1455 }
1456
static int rtase_set_mac_address(struct net_device *dev, void *p)
1458 {
1459 struct rtase_private *tp = netdev_priv(dev);
1460 int ret;
1461
1462 ret = eth_mac_addr(dev, p);
1463 if (ret)
1464 return ret;
1465
1466 rtase_rar_set(tp, dev->dev_addr);
1467
1468 return 0;
1469 }
1470
static int rtase_change_mtu(struct net_device *dev, int new_mtu)
1472 {
1473 dev->mtu = new_mtu;
1474
1475 netdev_update_features(dev);
1476
1477 return 0;
1478 }
1479
static void rtase_wait_for_quiescence(const struct net_device *dev)
1481 {
1482 struct rtase_private *tp = netdev_priv(dev);
1483 struct rtase_int_vector *ivec;
1484 u32 i;
1485
1486 for (i = 0; i < tp->int_nums; i++) {
1487 ivec = &tp->int_vector[i];
1488 synchronize_irq(ivec->irq);
1489 /* wait for any pending NAPI task to complete */
1490 napi_disable(&ivec->napi);
1491 }
1492
1493 rtase_irq_dis_and_clear(tp);
1494
1495 for (i = 0; i < tp->int_nums; i++) {
1496 ivec = &tp->int_vector[i];
1497 napi_enable(&ivec->napi);
1498 }
1499 }
1500
static void rtase_sw_reset(struct net_device *dev)
1502 {
1503 struct rtase_private *tp = netdev_priv(dev);
1504 int ret;
1505
1506 netif_stop_queue(dev);
1507 netif_carrier_off(dev);
1508 rtase_hw_reset(dev);
1509
	/* wait a bit for any pending (async) irq to land */
1511 rtase_wait_for_quiescence(dev);
1512 rtase_tx_clear(tp);
1513 rtase_rx_clear(tp);
1514
1515 ret = rtase_init_ring(dev);
1516 if (ret) {
1517 netdev_err(dev, "unable to init ring\n");
1518 rtase_free_desc(tp);
1519 return;
1520 }
1521
1522 rtase_hw_config(dev);
	/* the link is always up, so start transmitting and receiving */
1524 rtase_hw_start(dev);
1525
1526 netif_carrier_on(dev);
1527 netif_wake_queue(dev);
1528 }
1529
static void rtase_dump_tally_counter(const struct rtase_private *tp)
1531 {
1532 dma_addr_t paddr = tp->tally_paddr;
1533 u32 cmd = lower_32_bits(paddr);
1534 u32 val;
1535 int err;
1536
1537 rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(paddr));
1538 rtase_w32(tp, RTASE_DTCCR0, cmd);
1539 rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_DUMP);
1540
1541 err = read_poll_timeout(rtase_r32, val, !(val & RTASE_COUNTER_DUMP),
1542 10, 250, false, tp, RTASE_DTCCR0);
1543
1544 if (err == -ETIMEDOUT)
1545 netdev_err(tp->dev, "error occurred in dump tally counter\n");
1546 }
1547
static void rtase_dump_state(const struct net_device *dev)
1549 {
1550 const struct rtase_private *tp = netdev_priv(dev);
1551 int max_reg_size = RTASE_PCI_REGS_SIZE;
1552 const struct rtase_counters *counters;
1553 const struct rtase_ring *ring;
1554 u32 dword_rd;
1555 int n = 0;
1556
1557 ring = &tp->tx_ring[0];
1558 netdev_err(dev, "Tx descriptor info:\n");
1559 netdev_err(dev, "Tx curIdx = 0x%x\n", ring->cur_idx);
1560 netdev_err(dev, "Tx dirtyIdx = 0x%x\n", ring->dirty_idx);
1561 netdev_err(dev, "Tx phyAddr = %pad\n", &ring->phy_addr);
1562
1563 ring = &tp->rx_ring[0];
1564 netdev_err(dev, "Rx descriptor info:\n");
1565 netdev_err(dev, "Rx curIdx = 0x%x\n", ring->cur_idx);
1566 netdev_err(dev, "Rx dirtyIdx = 0x%x\n", ring->dirty_idx);
1567 netdev_err(dev, "Rx phyAddr = %pad\n", &ring->phy_addr);
1568
1569 netdev_err(dev, "Device Registers:\n");
1570 netdev_err(dev, "Chip Command = 0x%02x\n",
1571 rtase_r8(tp, RTASE_CHIP_CMD));
1572 netdev_err(dev, "IMR = %08x\n", rtase_r32(tp, RTASE_IMR0));
1573 netdev_err(dev, "ISR = %08x\n", rtase_r32(tp, RTASE_ISR0));
1574 netdev_err(dev, "Boot Ctrl Reg(0xE004) = %04x\n",
1575 rtase_r16(tp, RTASE_BOOT_CTL));
1576 netdev_err(dev, "EPHY ISR(0xE014) = %04x\n",
1577 rtase_r16(tp, RTASE_EPHY_ISR));
1578 netdev_err(dev, "EPHY IMR(0xE016) = %04x\n",
1579 rtase_r16(tp, RTASE_EPHY_IMR));
1580 netdev_err(dev, "CLKSW SET REG(0xE018) = %04x\n",
1581 rtase_r16(tp, RTASE_CLKSW_SET));
1582
1583 netdev_err(dev, "Dump PCI Registers:\n");
1584
1585 while (n < max_reg_size) {
1586 if ((n % RTASE_DWORD_MOD) == 0)
1587 netdev_err(tp->dev, "0x%03x:\n", n);
1588
1589 pci_read_config_dword(tp->pdev, n, &dword_rd);
1590 netdev_err(tp->dev, "%08x\n", dword_rd);
1591 n += 4;
1592 }
1593
1594 netdev_err(dev, "Dump tally counter:\n");
1595 counters = tp->tally_vaddr;
1596 rtase_dump_tally_counter(tp);
1597
1598 netdev_err(dev, "tx_packets %lld\n",
1599 le64_to_cpu(counters->tx_packets));
1600 netdev_err(dev, "rx_packets %lld\n",
1601 le64_to_cpu(counters->rx_packets));
1602 netdev_err(dev, "tx_errors %lld\n",
1603 le64_to_cpu(counters->tx_errors));
1604 netdev_err(dev, "rx_errors %d\n",
1605 le32_to_cpu(counters->rx_errors));
1606 netdev_err(dev, "rx_missed %d\n",
1607 le16_to_cpu(counters->rx_missed));
1608 netdev_err(dev, "align_errors %d\n",
1609 le16_to_cpu(counters->align_errors));
1610 netdev_err(dev, "tx_one_collision %d\n",
1611 le32_to_cpu(counters->tx_one_collision));
1612 netdev_err(dev, "tx_multi_collision %d\n",
1613 le32_to_cpu(counters->tx_multi_collision));
1614 netdev_err(dev, "rx_unicast %lld\n",
1615 le64_to_cpu(counters->rx_unicast));
1616 netdev_err(dev, "rx_broadcast %lld\n",
1617 le64_to_cpu(counters->rx_broadcast));
1618 netdev_err(dev, "rx_multicast %d\n",
1619 le32_to_cpu(counters->rx_multicast));
1620 netdev_err(dev, "tx_aborted %d\n",
1621 le16_to_cpu(counters->tx_aborted));
1622 netdev_err(dev, "tx_underrun %d\n",
1623 le16_to_cpu(counters->tx_underrun));
1624 }
1625
static void rtase_tx_timeout(struct net_device *dev, unsigned int txqueue)
1627 {
1628 rtase_dump_state(dev);
1629 rtase_sw_reset(dev);
1630 }
1631
static void rtase_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
1634 {
1635 const struct rtase_private *tp = netdev_priv(dev);
1636 const struct rtase_counters *counters;
1637
1638 counters = tp->tally_vaddr;
1639
1640 dev_fetch_sw_netstats(stats, dev->tstats);
1641
	/* Fetch, from the hardware tally counters, those counter values that
	 * are not tracked in the statistics collected by the driver itself.
	 */
1645 rtase_dump_tally_counter(tp);
1646 stats->rx_errors = tp->stats.rx_errors;
1647 stats->tx_errors = le64_to_cpu(counters->tx_errors);
1648 stats->rx_dropped = tp->stats.rx_dropped;
1649 stats->tx_dropped = tp->stats.tx_dropped;
1650 stats->multicast = tp->stats.multicast;
1651 stats->rx_length_errors = tp->stats.rx_length_errors;
1652 }
1653
static netdev_features_t rtase_fix_features(struct net_device *dev,
					    netdev_features_t features)
1656 {
1657 netdev_features_t features_fix = features;
1658
	/* TSO is not supported for jumbo frames */
1660 if (dev->mtu > ETH_DATA_LEN)
1661 features_fix &= ~NETIF_F_ALL_TSO;
1662
1663 return features_fix;
1664 }
1665
static int rtase_set_features(struct net_device *dev,
			      netdev_features_t features)
1668 {
1669 netdev_features_t features_set = features;
1670
1671 features_set &= NETIF_F_RXALL | NETIF_F_RXCSUM |
1672 NETIF_F_HW_VLAN_CTAG_RX;
1673
1674 if (features_set ^ dev->features)
1675 rtase_hw_set_features(dev, features_set);
1676
1677 return 0;
1678 }
1679
1680 static const struct net_device_ops rtase_netdev_ops = {
1681 .ndo_open = rtase_open,
1682 .ndo_stop = rtase_close,
1683 .ndo_start_xmit = rtase_start_xmit,
1684 .ndo_set_rx_mode = rtase_set_rx_mode,
1685 .ndo_set_mac_address = rtase_set_mac_address,
1686 .ndo_change_mtu = rtase_change_mtu,
1687 .ndo_tx_timeout = rtase_tx_timeout,
1688 .ndo_get_stats64 = rtase_get_stats64,
1689 .ndo_fix_features = rtase_fix_features,
1690 .ndo_set_features = rtase_set_features,
1691 };
1692
static void rtase_get_mac_address(struct net_device *dev)
1694 {
1695 struct rtase_private *tp = netdev_priv(dev);
1696 u8 mac_addr[ETH_ALEN] __aligned(2) = {};
1697 u32 i;
1698
1699 for (i = 0; i < ETH_ALEN; i++)
1700 mac_addr[i] = rtase_r8(tp, RTASE_MAC0 + i);
1701
1702 if (!is_valid_ether_addr(mac_addr)) {
1703 eth_hw_addr_random(dev);
1704 netdev_warn(dev, "Random ether addr %pM\n", dev->dev_addr);
1705 } else {
1706 eth_hw_addr_set(dev, mac_addr);
1707 ether_addr_copy(dev->perm_addr, dev->dev_addr);
1708 }
1709
1710 rtase_rar_set(tp, dev->dev_addr);
1711 }
1712
static int rtase_get_settings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
1715 {
1716 u32 supported = SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1717 const struct rtase_private *tp = netdev_priv(dev);
1718
1719 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1720 supported);
1721
1722 switch (tp->hw_ver) {
1723 case RTASE_HW_VER_906X_7XA:
1724 case RTASE_HW_VER_906X_7XC:
1725 cmd->base.speed = SPEED_5000;
1726 break;
1727 case RTASE_HW_VER_907XD_V1:
1728 cmd->base.speed = SPEED_10000;
1729 break;
1730 }
1731
1732 cmd->base.duplex = DUPLEX_FULL;
1733 cmd->base.port = PORT_MII;
1734 cmd->base.autoneg = AUTONEG_DISABLE;
1735
1736 return 0;
1737 }
1738
static void rtase_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
1741 {
1742 const struct rtase_private *tp = netdev_priv(dev);
1743 u16 value = rtase_r16(tp, RTASE_CPLUS_CMD);
1744
1745 pause->autoneg = AUTONEG_DISABLE;
1746 pause->tx_pause = !!(value & RTASE_FORCE_TXFLOW_EN);
1747 pause->rx_pause = !!(value & RTASE_FORCE_RXFLOW_EN);
1748 }
1749
static int rtase_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
1752 {
1753 const struct rtase_private *tp = netdev_priv(dev);
1754 u16 value = rtase_r16(tp, RTASE_CPLUS_CMD);
1755
1756 if (pause->autoneg)
1757 return -EOPNOTSUPP;
1758
1759 value &= ~(RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN);
1760
1761 if (pause->tx_pause)
1762 value |= RTASE_FORCE_TXFLOW_EN;
1763
1764 if (pause->rx_pause)
1765 value |= RTASE_FORCE_RXFLOW_EN;
1766
1767 rtase_w16(tp, RTASE_CPLUS_CMD, value);
1768 return 0;
1769 }
1770
static void rtase_get_eth_mac_stats(struct net_device *dev,
				    struct ethtool_eth_mac_stats *stats)
1773 {
1774 struct rtase_private *tp = netdev_priv(dev);
1775 const struct rtase_counters *counters;
1776
1777 counters = tp->tally_vaddr;
1778
1779 rtase_dump_tally_counter(tp);
1780
1781 stats->FramesTransmittedOK = le64_to_cpu(counters->tx_packets);
1782 stats->FramesReceivedOK = le64_to_cpu(counters->rx_packets);
1783 stats->FramesLostDueToIntMACXmitError =
1784 le64_to_cpu(counters->tx_errors);
1785 stats->BroadcastFramesReceivedOK = le64_to_cpu(counters->rx_broadcast);
1786 }
1787
1788 static const struct ethtool_ops rtase_ethtool_ops = {
1789 .get_link = ethtool_op_get_link,
1790 .get_link_ksettings = rtase_get_settings,
1791 .get_pauseparam = rtase_get_pauseparam,
1792 .set_pauseparam = rtase_set_pauseparam,
1793 .get_eth_mac_stats = rtase_get_eth_mac_stats,
1794 .get_ts_info = ethtool_op_get_ts_info,
1795 };
1796
static void rtase_init_netdev_ops(struct net_device *dev)
1798 {
1799 dev->netdev_ops = &rtase_netdev_ops;
1800 dev->ethtool_ops = &rtase_ethtool_ops;
1801 }
1802
static void rtase_reset_interrupt(struct pci_dev *pdev,
				  const struct rtase_private *tp)
1805 {
1806 if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED)
1807 pci_disable_msix(pdev);
1808 else
1809 pci_disable_msi(pdev);
1810 }
1811
static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
1813 {
1814 int ret, irq;
1815 u16 i;
1816
1817 memset(tp->msix_entry, 0x0, RTASE_NUM_MSIX *
1818 sizeof(struct msix_entry));
1819
1820 for (i = 0; i < RTASE_NUM_MSIX; i++)
1821 tp->msix_entry[i].entry = i;
1822
1823 ret = pci_enable_msix_exact(pdev, tp->msix_entry, tp->int_nums);
1824
1825 if (ret)
1826 return ret;
1827
1828 for (i = 0; i < tp->int_nums; i++) {
1829 irq = pci_irq_vector(pdev, i);
1830 if (!irq) {
1831 pci_disable_msix(pdev);
1832 return irq;
1833 }
1834
1835 tp->int_vector[i].irq = irq;
1836 }
1837
1838 return 0;
1839 }
1840
static int rtase_alloc_interrupt(struct pci_dev *pdev,
				 struct rtase_private *tp)
1843 {
1844 int ret;
1845
1846 ret = rtase_alloc_msix(pdev, tp);
1847 if (ret) {
1848 ret = pci_enable_msi(pdev);
1849 if (ret) {
1850 dev_err(&pdev->dev,
1851 "unable to alloc interrupt.(MSI)\n");
1852 return ret;
1853 }
1854
1855 tp->sw_flag |= RTASE_SWF_MSI_ENABLED;
1856 } else {
1857 tp->sw_flag |= RTASE_SWF_MSIX_ENABLED;
1858 }
1859
1860 return 0;
1861 }
1862
static void rtase_init_hardware(const struct rtase_private *tp)
1864 {
1865 u16 i;
1866
1867 for (i = 0; i < RTASE_VLAN_FILTER_ENTRY_NUM; i++)
1868 rtase_w32(tp, RTASE_VLAN_ENTRY_0 + i * 4, 0);
1869 }
1870
static void rtase_init_int_vector(struct rtase_private *tp)
1872 {
1873 u16 i;
1874
1875 /* interrupt vector 0 */
1876 tp->int_vector[0].tp = tp;
1877 tp->int_vector[0].index = 0;
1878 tp->int_vector[0].imr_addr = RTASE_IMR0;
1879 tp->int_vector[0].isr_addr = RTASE_ISR0;
1880 tp->int_vector[0].imr = RTASE_ROK | RTASE_RDU | RTASE_TOK |
1881 RTASE_TOK4 | RTASE_TOK5 | RTASE_TOK6 |
1882 RTASE_TOK7;
1883 tp->int_vector[0].poll = rtase_poll;
1884
1885 memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name));
1886 INIT_LIST_HEAD(&tp->int_vector[0].ring_list);
1887
1888 netif_napi_add(tp->dev, &tp->int_vector[0].napi,
1889 tp->int_vector[0].poll);
1890
1891 /* interrupt vector 1 ~ 3 */
1892 for (i = 1; i < tp->int_nums; i++) {
1893 tp->int_vector[i].tp = tp;
1894 tp->int_vector[i].index = i;
1895 tp->int_vector[i].imr_addr = RTASE_IMR1 + (i - 1) * 4;
1896 tp->int_vector[i].isr_addr = RTASE_ISR1 + (i - 1) * 4;
1897 tp->int_vector[i].imr = RTASE_Q_ROK | RTASE_Q_RDU |
1898 RTASE_Q_TOK;
1899 tp->int_vector[i].poll = rtase_poll;
1900
1901 memset(tp->int_vector[i].name, 0x0,
1902 sizeof(tp->int_vector[0].name));
1903 INIT_LIST_HEAD(&tp->int_vector[i].ring_list);
1904
1905 netif_napi_add(tp->dev, &tp->int_vector[i].napi,
1906 tp->int_vector[i].poll);
1907 }
1908 }
1909
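/* Encode an interrupt mitigation time (in microseconds, clamped to
 * RTASE_MITI_MAX_TIME) into the register's count/unit format, where the
 * programmed value is roughly time_count << time_unit.
 */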
1910 static u16 rtase_calc_time_mitigation(u32 time_us)
1911 {
1912 u8 msb, time_count, time_unit;
1913 u16 int_miti;
1914
1915 time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
1916
1917 msb = fls(time_us);
1918 if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
1919 time_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
1920 time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM);
1921 } else {
1922 time_unit = 0;
1923 time_count = time_us;
1924 }
1925
1926 int_miti = u16_encode_bits(time_count, RTASE_MITI_TIME_COUNT_MASK) |
1927 u16_encode_bits(time_unit, RTASE_MITI_TIME_UNIT_MASK);
1928
1929 return int_miti;
1930 }
1931
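/* Encode a packet-count mitigation threshold (clamped to
 * RTASE_MITI_MAX_PKT_NUM) into the same count/unit format; counts above
 * 60 are forced to the largest unit index.
 */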
1932 static u16 rtase_calc_packet_num_mitigation(u16 pkt_num)
1933 {
1934 u8 msb, pkt_num_count, pkt_num_unit;
1935 u16 int_miti;
1936
1937 pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM);
1938
1939 if (pkt_num > 60) {
1940 pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX;
1941 pkt_num_count = pkt_num / RTASE_MITI_MAX_PKT_NUM_UNIT;
1942 } else {
1943 msb = fls(pkt_num);
1944 if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
1945 pkt_num_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
1946 pkt_num_count = pkt_num >> (msb -
1947 RTASE_MITI_COUNT_BIT_NUM);
1948 } else {
1949 pkt_num_unit = 0;
1950 pkt_num_count = pkt_num;
1951 }
1952 }
1953
1954 int_miti = u16_encode_bits(pkt_num_count,
1955 RTASE_MITI_PKT_NUM_COUNT_MASK) |
1956 u16_encode_bits(pkt_num_unit,
1957 RTASE_MITI_PKT_NUM_UNIT_MASK);
1958
1959 return int_miti;
1960 }
1961
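/* Driver defaults: queue and interrupt vector counts, the default
 * time/packet-number interrupt mitigation and the supported MTU range.
 */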
1962 static void rtase_init_software_variable(struct pci_dev *pdev,
1963 struct rtase_private *tp)
1964 {
1965 u16 int_miti;
1966
1967 tp->tx_queue_ctrl = RTASE_TXQ_CTRL;
1968 tp->func_tx_queue_num = RTASE_FUNC_TXQ_NUM;
1969 tp->func_rx_queue_num = RTASE_FUNC_RXQ_NUM;
1970 tp->int_nums = RTASE_INTERRUPT_NUM;
1971
1972 int_miti = rtase_calc_time_mitigation(RTASE_MITI_DEFAULT_TIME) |
1973 rtase_calc_packet_num_mitigation(RTASE_MITI_DEFAULT_PKT_NUM);
1974 tp->tx_int_mit = int_miti;
1975 tp->rx_int_mit = int_miti;
1976
1977 tp->sw_flag = 0;
1978
1979 rtase_init_int_vector(tp);
1980
1981 /* MTU range: 60 - hw-specific max */
1982 tp->dev->min_mtu = ETH_ZLEN;
1983 tp->dev->max_mtu = RTASE_MAX_JUMBO_SIZE;
1984 }
1985
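/* Read the hardware version field from RTASE_TX_CONFIG_0 and accept only
 * the chip revisions this driver knows about.
 */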
1986 static int rtase_check_mac_version_valid(struct rtase_private *tp)
1987 {
1988 int ret = -ENODEV;
1989
1990 tp->hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK;
1991
1992 switch (tp->hw_ver) {
1993 case RTASE_HW_VER_906X_7XA:
1994 case RTASE_HW_VER_906X_7XC:
1995 case RTASE_HW_VER_907XD_V1:
1996 ret = 0;
1997 break;
1998 }
1999
2000 return ret;
2001 }
2002
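/* Basic PCI bring-up: allocate the net device, enable the PCI device,
 * validate BAR 2 (the register window), claim the regions, set a 64-bit
 * DMA mask and map the registers.
 */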
2003 static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
2004 void __iomem **ioaddr_out)
2005 {
2006 struct net_device *dev;
2007 void __iomem *ioaddr;
2008 int ret = -ENOMEM;
2009
2010 /* dev zeroed in alloc_etherdev_mq */
2011 dev = alloc_etherdev_mq(sizeof(struct rtase_private),
2012 RTASE_FUNC_TXQ_NUM);
2013 if (!dev)
2014 goto err_out;
2015
2016 SET_NETDEV_DEV(dev, &pdev->dev);
2017
2018 ret = pci_enable_device(pdev);
2019 if (ret < 0)
2020 goto err_out_free_dev;
2021
2022 /* make sure PCI BAR 2 is MMIO */
2023 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
2024 ret = -ENODEV;
2025 goto err_out_disable;
2026 }
2027
2028 /* check for weird/broken PCI region reporting */
2029 if (pci_resource_len(pdev, 2) < RTASE_REGS_SIZE) {
2030 ret = -ENODEV;
2031 goto err_out_disable;
2032 }
2033
2034 ret = pci_request_regions(pdev, KBUILD_MODNAME);
2035 if (ret < 0)
2036 goto err_out_disable;
2037
2038 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2039 if (ret) {
2040 dev_err(&pdev->dev, "no usable dma addressing method\n");
2041 goto err_out_free_res;
2042 }
2043
2044 pci_set_master(pdev);
2045
2046 /* ioremap MMIO region */
2047 ioaddr = ioremap(pci_resource_start(pdev, 2),
2048 pci_resource_len(pdev, 2));
2049 if (!ioaddr) {
2050 ret = -EIO;
2051 goto err_out_free_res;
2052 }
2053
2054 *ioaddr_out = ioaddr;
2055 *dev_out = dev;
2056
2057 return ret;
2058
2059 err_out_free_res:
2060 pci_release_regions(pdev);
2061
2062 err_out_disable:
2063 pci_disable_device(pdev);
2064
2065 err_out_free_dev:
2066 free_netdev(dev);
2067
2068 err_out:
2069 *ioaddr_out = NULL;
2070 *dev_out = NULL;
2071
2072 return ret;
2073 }
2074
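/* Reverse of rtase_init_board() plus interrupt teardown: restore the
 * permanent MAC address, unmap the registers, disable MSI-X/MSI and
 * release the PCI resources.
 */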
2075 static void rtase_release_board(struct pci_dev *pdev, struct net_device *dev,
2076 void __iomem *ioaddr)
2077 {
2078 const struct rtase_private *tp = netdev_priv(dev);
2079
2080 rtase_rar_set(tp, tp->dev->perm_addr);
2081 iounmap(ioaddr);
2082
2083 if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED)
2084 pci_disable_msix(pdev);
2085 else
2086 pci_disable_msi(pdev);
2087
2088 pci_release_regions(pdev);
2089 pci_disable_device(pdev);
2090 free_netdev(dev);
2091 }
2092
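/* PCI probe: identify the chip, initialize software state and hardware,
 * allocate interrupts, set up netdev features, allocate the DMA tally
 * buffer and register the net device.
 */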
2093 static int rtase_init_one(struct pci_dev *pdev,
2094 const struct pci_device_id *ent)
2095 {
2096 struct net_device *dev = NULL;
2097 struct rtase_int_vector *ivec;
2098 void __iomem *ioaddr = NULL;
2099 struct rtase_private *tp;
2100 int ret, i;
2101
2102 if (!pdev->is_physfn && pdev->is_virtfn) {
2103 dev_err(&pdev->dev,
2104 "This module does not support a virtual function.");
2105 return -EINVAL;
2106 }
2107
2108 dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n");
2109
2110 ret = rtase_init_board(pdev, &dev, &ioaddr);
2111 if (ret != 0)
2112 return ret;
2113
2114 tp = netdev_priv(dev);
2115 tp->mmio_addr = ioaddr;
2116 tp->dev = dev;
2117 tp->pdev = pdev;
2118
2119 /* identify chip attached to board */
2120 ret = rtase_check_mac_version_valid(tp);
2121 if (ret != 0) {
2122 dev_err(&pdev->dev,
2123 "unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n",
2124 tp->hw_ver);
2125 goto err_out_release_board;
2126 }
2127
2128 rtase_init_software_variable(pdev, tp);
2129 rtase_init_hardware(tp);
2130
2131 ret = rtase_alloc_interrupt(pdev, tp);
2132 if (ret < 0) {
2133 dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
2134 goto err_out_del_napi;
2135 }
2136
2137 rtase_init_netdev_ops(dev);
2138
2139 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
2140
2141 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2142 NETIF_F_IP_CSUM | NETIF_F_HIGHDMA |
2143 NETIF_F_RXCSUM | NETIF_F_SG |
2144 NETIF_F_TSO | NETIF_F_IPV6_CSUM |
2145 NETIF_F_TSO6;
2146
2147 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
2148 NETIF_F_TSO | NETIF_F_RXCSUM |
2149 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2150 NETIF_F_RXALL | NETIF_F_RXFCS |
2151 NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
2152
2153 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2154 NETIF_F_HIGHDMA;
2155 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2156 netif_set_tso_max_size(dev, RTASE_LSO_64K);
2157 netif_set_tso_max_segs(dev, RTASE_NIC_MAX_PHYS_BUF_COUNT_LSO2);
2158
2159 rtase_get_mac_address(dev);
2160
2161 tp->tally_vaddr = dma_alloc_coherent(&pdev->dev,
2162 sizeof(*tp->tally_vaddr),
2163 &tp->tally_paddr,
2164 GFP_KERNEL);
2165 if (!tp->tally_vaddr) {
2166 ret = -ENOMEM;
2167 goto err_out_free_dma;
2168 }
2169
2170 rtase_tally_counter_clear(tp);
2171
2172 pci_set_drvdata(pdev, dev);
2173
2174 netif_carrier_off(dev);
2175
2176 ret = register_netdev(dev);
2177 if (ret != 0)
2178 goto err_out_free_dma;
2179
2180 netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
2181
2182 return 0;
2183
2184 err_out_free_dma:
2185 if (tp->tally_vaddr) {
2186 dma_free_coherent(&pdev->dev,
2187 sizeof(*tp->tally_vaddr),
2188 tp->tally_vaddr,
2189 tp->tally_paddr);
2190
2191 tp->tally_vaddr = NULL;
2192 }
2193
2194 err_out_del_napi:
2195 for (i = 0; i < tp->int_nums; i++) {
2196 ivec = &tp->int_vector[i];
2197 netif_napi_del(&ivec->napi);
2198 }
2199
2200 err_out_release_board:
2201 rtase_release_board(pdev, dev, ioaddr);
2202
2203 return ret;
2204 }
2205
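/* PCI remove: unregister the net device, drop the NAPI instances, free
 * the tally buffer and release the board resources.
 */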
2206 static void rtase_remove_one(struct pci_dev *pdev)
2207 {
2208 struct net_device *dev = pci_get_drvdata(pdev);
2209 struct rtase_private *tp = netdev_priv(dev);
2210 struct rtase_int_vector *ivec;
2211 u32 i;
2212
2213 unregister_netdev(dev);
2214
2215 for (i = 0; i < tp->int_nums; i++) {
2216 ivec = &tp->int_vector[i];
2217 netif_napi_del(&ivec->napi);
2218 }
2219
2220 rtase_reset_interrupt(pdev, tp);
2221 if (tp->tally_vaddr) {
2222 dma_free_coherent(&pdev->dev,
2223 sizeof(*tp->tally_vaddr),
2224 tp->tally_vaddr,
2225 tp->tally_paddr);
2226 tp->tally_vaddr = NULL;
2227 }
2228
2229 rtase_release_board(pdev, dev, tp->mmio_addr);
2230 pci_set_drvdata(pdev, NULL);
2231 }
2232
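/* Shutdown (reboot/kexec): close the interface if it is running and
 * release the interrupt vectors.
 */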
2233 static void rtase_shutdown(struct pci_dev *pdev)
2234 {
2235 struct net_device *dev = pci_get_drvdata(pdev);
2236 const struct rtase_private *tp;
2237
2238 tp = netdev_priv(dev);
2239
2240 if (netif_running(dev))
2241 rtase_close(dev);
2242
2243 rtase_reset_interrupt(pdev, tp);
2244 }
2245
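/* System suspend: detach the interface and reset the hardware; the
 * descriptor rings are rebuilt on resume.
 */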
2246 static int rtase_suspend(struct device *device)
2247 {
2248 struct net_device *dev = dev_get_drvdata(device);
2249
2250 if (netif_running(dev)) {
2251 netif_device_detach(dev);
2252 rtase_hw_reset(dev);
2253 }
2254
2255 return 0;
2256 }
2257
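/* System resume: if the interface was running before suspend, rebuild
 * the descriptor rings, reprogram the hardware and re-attach the device.
 */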
2258 static int rtase_resume(struct device *device)
2259 {
2260 struct net_device *dev = dev_get_drvdata(device);
2261 struct rtase_private *tp = netdev_priv(dev);
2262 int ret;
2263
2264 /* restore last modified mac address */
2265 rtase_rar_set(tp, dev->dev_addr);
2266
2267 if (!netif_running(dev))
2268 goto out;
2269
2270 rtase_wait_for_quiescence(dev);
2271
2272 rtase_tx_clear(tp);
2273 rtase_rx_clear(tp);
2274
2275 ret = rtase_init_ring(dev);
2276 if (ret) {
2277 netdev_err(dev, "unable to init ring\n");
2278 rtase_free_desc(tp);
2279 return -ENOMEM;
2280 }
2281
2282 rtase_hw_config(dev);
2283 /* the link is always up, so start to transmit & receive */
2284 rtase_hw_start(dev);
2285
2286 netif_device_attach(dev);
2287 out:
2288
2289 return 0;
2290 }
2291
2292 static const struct dev_pm_ops rtase_pm_ops = {
2293 SYSTEM_SLEEP_PM_OPS(rtase_suspend, rtase_resume)
2294 };
2295
2296 static struct pci_driver rtase_pci_driver = {
2297 .name = KBUILD_MODNAME,
2298 .id_table = rtase_pci_tbl,
2299 .probe = rtase_init_one,
2300 .remove = rtase_remove_one,
2301 .shutdown = rtase_shutdown,
2302 .driver.pm = pm_ptr(&rtase_pm_ops),
2303 };
2304
2305 module_pci_driver(rtase_pci_driver);
2306