1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * rtase is the Linux device driver for Realtek Automotive Switch
4 * controllers with a PCI-Express interface.
5 *
6 * Copyright(c) 2024 Realtek Semiconductor Corp.
7 *
8 * Below is a simplified block diagram of the chip and its relevant interfaces.
9 *
10 * *************************
11 * * *
12 * * CPU network device *
13 * * *
14 * * +-------------+ *
15 * * | PCIE Host | *
16 * ***********++************
17 * ||
18 * PCIE
19 * ||
20 * ********************++**********************
21 * * | PCIE Endpoint | *
22 * * +---------------+ *
23 * * | GMAC | *
24 * * +--++--+ Realtek *
25 * * || RTL90xx Series *
26 * * || *
27 * * +-------------++----------------+ *
28 * * | | MAC | | *
29 * * | +-----+ | *
30 * * | | *
31 * * | Ethernet Switch Core | *
32 * * | | *
33 * * | +-----+ +-----+ | *
34 * * | | MAC |...........| MAC | | *
35 * * +---+-----+-----------+-----+---+ *
36 * * | PHY |...........| PHY | *
37 * * +--++-+ +--++-+ *
38 * *************||****************||***********
39 *
40 * The Realtek RTL90xx series block above is the entire chip: the GMAC is
41 * connected directly to the switch core, with no PHY in between. This
42 * driver only controls the GMAC, not the switch core, so it is not a DSA
43 * driver. In this model, Linux simply acts as a normal leaf node attached
44 * to the switch.
45 */
46
47 #include <linux/crc32.h>
48 #include <linux/dma-mapping.h>
49 #include <linux/etherdevice.h>
50 #include <linux/if_vlan.h>
51 #include <linux/in.h>
52 #include <linux/init.h>
53 #include <linux/interrupt.h>
54 #include <linux/io.h>
55 #include <linux/iopoll.h>
56 #include <linux/ip.h>
57 #include <linux/ipv6.h>
58 #include <linux/mdio.h>
59 #include <linux/module.h>
60 #include <linux/netdevice.h>
61 #include <linux/pci.h>
62 #include <linux/pm_runtime.h>
63 #include <linux/prefetch.h>
64 #include <linux/rtnetlink.h>
65 #include <linux/tcp.h>
66 #include <asm/irq.h>
67 #include <net/ip6_checksum.h>
68 #include <net/netdev_queues.h>
69 #include <net/page_pool/helpers.h>
70 #include <net/pkt_cls.h>
71
72 #include "rtase.h"
73
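/* Debug poison values written into descriptors once their DMA mapping has
 * been released (see rtase_unmap_tx_skb() and rtase_make_unusable_by_asic()),
 * so that accidental reuse of a stale descriptor is easy to spot.
 */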
74 #define RTK_OPTS1_DEBUG_VALUE 0x0BADBEEF
75 #define RTK_MAGIC_NUMBER 0x0BADBADBADBADBAD
76
77 static const struct pci_device_id rtase_pci_tbl[] = {
78 {PCI_VDEVICE(REALTEK, 0x906A)},
79 {}
80 };
81
82 MODULE_DEVICE_TABLE(pci, rtase_pci_tbl);
83
84 MODULE_AUTHOR("Realtek ARD Software Team");
85 MODULE_DESCRIPTION("Network Driver for the PCIe interface of Realtek Automotive Ethernet Switch");
86 MODULE_LICENSE("Dual BSD/GPL");
87
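/* Layout of the hardware tally (statistics) counter dump buffer. The NIC
 * DMA-writes these counters into the buffer at tp->tally_paddr when a dump
 * is triggered (see rtase_dump_tally_counter()), so the fields are
 * fixed-width little-endian values and the struct is __packed to match the
 * device layout exactly.
 */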
88 struct rtase_counters {
89 __le64 tx_packets;
90 __le64 rx_packets;
91 __le64 tx_errors;
92 __le32 rx_errors;
93 __le16 rx_missed;
94 __le16 align_errors;
95 __le32 tx_one_collision;
96 __le32 tx_multi_collision;
97 __le64 rx_unicast;
98 __le64 rx_broadcast;
99 __le32 rx_multicast;
100 __le16 tx_aborted;
101 __le16 tx_underrun;
102 } __packed;
103
104 static void rtase_w8(const struct rtase_private *tp, u16 reg, u8 val8)
105 {
106 writeb(val8, tp->mmio_addr + reg);
107 }
108
109 static void rtase_w16(const struct rtase_private *tp, u16 reg, u16 val16)
110 {
111 writew(val16, tp->mmio_addr + reg);
112 }
113
114 static void rtase_w32(const struct rtase_private *tp, u16 reg, u32 val32)
115 {
116 writel(val32, tp->mmio_addr + reg);
117 }
118
119 static u8 rtase_r8(const struct rtase_private *tp, u16 reg)
120 {
121 return readb(tp->mmio_addr + reg);
122 }
123
124 static u16 rtase_r16(const struct rtase_private *tp, u16 reg)
125 {
126 return readw(tp->mmio_addr + reg);
127 }
128
129 static u32 rtase_r32(const struct rtase_private *tp, u16 reg)
130 {
131 return readl(tp->mmio_addr + reg);
132 }
133
134 static void rtase_free_desc(struct rtase_private *tp)
135 {
136 struct pci_dev *pdev = tp->pdev;
137 u32 i;
138
139 for (i = 0; i < tp->func_tx_queue_num; i++) {
140 if (!tp->tx_ring[i].desc)
141 continue;
142
143 dma_free_coherent(&pdev->dev, RTASE_TX_RING_DESC_SIZE,
144 tp->tx_ring[i].desc,
145 tp->tx_ring[i].phy_addr);
146 tp->tx_ring[i].desc = NULL;
147 }
148
149 for (i = 0; i < tp->func_rx_queue_num; i++) {
150 if (!tp->rx_ring[i].desc)
151 continue;
152
153 dma_free_coherent(&pdev->dev, RTASE_RX_RING_DESC_SIZE,
154 tp->rx_ring[i].desc,
155 tp->rx_ring[i].phy_addr);
156 tp->rx_ring[i].desc = NULL;
157 }
158 }
159
160 static int rtase_alloc_desc(struct rtase_private *tp)
161 {
162 struct pci_dev *pdev = tp->pdev;
163 u32 i;
164
165 /* rx and tx descriptors need 256-byte alignment;
166 * dma_alloc_coherent() provides at least that.
167 */
168 for (i = 0; i < tp->func_tx_queue_num; i++) {
169 tp->tx_ring[i].desc =
170 dma_alloc_coherent(&pdev->dev,
171 RTASE_TX_RING_DESC_SIZE,
172 &tp->tx_ring[i].phy_addr,
173 GFP_KERNEL);
174 if (!tp->tx_ring[i].desc)
175 goto err_out;
176 }
177
178 for (i = 0; i < tp->func_rx_queue_num; i++) {
179 tp->rx_ring[i].desc =
180 dma_alloc_coherent(&pdev->dev,
181 RTASE_RX_RING_DESC_SIZE,
182 &tp->rx_ring[i].phy_addr,
183 GFP_KERNEL);
184 if (!tp->rx_ring[i].desc)
185 goto err_out;
186 }
187
188 return 0;
189
190 err_out:
191 rtase_free_desc(tp);
192 return -ENOMEM;
193 }
194
195 static void rtase_unmap_tx_skb(struct pci_dev *pdev, u32 len,
196 struct rtase_tx_desc *desc)
197 {
198 dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
199 DMA_TO_DEVICE);
200 desc->opts1 = cpu_to_le32(RTK_OPTS1_DEBUG_VALUE);
201 desc->opts2 = 0x00;
202 desc->addr = cpu_to_le64(RTK_MAGIC_NUMBER);
203 }
204
205 static void rtase_tx_clear_range(struct rtase_ring *ring, u32 start, u32 n)
206 {
207 struct rtase_tx_desc *desc_base = ring->desc;
208 struct rtase_private *tp = ring->ivec->tp;
209 u32 i;
210
211 for (i = 0; i < n; i++) {
212 u32 entry = (start + i) % RTASE_NUM_DESC;
213 struct rtase_tx_desc *desc = desc_base + entry;
214 u32 len = ring->mis.len[entry];
215 struct sk_buff *skb;
216
217 if (len == 0)
218 continue;
219
220 rtase_unmap_tx_skb(tp->pdev, len, desc);
221 ring->mis.len[entry] = 0;
222 skb = ring->skbuff[entry];
223 if (!skb)
224 continue;
225
226 tp->stats.tx_dropped++;
227 dev_kfree_skb_any(skb);
228 ring->skbuff[entry] = NULL;
229 }
230 }
231
232 static void rtase_tx_clear(struct rtase_private *tp)
233 {
234 struct rtase_ring *ring;
235 u16 i;
236
237 for (i = 0; i < tp->func_tx_queue_num; i++) {
238 ring = &tp->tx_ring[i];
239 rtase_tx_clear_range(ring, ring->dirty_idx, RTASE_NUM_DESC);
240 ring->cur_idx = 0;
241 ring->dirty_idx = 0;
242 }
243 }
244
245 static void rtase_mark_to_asic(union rtase_rx_desc *desc, u32 rx_buf_sz)
246 {
247 u32 eor = le32_to_cpu(desc->desc_cmd.opts1) & RTASE_RING_END;
248
249 desc->desc_status.opts2 = 0;
250 /* force memory writes to complete before releasing descriptor */
251 dma_wmb();
252 WRITE_ONCE(desc->desc_cmd.opts1,
253 cpu_to_le32(RTASE_DESC_OWN | eor | rx_buf_sz));
254 }
255
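/* Number of free TX descriptors. cur_idx and dirty_idx are free-running
 * counters that are only reduced modulo RTASE_NUM_DESC when used as ring
 * indices, so the space left is RTASE_NUM_DESC minus the in-flight count
 * (cur_idx - dirty_idx). For example, if RTASE_NUM_DESC were 1024 with
 * cur_idx = 1030 and dirty_idx = 1010, 20 descriptors would be in flight
 * and 1004 still available.
 */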
256 static u32 rtase_tx_avail(struct rtase_ring *ring)
257 {
258 return READ_ONCE(ring->dirty_idx) + RTASE_NUM_DESC -
259 READ_ONCE(ring->cur_idx);
260 }
261
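/* Reclaim completed TX descriptors for one ring: walk from dirty_idx until
 * a descriptor still owned by the hardware (or the per-call TX budget) is
 * reached, unmap the buffers and free the skbs, then wake the subqueue if
 * enough space was freed and kick TPPOLL again if packets are still queued.
 */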
262 static int tx_handler(struct rtase_ring *ring, int budget)
263 {
264 const struct rtase_private *tp = ring->ivec->tp;
265 struct net_device *dev = tp->dev;
266 u32 dirty_tx, tx_left;
267 u32 bytes_compl = 0;
268 u32 pkts_compl = 0;
269 int workdone = 0;
270
271 dirty_tx = ring->dirty_idx;
272 tx_left = READ_ONCE(ring->cur_idx) - dirty_tx;
273
274 while (tx_left > 0) {
275 u32 entry = dirty_tx % RTASE_NUM_DESC;
276 struct rtase_tx_desc *desc = ring->desc +
277 sizeof(struct rtase_tx_desc) * entry;
278 u32 status;
279
280 status = le32_to_cpu(desc->opts1);
281
282 if (status & RTASE_DESC_OWN)
283 break;
284
285 rtase_unmap_tx_skb(tp->pdev, ring->mis.len[entry], desc);
286 ring->mis.len[entry] = 0;
287 if (ring->skbuff[entry]) {
288 pkts_compl++;
289 bytes_compl += ring->skbuff[entry]->len;
290 napi_consume_skb(ring->skbuff[entry], budget);
291 ring->skbuff[entry] = NULL;
292 }
293
294 dirty_tx++;
295 tx_left--;
296 workdone++;
297
298 if (workdone == RTASE_TX_BUDGET_DEFAULT)
299 break;
300 }
301
302 if (ring->dirty_idx != dirty_tx) {
303 dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
304 WRITE_ONCE(ring->dirty_idx, dirty_tx);
305
306 netif_subqueue_completed_wake(dev, ring->index, pkts_compl,
307 bytes_compl,
308 rtase_tx_avail(ring),
309 RTASE_TX_START_THRS);
310
311 if (ring->cur_idx != dirty_tx)
312 rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index));
313 }
314
315 return 0;
316 }
317
318 static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
319 {
320 struct rtase_ring *ring = &tp->tx_ring[idx];
321 struct rtase_tx_desc *desc;
322 u32 i;
323
324 memset(ring->desc, 0x0, RTASE_TX_RING_DESC_SIZE);
325 memset(ring->skbuff, 0x0, sizeof(ring->skbuff));
326 ring->cur_idx = 0;
327 ring->dirty_idx = 0;
328 ring->index = idx;
329 ring->type = NETDEV_QUEUE_TYPE_TX;
330 ring->alloc_fail = 0;
331
332 for (i = 0; i < RTASE_NUM_DESC; i++) {
333 ring->mis.len[i] = 0;
334 if ((RTASE_NUM_DESC - 1) == i) {
335 desc = ring->desc + sizeof(struct rtase_tx_desc) * i;
336 desc->opts1 = cpu_to_le32(RTASE_RING_END);
337 }
338 }
339
340 ring->ring_handler = tx_handler;
341 if (idx < 4) {
342 ring->ivec = &tp->int_vector[idx];
343 list_add_tail(&ring->ring_entry,
344 &tp->int_vector[idx].ring_list);
345 } else {
346 ring->ivec = &tp->int_vector[0];
347 list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
348 }
349
350 netif_queue_set_napi(tp->dev, ring->index,
351 ring->type, &ring->ivec->napi);
352 }
353
354 static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
355 u32 rx_buf_sz)
356 {
357 desc->desc_cmd.addr = cpu_to_le64(mapping);
358
359 rtase_mark_to_asic(desc, rx_buf_sz);
360 }
361
362 static void rtase_make_unusable_by_asic(union rtase_rx_desc *desc)
363 {
364 desc->desc_cmd.addr = cpu_to_le64(RTK_MAGIC_NUMBER);
365 desc->desc_cmd.opts1 &= ~cpu_to_le32(RTASE_DESC_OWN | RSVD_MASK);
366 }
367
368 static int rtase_alloc_rx_data_buf(struct rtase_ring *ring,
369 void **p_data_buf,
370 union rtase_rx_desc *desc,
371 dma_addr_t *rx_phy_addr)
372 {
373 struct rtase_int_vector *ivec = ring->ivec;
374 const struct rtase_private *tp = ivec->tp;
375 dma_addr_t mapping;
376 struct page *page;
377
378 page = page_pool_dev_alloc_pages(tp->page_pool);
379 if (!page) {
380 ring->alloc_fail++;
381 goto err_out;
382 }
383
384 *p_data_buf = page_address(page);
385 mapping = page_pool_get_dma_addr(page);
386 *rx_phy_addr = mapping;
387 rtase_map_to_asic(desc, mapping, tp->rx_buf_sz);
388
389 return 0;
390
391 err_out:
392 rtase_make_unusable_by_asic(desc);
393
394 return -ENOMEM;
395 }
396
397 static u32 rtase_rx_ring_fill(struct rtase_ring *ring, u32 ring_start,
398 u32 ring_end)
399 {
400 union rtase_rx_desc *desc_base = ring->desc;
401 u32 cur;
402
403 for (cur = ring_start; ring_end - cur > 0; cur++) {
404 u32 i = cur % RTASE_NUM_DESC;
405 union rtase_rx_desc *desc = desc_base + i;
406 int ret;
407
408 if (ring->data_buf[i])
409 continue;
410
411 ret = rtase_alloc_rx_data_buf(ring, &ring->data_buf[i], desc,
412 &ring->mis.data_phy_addr[i]);
413 if (ret)
414 break;
415 }
416
417 return cur - ring_start;
418 }
419
420 static void rtase_mark_as_last_descriptor(union rtase_rx_desc *desc)
421 {
422 desc->desc_cmd.opts1 |= cpu_to_le32(RTASE_RING_END);
423 }
424
425 static void rtase_rx_ring_clear(struct page_pool *page_pool,
426 struct rtase_ring *ring)
427 {
428 union rtase_rx_desc *desc;
429 struct page *page;
430 u32 i;
431
432 for (i = 0; i < RTASE_NUM_DESC; i++) {
433 desc = ring->desc + sizeof(union rtase_rx_desc) * i;
434 page = virt_to_head_page(ring->data_buf[i]);
435
436 if (ring->data_buf[i])
437 page_pool_put_full_page(page_pool, page, true);
438
439 rtase_make_unusable_by_asic(desc);
440 }
441 }
442
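/* A frame is complete only when both the first- and last-fragment bits are
 * set in the RX descriptor status; anything else is a multi-descriptor
 * frame, which this driver does not support (see rx_handler()).
 */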
443 static int rtase_fragmented_frame(u32 status)
444 {
445 return (status & (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG)) !=
446 (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG);
447 }
448
449 static void rtase_rx_csum(const struct rtase_private *tp, struct sk_buff *skb,
450 const union rtase_rx_desc *desc)
451 {
452 u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
453
454 /* rx csum offload */
455 if (((opts2 & RTASE_RX_V4F) && !(opts2 & RTASE_RX_IPF)) ||
456 (opts2 & RTASE_RX_V6F)) {
457 if (((opts2 & RTASE_RX_TCPT) && !(opts2 & RTASE_RX_TCPF)) ||
458 ((opts2 & RTASE_RX_UDPT) && !(opts2 & RTASE_RX_UDPF)))
459 skb->ip_summed = CHECKSUM_UNNECESSARY;
460 else
461 skb->ip_summed = CHECKSUM_NONE;
462 } else {
463 skb->ip_summed = CHECKSUM_NONE;
464 }
465 }
466
467 static void rtase_rx_vlan_skb(union rtase_rx_desc *desc, struct sk_buff *skb)
468 {
469 u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
470
471 if (!(opts2 & RTASE_RX_VLAN_TAG))
472 return;
473
474 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
475 swab16(opts2 & RTASE_VLAN_TAG_MASK));
476 }
477
478 static void rtase_rx_skb(const struct rtase_ring *ring, struct sk_buff *skb)
479 {
480 struct rtase_int_vector *ivec = ring->ivec;
481
482 napi_gro_receive(&ivec->napi, skb);
483 }
484
485 static int rx_handler(struct rtase_ring *ring, int budget)
486 {
487 union rtase_rx_desc *desc_base = ring->desc;
488 u32 pkt_size, cur_rx, delta, entry, status;
489 struct rtase_private *tp = ring->ivec->tp;
490 struct net_device *dev = tp->dev;
491 union rtase_rx_desc *desc;
492 struct sk_buff *skb;
493 int workdone = 0;
494
495 cur_rx = ring->cur_idx;
496 entry = cur_rx % RTASE_NUM_DESC;
497 desc = &desc_base[entry];
498
499 while (workdone < budget) {
500 status = le32_to_cpu(desc->desc_status.opts1);
501
502 if (status & RTASE_DESC_OWN)
503 break;
504
505 /* This barrier is needed to keep us from reading
506 * any other fields out of the rx descriptor until
507 * we know the status of RTASE_DESC_OWN
508 */
509 dma_rmb();
510
511 if (unlikely(status & RTASE_RX_RES)) {
512 if (net_ratelimit())
513 netdev_warn(dev, "Rx ERROR. status = %08x\n",
514 status);
515
516 tp->stats.rx_errors++;
517
518 if (status & (RTASE_RX_RWT | RTASE_RX_RUNT))
519 tp->stats.rx_length_errors++;
520
521 if (status & RTASE_RX_CRC)
522 tp->stats.rx_crc_errors++;
523
524 if (dev->features & NETIF_F_RXALL)
525 goto process_pkt;
526
527 rtase_mark_to_asic(desc, tp->rx_buf_sz);
528 goto skip_process_pkt;
529 }
530
531 process_pkt:
532 pkt_size = status & RTASE_RX_PKT_SIZE_MASK;
533 if (likely(!(dev->features & NETIF_F_RXFCS)))
534 pkt_size -= ETH_FCS_LEN;
535
536 /* The driver does not support incoming fragmented frames.
537 * They are treated as a symptom of over-MTU-sized frames.
538 */
539 if (unlikely(rtase_fragmented_frame(status))) {
540 tp->stats.rx_dropped++;
541 tp->stats.rx_length_errors++;
542 rtase_mark_to_asic(desc, tp->rx_buf_sz);
543 goto skip_process_pkt;
544 }
545
546 dma_sync_single_for_cpu(&tp->pdev->dev,
547 ring->mis.data_phy_addr[entry],
548 tp->rx_buf_sz, DMA_FROM_DEVICE);
549
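/* Build the skb directly on top of the page pool buffer (no copy);
 * skb_mark_for_recycle() below lets the page go back to the pool when
 * the skb is freed instead of to the page allocator.
 */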
550 skb = build_skb(ring->data_buf[entry], PAGE_SIZE);
551 if (!skb) {
552 tp->stats.rx_dropped++;
553 rtase_mark_to_asic(desc, tp->rx_buf_sz);
554 goto skip_process_pkt;
555 }
556 ring->data_buf[entry] = NULL;
557
558 if (dev->features & NETIF_F_RXCSUM)
559 rtase_rx_csum(tp, skb, desc);
560
561 skb_put(skb, pkt_size);
562 skb_mark_for_recycle(skb);
563 skb->protocol = eth_type_trans(skb, dev);
564
565 if (skb->pkt_type == PACKET_MULTICAST)
566 tp->stats.multicast++;
567
568 rtase_rx_vlan_skb(desc, skb);
569 rtase_rx_skb(ring, skb);
570
571 dev_sw_netstats_rx_add(dev, pkt_size);
572
573 skip_process_pkt:
574 workdone++;
575 cur_rx++;
576 entry = cur_rx % RTASE_NUM_DESC;
577 desc = ring->desc + sizeof(union rtase_rx_desc) * entry;
578 }
579
580 ring->cur_idx = cur_rx;
581 delta = rtase_rx_ring_fill(ring, ring->dirty_idx, ring->cur_idx);
582 ring->dirty_idx += delta;
583
584 return workdone;
585 }
586
587 static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
588 {
589 struct rtase_ring *ring = &tp->rx_ring[idx];
590 u16 i;
591
592 memset(ring->desc, 0x0, RTASE_RX_RING_DESC_SIZE);
593 memset(ring->data_buf, 0x0, sizeof(ring->data_buf));
594 ring->cur_idx = 0;
595 ring->dirty_idx = 0;
596 ring->index = idx;
597 ring->type = NETDEV_QUEUE_TYPE_RX;
598 ring->alloc_fail = 0;
599
600 for (i = 0; i < RTASE_NUM_DESC; i++)
601 ring->mis.data_phy_addr[i] = 0;
602
603 ring->ring_handler = rx_handler;
604 ring->ivec = &tp->int_vector[idx];
605 netif_queue_set_napi(tp->dev, ring->index,
606 ring->type, &ring->ivec->napi);
607 list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
608 }
609
610 static void rtase_rx_clear(struct rtase_private *tp)
611 {
612 u32 i;
613
614 for (i = 0; i < tp->func_rx_queue_num; i++)
615 rtase_rx_ring_clear(tp->page_pool, &tp->rx_ring[i]);
616
617 page_pool_destroy(tp->page_pool);
618 tp->page_pool = NULL;
619 }
620
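/* Create one page pool shared by all RX rings (one full page per RX
 * descriptor, pool sized for RTASE_NUM_DESC * func_rx_queue_num buffers).
 * The pool takes care of DMA mapping and device sync via PP_FLAG_DMA_MAP |
 * PP_FLAG_DMA_SYNC_DEV, then the TX/RX rings are initialized and the RX
 * rings are filled with buffers.
 */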
621 static int rtase_init_ring(const struct net_device *dev)
622 {
623 struct rtase_private *tp = netdev_priv(dev);
624 struct page_pool_params pp_params = { 0 };
625 struct page_pool *page_pool;
626 u32 num;
627 u16 i;
628
629 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
630 pp_params.order = 0;
631 pp_params.pool_size = RTASE_NUM_DESC * tp->func_rx_queue_num;
632 pp_params.nid = dev_to_node(&tp->pdev->dev);
633 pp_params.dev = &tp->pdev->dev;
634 pp_params.dma_dir = DMA_FROM_DEVICE;
635 pp_params.max_len = PAGE_SIZE;
636 pp_params.offset = 0;
637
638 page_pool = page_pool_create(&pp_params);
639 if (IS_ERR(page_pool)) {
640 netdev_err(tp->dev, "failed to create page pool\n");
641 return -ENOMEM;
642 }
643
644 tp->page_pool = page_pool;
645
646 for (i = 0; i < tp->func_tx_queue_num; i++)
647 rtase_tx_desc_init(tp, i);
648
649 for (i = 0; i < tp->func_rx_queue_num; i++) {
650 rtase_rx_desc_init(tp, i);
651
652 num = rtase_rx_ring_fill(&tp->rx_ring[i], 0, RTASE_NUM_DESC);
653 if (num != RTASE_NUM_DESC)
654 goto err_out;
655
656 rtase_mark_as_last_descriptor(tp->rx_ring[i].desc +
657 sizeof(union rtase_rx_desc) *
658 (RTASE_NUM_DESC - 1));
659 }
660
661 return 0;
662
663 err_out:
664 rtase_rx_clear(tp);
665 return -ENOMEM;
666 }
667
668 static void rtase_interrupt_mitigation(const struct rtase_private *tp)
669 {
670 u32 i;
671
672 for (i = 0; i < tp->func_tx_queue_num; i++)
673 rtase_w16(tp, RTASE_INT_MITI_TX + i * 2, tp->tx_int_mit);
674
675 for (i = 0; i < tp->func_rx_queue_num; i++)
676 rtase_w16(tp, RTASE_INT_MITI_RX + i * 2, tp->rx_int_mit);
677 }
678
679 static void rtase_tally_counter_addr_fill(const struct rtase_private *tp)
680 {
681 rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr));
682 rtase_w32(tp, RTASE_DTCCR0, lower_32_bits(tp->tally_paddr));
683 }
684
685 static void rtase_tally_counter_clear(const struct rtase_private *tp)
686 {
687 u32 cmd = lower_32_bits(tp->tally_paddr);
688
689 rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr));
690 rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_RESET);
691 }
692
693 static void rtase_desc_addr_fill(const struct rtase_private *tp)
694 {
695 const struct rtase_ring *ring;
696 u16 i, cmd, val;
697 int err;
698
699 for (i = 0; i < tp->func_tx_queue_num; i++) {
700 ring = &tp->tx_ring[i];
701
702 rtase_w32(tp, RTASE_TX_DESC_ADDR0,
703 lower_32_bits(ring->phy_addr));
704 rtase_w32(tp, RTASE_TX_DESC_ADDR4,
705 upper_32_bits(ring->phy_addr));
706
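/* Hand the descriptor base address to TX queue i: write the queue index
 * together with the RTASE_TX_DESC_CMD_WE/CS command bits, then poll until
 * the hardware clears RTASE_TX_DESC_CMD_CS, which presumably indicates the
 * address has been latched.
 */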
707 cmd = i | RTASE_TX_DESC_CMD_WE | RTASE_TX_DESC_CMD_CS;
708 rtase_w16(tp, RTASE_TX_DESC_COMMAND, cmd);
709
710 err = read_poll_timeout(rtase_r16, val,
711 !(val & RTASE_TX_DESC_CMD_CS), 10,
712 1000, false, tp,
713 RTASE_TX_DESC_COMMAND);
714
715 if (err == -ETIMEDOUT)
716 netdev_err(tp->dev,
717 "error occurred in fill tx descriptor\n");
718 }
719
720 for (i = 0; i < tp->func_rx_queue_num; i++) {
721 ring = &tp->rx_ring[i];
722
723 if (i == 0) {
724 rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR0,
725 lower_32_bits(ring->phy_addr));
726 rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR4,
727 upper_32_bits(ring->phy_addr));
728 } else {
729 rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR0 + ((i - 1) * 8)),
730 lower_32_bits(ring->phy_addr));
731 rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR4 + ((i - 1) * 8)),
732 upper_32_bits(ring->phy_addr));
733 }
734 }
735 }
736
737 static void rtase_hw_set_features(const struct net_device *dev,
738 netdev_features_t features)
739 {
740 const struct rtase_private *tp = netdev_priv(dev);
741 u16 rx_config, val;
742
743 rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0);
744 if (features & NETIF_F_RXALL)
745 rx_config |= (RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT);
746 else
747 rx_config &= ~(RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT);
748
749 rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config);
750
751 val = rtase_r16(tp, RTASE_CPLUS_CMD);
752 if (features & NETIF_F_RXCSUM)
753 rtase_w16(tp, RTASE_CPLUS_CMD, val | RTASE_RX_CHKSUM);
754 else
755 rtase_w16(tp, RTASE_CPLUS_CMD, val & ~RTASE_RX_CHKSUM);
756
757 rx_config = rtase_r16(tp, RTASE_RX_CONFIG_1);
758 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
759 rx_config |= (RTASE_INNER_VLAN_DETAG_EN |
760 RTASE_OUTER_VLAN_DETAG_EN);
761 else
762 rx_config &= ~(RTASE_INNER_VLAN_DETAG_EN |
763 RTASE_OUTER_VLAN_DETAG_EN);
764
765 rtase_w16(tp, RTASE_RX_CONFIG_1, rx_config);
766 }
767
768 static void rtase_hw_set_rx_packet_filter(struct net_device *dev)
769 {
770 u32 mc_filter[2] = { 0xFFFFFFFF, 0xFFFFFFFF };
771 struct rtase_private *tp = netdev_priv(dev);
772 u16 rx_mode;
773
774 rx_mode = rtase_r16(tp, RTASE_RX_CONFIG_0) & ~RTASE_ACCEPT_MASK;
775 rx_mode |= RTASE_ACCEPT_BROADCAST | RTASE_ACCEPT_MYPHYS;
776
777 if (dev->flags & IFF_PROMISC) {
778 rx_mode |= RTASE_ACCEPT_MULTICAST | RTASE_ACCEPT_ALLPHYS;
779 } else if (dev->flags & IFF_ALLMULTI) {
780 rx_mode |= RTASE_ACCEPT_MULTICAST;
781 } else {
782 struct netdev_hw_addr *hw_addr;
783
784 mc_filter[0] = 0;
785 mc_filter[1] = 0;
786
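/* Hash each multicast address with eth_hw_addr_crc(): the top CRC bit
 * selects one of the two 32-bit filter words and the bits covered by
 * RTASE_MULTICAST_FILTER_MASK select the bit within it. The words are
 * written to MAR0/MAR1 byte-swapped below, which is presumably the bit
 * ordering the hardware expects.
 */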
787 netdev_for_each_mc_addr(hw_addr, dev) {
788 u32 bit_nr = eth_hw_addr_crc(hw_addr);
789 u32 idx = u32_get_bits(bit_nr, BIT(31));
790 u32 bit = u32_get_bits(bit_nr,
791 RTASE_MULTICAST_FILTER_MASK);
792
793 mc_filter[idx] |= BIT(bit);
794 rx_mode |= RTASE_ACCEPT_MULTICAST;
795 }
796 }
797
798 if (dev->features & NETIF_F_RXALL)
799 rx_mode |= RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT;
800
801 rtase_w32(tp, RTASE_MAR0, swab32(mc_filter[1]));
802 rtase_w32(tp, RTASE_MAR1, swab32(mc_filter[0]));
803 rtase_w16(tp, RTASE_RX_CONFIG_0, rx_mode);
804 }
805
806 static void rtase_irq_dis_and_clear(const struct rtase_private *tp)
807 {
808 const struct rtase_int_vector *ivec = &tp->int_vector[0];
809 u32 val1;
810 u16 val2;
811 u8 i;
812
813 rtase_w32(tp, ivec->imr_addr, 0);
814 val1 = rtase_r32(tp, ivec->isr_addr);
815 rtase_w32(tp, ivec->isr_addr, val1);
816
817 for (i = 1; i < tp->int_nums; i++) {
818 ivec = &tp->int_vector[i];
819 rtase_w16(tp, ivec->imr_addr, 0);
820 val2 = rtase_r16(tp, ivec->isr_addr);
821 rtase_w16(tp, ivec->isr_addr, val2);
822 }
823 }
824
825 static void rtase_poll_timeout(const struct rtase_private *tp, u32 cond,
826 u32 sleep_us, u64 timeout_us, u16 reg)
827 {
828 int err;
829 u8 val;
830
831 err = read_poll_timeout(rtase_r8, val, val & cond, sleep_us,
832 timeout_us, false, tp, reg);
833
834 if (err == -ETIMEDOUT)
835 netdev_err(tp->dev, "poll reg 0x00%x timeout\n", reg);
836 }
837
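/* Quiesce the NIC: stop accepting packets, assert the stop request and wait
 * for it to complete, let the TX/RX FIFOs drain, then clear the transmit
 * and receive enables before restoring the saved RX config.
 */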
838 static void rtase_nic_reset(const struct net_device *dev)
839 {
840 const struct rtase_private *tp = netdev_priv(dev);
841 u16 rx_config;
842 u8 val;
843
844 rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0);
845 rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config & ~RTASE_ACCEPT_MASK);
846
847 val = rtase_r8(tp, RTASE_MISC);
848 rtase_w8(tp, RTASE_MISC, val | RTASE_RX_DV_GATE_EN);
849
850 val = rtase_r8(tp, RTASE_CHIP_CMD);
851 rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_STOP_REQ);
852 mdelay(2);
853
854 rtase_poll_timeout(tp, RTASE_STOP_REQ_DONE, 100, 150000,
855 RTASE_CHIP_CMD);
856
857 rtase_poll_timeout(tp, RTASE_TX_FIFO_EMPTY, 100, 100000,
858 RTASE_FIFOR);
859
860 rtase_poll_timeout(tp, RTASE_RX_FIFO_EMPTY, 100, 100000,
861 RTASE_FIFOR);
862
863 val = rtase_r8(tp, RTASE_CHIP_CMD);
864 rtase_w8(tp, RTASE_CHIP_CMD, val & ~(RTASE_TE | RTASE_RE));
865 val = rtase_r8(tp, RTASE_CHIP_CMD);
866 rtase_w8(tp, RTASE_CHIP_CMD, val & ~RTASE_STOP_REQ);
867
868 rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config);
869 }
870
871 static void rtase_hw_reset(const struct net_device *dev)
872 {
873 const struct rtase_private *tp = netdev_priv(dev);
874
875 rtase_irq_dis_and_clear(tp);
876
877 rtase_nic_reset(dev);
878 }
879
880 static void rtase_set_rx_queue(const struct rtase_private *tp)
881 {
882 u16 reg_data;
883
884 reg_data = rtase_r16(tp, RTASE_FCR);
885 switch (tp->func_rx_queue_num) {
886 case 1:
887 u16p_replace_bits(&reg_data, 0x1, RTASE_FCR_RXQ_MASK);
888 break;
889 case 2:
890 u16p_replace_bits(&reg_data, 0x2, RTASE_FCR_RXQ_MASK);
891 break;
892 case 4:
893 u16p_replace_bits(&reg_data, 0x3, RTASE_FCR_RXQ_MASK);
894 break;
895 }
896 rtase_w16(tp, RTASE_FCR, reg_data);
897 }
898
899 static void rtase_set_tx_queue(const struct rtase_private *tp)
900 {
901 u16 reg_data;
902
903 reg_data = rtase_r16(tp, RTASE_TX_CONFIG_1);
904 switch (tp->tx_queue_ctrl) {
905 case 1:
906 u16p_replace_bits(&reg_data, 0x0, RTASE_TC_MODE_MASK);
907 break;
908 case 2:
909 u16p_replace_bits(&reg_data, 0x1, RTASE_TC_MODE_MASK);
910 break;
911 case 3:
912 case 4:
913 u16p_replace_bits(&reg_data, 0x2, RTASE_TC_MODE_MASK);
914 break;
915 default:
916 u16p_replace_bits(&reg_data, 0x3, RTASE_TC_MODE_MASK);
917 break;
918 }
919 rtase_w16(tp, RTASE_TX_CONFIG_1, reg_data);
920 }
921
922 static void rtase_hw_config(struct net_device *dev)
923 {
924 const struct rtase_private *tp = netdev_priv(dev);
925 u32 reg_data32;
926 u16 reg_data16;
927
928 rtase_hw_reset(dev);
929
930 /* set rx dma burst */
931 reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_0);
932 reg_data16 &= ~(RTASE_RX_SINGLE_TAG | RTASE_RX_SINGLE_FETCH);
933 u16p_replace_bits(&reg_data16, RTASE_RX_DMA_BURST_256,
934 RTASE_RX_MX_DMA_MASK);
935 rtase_w16(tp, RTASE_RX_CONFIG_0, reg_data16);
936
937 /* enable new rx descriptor format */
938 reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_1);
939 reg_data16 |= RTASE_RX_NEW_DESC_FORMAT_EN | RTASE_PCIE_NEW_FLOW;
940 u16p_replace_bits(&reg_data16, 0xF, RTASE_RX_MAX_FETCH_DESC_MASK);
941 rtase_w16(tp, RTASE_RX_CONFIG_1, reg_data16);
942
943 rtase_set_rx_queue(tp);
944
945 rtase_interrupt_mitigation(tp);
946
947 /* set tx dma burst size and interframe gap time */
948 reg_data32 = rtase_r32(tp, RTASE_TX_CONFIG_0);
949 u32p_replace_bits(&reg_data32, RTASE_TX_DMA_BURST_UNLIMITED,
950 RTASE_TX_DMA_MASK);
951 u32p_replace_bits(&reg_data32, RTASE_INTERFRAMEGAP,
952 RTASE_TX_INTER_FRAME_GAP_MASK);
953 rtase_w32(tp, RTASE_TX_CONFIG_0, reg_data32);
954
955 /* enable new tx descriptor format */
956 reg_data16 = rtase_r16(tp, RTASE_TFUN_CTRL);
957 rtase_w16(tp, RTASE_TFUN_CTRL, reg_data16 |
958 RTASE_TX_NEW_DESC_FORMAT_EN);
959
960 /* tx fetch desc number */
961 rtase_w8(tp, RTASE_TDFNR, 0x10);
962
963 /* tag num select */
964 reg_data16 = rtase_r16(tp, RTASE_MTPS);
965 u16p_replace_bits(&reg_data16, 0x4, RTASE_TAG_NUM_SEL_MASK);
966 rtase_w16(tp, RTASE_MTPS, reg_data16);
967
968 rtase_set_tx_queue(tp);
969
970 rtase_w16(tp, RTASE_TOKSEL, 0x5555);
971
972 rtase_tally_counter_addr_fill(tp);
973 rtase_desc_addr_fill(tp);
974 rtase_hw_set_features(dev, dev->features);
975
976 /* enable flow control */
977 reg_data16 = rtase_r16(tp, RTASE_CPLUS_CMD);
978 reg_data16 |= (RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN);
979 rtase_w16(tp, RTASE_CPLUS_CMD, reg_data16);
980 /* set rx fifo near-full threshold to mitigate rx missed drops */
981 rtase_w16(tp, RTASE_RFIFONFULL, 0x190);
982
983 rtase_w16(tp, RTASE_RMS, tp->rx_buf_sz);
984
985 rtase_hw_set_rx_packet_filter(dev);
986 }
987
988 static void rtase_nic_enable(const struct net_device *dev)
989 {
990 const struct rtase_private *tp = netdev_priv(dev);
991 u16 rcr = rtase_r16(tp, RTASE_RX_CONFIG_1);
992 u8 val;
993
994 rtase_w16(tp, RTASE_RX_CONFIG_1, rcr & ~RTASE_PCIE_RELOAD_EN);
995 rtase_w16(tp, RTASE_RX_CONFIG_1, rcr | RTASE_PCIE_RELOAD_EN);
996
997 val = rtase_r8(tp, RTASE_CHIP_CMD);
998 rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_TE | RTASE_RE);
999
1000 val = rtase_r8(tp, RTASE_MISC);
1001 rtase_w8(tp, RTASE_MISC, val & ~RTASE_RX_DV_GATE_EN);
1002 }
1003
1004 static void rtase_enable_hw_interrupt(const struct rtase_private *tp)
1005 {
1006 const struct rtase_int_vector *ivec = &tp->int_vector[0];
1007 u32 i;
1008
1009 rtase_w32(tp, ivec->imr_addr, ivec->imr);
1010
1011 for (i = 1; i < tp->int_nums; i++) {
1012 ivec = &tp->int_vector[i];
1013 rtase_w16(tp, ivec->imr_addr, ivec->imr);
1014 }
1015 }
1016
1017 static void rtase_hw_start(const struct net_device *dev)
1018 {
1019 const struct rtase_private *tp = netdev_priv(dev);
1020
1021 rtase_nic_enable(dev);
1022 rtase_enable_hw_interrupt(tp);
1023 }
1024
1025 /* the interrupt handler handles RXQ0, TXQ0 and TXQ4~7 interrupt status
1026 */
1027 static irqreturn_t rtase_interrupt(int irq, void *dev_instance)
1028 {
1029 const struct rtase_private *tp;
1030 struct rtase_int_vector *ivec;
1031 u32 status;
1032
1033 ivec = dev_instance;
1034 tp = ivec->tp;
1035 status = rtase_r32(tp, ivec->isr_addr);
1036
1037 rtase_w32(tp, ivec->imr_addr, 0x0);
1038 rtase_w32(tp, ivec->isr_addr, status & ~RTASE_FOVW);
1039
1040 if (napi_schedule_prep(&ivec->napi))
1041 __napi_schedule(&ivec->napi);
1042
1043 return IRQ_HANDLED;
1044 }
1045
1046 /* the interrupt handler handles RXQ1&TXQ1, RXQ2&TXQ2 or RXQ3&TXQ3 interrupt
1047 * status according to interrupt vector
1048 */
1049 static irqreturn_t rtase_q_interrupt(int irq, void *dev_instance)
1050 {
1051 const struct rtase_private *tp;
1052 struct rtase_int_vector *ivec;
1053 u16 status;
1054
1055 ivec = dev_instance;
1056 tp = ivec->tp;
1057 status = rtase_r16(tp, ivec->isr_addr);
1058
1059 rtase_w16(tp, ivec->imr_addr, 0x0);
1060 rtase_w16(tp, ivec->isr_addr, status);
1061
1062 if (napi_schedule_prep(&ivec->napi))
1063 __napi_schedule(&ivec->napi);
1064
1065 return IRQ_HANDLED;
1066 }
1067
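/* NAPI poll routine shared by all interrupt vectors: run every TX/RX ring
 * attached to this vector and re-enable the vector's interrupt mask only
 * when all rings finished under budget (napi_complete_done() succeeded).
 * Vector 0 uses a 32-bit IMR, the per-queue vectors a 16-bit one.
 */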
1068 static int rtase_poll(struct napi_struct *napi, int budget)
1069 {
1070 const struct rtase_int_vector *ivec;
1071 const struct rtase_private *tp;
1072 struct rtase_ring *ring;
1073 int total_workdone = 0;
1074
1075 ivec = container_of(napi, struct rtase_int_vector, napi);
1076 tp = ivec->tp;
1077
1078 list_for_each_entry(ring, &ivec->ring_list, ring_entry)
1079 total_workdone += ring->ring_handler(ring, budget);
1080
1081 if (total_workdone >= budget)
1082 return budget;
1083
1084 if (napi_complete_done(napi, total_workdone)) {
1085 if (!ivec->index)
1086 rtase_w32(tp, ivec->imr_addr, ivec->imr);
1087 else
1088 rtase_w16(tp, ivec->imr_addr, ivec->imr);
1089 }
1090
1091 return total_workdone;
1092 }
1093
1094 static int rtase_open(struct net_device *dev)
1095 {
1096 struct rtase_private *tp = netdev_priv(dev);
1097 const struct pci_dev *pdev = tp->pdev;
1098 struct rtase_int_vector *ivec;
1099 u16 i = 0, j;
1100 int ret;
1101
1102 ivec = &tp->int_vector[0];
1103 tp->rx_buf_sz = RTASE_RX_BUF_SIZE;
1104
1105 ret = rtase_alloc_desc(tp);
1106 if (ret)
1107 return ret;
1108
1109 ret = rtase_init_ring(dev);
1110 if (ret)
1111 goto err_free_all_allocated_mem;
1112
1113 rtase_hw_config(dev);
1114
1115 if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
1116 ret = request_irq(ivec->irq, rtase_interrupt, 0,
1117 dev->name, ivec);
1118 if (ret)
1119 goto err_free_all_allocated_irq;
1120
1121 /* request other interrupts to handle multiqueue */
1122 for (i = 1; i < tp->int_nums; i++) {
1123 ivec = &tp->int_vector[i];
1124 snprintf(ivec->name, sizeof(ivec->name), "%s_int%u",
1125 tp->dev->name, i);
1126 ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
1127 ivec->name, ivec);
1128 if (ret)
1129 goto err_free_all_allocated_irq;
1130 }
1131 } else {
1132 ret = request_irq(pdev->irq, rtase_interrupt, 0, dev->name,
1133 ivec);
1134 if (ret)
1135 goto err_free_all_allocated_mem;
1136 }
1137
1138 rtase_hw_start(dev);
1139
1140 for (i = 0; i < tp->int_nums; i++) {
1141 ivec = &tp->int_vector[i];
1142 napi_enable(&ivec->napi);
1143 }
1144
1145 netif_carrier_on(dev);
1146 netif_wake_queue(dev);
1147
1148 return 0;
1149
1150 err_free_all_allocated_irq:
1151 for (j = 0; j < i; j++)
1152 free_irq(tp->int_vector[j].irq, &tp->int_vector[j]);
1153
1154 err_free_all_allocated_mem:
1155 rtase_free_desc(tp);
1156
1157 return ret;
1158 }
1159
1160 static void rtase_down(struct net_device *dev)
1161 {
1162 struct rtase_private *tp = netdev_priv(dev);
1163 struct rtase_int_vector *ivec;
1164 struct rtase_ring *ring, *tmp;
1165 u32 i;
1166
1167 for (i = 0; i < tp->int_nums; i++) {
1168 ivec = &tp->int_vector[i];
1169 napi_disable(&ivec->napi);
1170 list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
1171 ring_entry) {
1172 netif_queue_set_napi(tp->dev, ring->index,
1173 ring->type, NULL);
1174
1175 list_del(&ring->ring_entry);
1176 }
1177 }
1178
1179 netif_tx_disable(dev);
1180
1181 netif_carrier_off(dev);
1182
1183 rtase_hw_reset(dev);
1184
1185 rtase_tx_clear(tp);
1186
1187 rtase_rx_clear(tp);
1188 }
1189
1190 static int rtase_close(struct net_device *dev)
1191 {
1192 struct rtase_private *tp = netdev_priv(dev);
1193 const struct pci_dev *pdev = tp->pdev;
1194 u32 i;
1195
1196 rtase_down(dev);
1197
1198 if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
1199 for (i = 0; i < tp->int_nums; i++)
1200 free_irq(tp->int_vector[i].irq, &tp->int_vector[i]);
1201
1202 } else {
1203 free_irq(pdev->irq, &tp->int_vector[0]);
1204 }
1205
1206 rtase_free_desc(tp);
1207
1208 return 0;
1209 }
1210
1211 static u32 rtase_tx_vlan_tag(const struct rtase_private *tp,
1212 const struct sk_buff *skb)
1213 {
1214 return (skb_vlan_tag_present(skb)) ?
1215 (RTASE_TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb))) : 0x00;
1216 }
1217
1218 static u32 rtase_tx_csum(struct sk_buff *skb, const struct net_device *dev)
1219 {
1220 u32 csum_cmd = 0;
1221 u8 ip_protocol;
1222
1223 switch (vlan_get_protocol(skb)) {
1224 case htons(ETH_P_IP):
1225 csum_cmd = RTASE_TX_IPCS_C;
1226 ip_protocol = ip_hdr(skb)->protocol;
1227 break;
1228
1229 case htons(ETH_P_IPV6):
1230 csum_cmd = RTASE_TX_IPV6F_C;
1231 ip_protocol = ipv6_hdr(skb)->nexthdr;
1232 break;
1233
1234 default:
1235 ip_protocol = IPPROTO_RAW;
1236 break;
1237 }
1238
1239 if (ip_protocol == IPPROTO_TCP)
1240 csum_cmd |= RTASE_TX_TCPCS_C;
1241 else if (ip_protocol == IPPROTO_UDP)
1242 csum_cmd |= RTASE_TX_UDPCS_C;
1243
1244 csum_cmd |= u32_encode_bits(skb_transport_offset(skb),
1245 RTASE_TCPHO_MASK);
1246
1247 return csum_cmd;
1248 }
1249
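/* Map and fill one TX descriptor per fragment. The head descriptor is
 * filled by the caller; only the final fragment carries RTASE_TX_LAST_FRAG
 * and the skb pointer, so TX completion frees the skb exactly once.
 * Returns the number of fragments consumed, or -EIO on a DMA mapping error.
 */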
1250 static int rtase_xmit_frags(struct rtase_ring *ring, struct sk_buff *skb,
1251 u32 opts1, u32 opts2)
1252 {
1253 const struct skb_shared_info *info = skb_shinfo(skb);
1254 const struct rtase_private *tp = ring->ivec->tp;
1255 const u8 nr_frags = info->nr_frags;
1256 struct rtase_tx_desc *txd = NULL;
1257 u32 cur_frag, entry;
1258
1259 entry = ring->cur_idx;
1260 for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
1261 const skb_frag_t *frag = &info->frags[cur_frag];
1262 dma_addr_t mapping;
1263 u32 status, len;
1264 void *addr;
1265
1266 entry = (entry + 1) % RTASE_NUM_DESC;
1267
1268 txd = ring->desc + sizeof(struct rtase_tx_desc) * entry;
1269 len = skb_frag_size(frag);
1270 addr = skb_frag_address(frag);
1271 mapping = dma_map_single(&tp->pdev->dev, addr, len,
1272 DMA_TO_DEVICE);
1273
1274 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
1275 if (unlikely(net_ratelimit()))
1276 netdev_err(tp->dev,
1277 "Failed to map TX fragments DMA!\n");
1278
1279 goto err_out;
1280 }
1281
1282 if (((entry + 1) % RTASE_NUM_DESC) == 0)
1283 status = (opts1 | len | RTASE_RING_END);
1284 else
1285 status = opts1 | len;
1286
1287 if (cur_frag == (nr_frags - 1)) {
1288 ring->skbuff[entry] = skb;
1289 status |= RTASE_TX_LAST_FRAG;
1290 }
1291
1292 ring->mis.len[entry] = len;
1293 txd->addr = cpu_to_le64(mapping);
1294 txd->opts2 = cpu_to_le32(opts2);
1295
1296 /* make sure the operating fields have been updated */
1297 dma_wmb();
1298 txd->opts1 = cpu_to_le32(status);
1299 }
1300
1301 return cur_frag;
1302
1303 err_out:
1304 rtase_tx_clear_range(ring, ring->cur_idx + 1, cur_frag);
1305 return -EIO;
1306 }
1307
1308 static netdev_tx_t rtase_start_xmit(struct sk_buff *skb,
1309 struct net_device *dev)
1310 {
1311 struct skb_shared_info *shinfo = skb_shinfo(skb);
1312 struct rtase_private *tp = netdev_priv(dev);
1313 u32 q_idx, entry, len, opts1, opts2;
1314 struct netdev_queue *tx_queue;
1315 bool stop_queue, door_bell;
1316 u32 mss = shinfo->gso_size;
1317 struct rtase_tx_desc *txd;
1318 struct rtase_ring *ring;
1319 dma_addr_t mapping;
1320 int frags;
1321
1322 /* multiqueues */
1323 q_idx = skb_get_queue_mapping(skb);
1324 ring = &tp->tx_ring[q_idx];
1325 tx_queue = netdev_get_tx_queue(dev, q_idx);
1326
1327 if (unlikely(!rtase_tx_avail(ring))) {
1328 if (net_ratelimit())
1329 netdev_err(dev,
1330 "BUG! Tx Ring full when queue awake!\n");
1331
1332 netif_stop_queue(dev);
1333 return NETDEV_TX_BUSY;
1334 }
1335
1336 entry = ring->cur_idx % RTASE_NUM_DESC;
1337 txd = ring->desc + sizeof(struct rtase_tx_desc) * entry;
1338
1339 opts1 = RTASE_DESC_OWN;
1340 opts2 = rtase_tx_vlan_tag(tp, skb);
1341
1342 /* tcp segmentation offload (or tcp large send) */
1343 if (mss) {
1344 if (shinfo->gso_type & SKB_GSO_TCPV4) {
1345 opts1 |= RTASE_GIANT_SEND_V4;
1346 } else if (shinfo->gso_type & SKB_GSO_TCPV6) {
1347 if (skb_cow_head(skb, 0))
1348 goto err_dma_0;
1349
1350 tcp_v6_gso_csum_prep(skb);
1351 opts1 |= RTASE_GIANT_SEND_V6;
1352 } else {
1353 WARN_ON_ONCE(1);
1354 }
1355
1356 opts1 |= u32_encode_bits(skb_transport_offset(skb),
1357 RTASE_TCPHO_MASK);
1358 opts2 |= u32_encode_bits(mss, RTASE_MSS_MASK);
1359 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1360 opts2 |= rtase_tx_csum(skb, dev);
1361 }
1362
1363 frags = rtase_xmit_frags(ring, skb, opts1, opts2);
1364 if (unlikely(frags < 0))
1365 goto err_dma_0;
1366
1367 if (frags) {
1368 len = skb_headlen(skb);
1369 opts1 |= RTASE_TX_FIRST_FRAG;
1370 } else {
1371 len = skb->len;
1372 ring->skbuff[entry] = skb;
1373 opts1 |= RTASE_TX_FIRST_FRAG | RTASE_TX_LAST_FRAG;
1374 }
1375
1376 if (((entry + 1) % RTASE_NUM_DESC) == 0)
1377 opts1 |= (len | RTASE_RING_END);
1378 else
1379 opts1 |= len;
1380
1381 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
1382 DMA_TO_DEVICE);
1383
1384 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
1385 if (unlikely(net_ratelimit()))
1386 netdev_err(dev, "Failed to map TX DMA!\n");
1387
1388 goto err_dma_1;
1389 }
1390
1391 ring->mis.len[entry] = len;
1392 txd->addr = cpu_to_le64(mapping);
1393 txd->opts2 = cpu_to_le32(opts2);
1394 txd->opts1 = cpu_to_le32(opts1 & ~RTASE_DESC_OWN);
1395
1396 /* make sure the operating fields have been updated */
1397 dma_wmb();
1398
1399 door_bell = __netdev_tx_sent_queue(tx_queue, skb->len,
1400 netdev_xmit_more());
1401
1402 txd->opts1 = cpu_to_le32(opts1);
1403
1404 skb_tx_timestamp(skb);
1405
1406 /* tx needs to see descriptor changes before updated cur_idx */
1407 smp_wmb();
1408
1409 WRITE_ONCE(ring->cur_idx, ring->cur_idx + frags + 1);
1410
1411 stop_queue = !netif_subqueue_maybe_stop(dev, ring->index,
1412 rtase_tx_avail(ring),
1413 RTASE_TX_STOP_THRS,
1414 RTASE_TX_START_THRS);
1415
1416 if (door_bell || stop_queue)
1417 rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index));
1418
1419 return NETDEV_TX_OK;
1420
1421 err_dma_1:
1422 ring->skbuff[entry] = NULL;
1423 rtase_tx_clear_range(ring, ring->cur_idx + 1, frags);
1424
1425 err_dma_0:
1426 tp->stats.tx_dropped++;
1427 dev_kfree_skb_any(skb);
1428 return NETDEV_TX_OK;
1429 }
1430
1431 static void rtase_set_rx_mode(struct net_device *dev)
1432 {
1433 rtase_hw_set_rx_packet_filter(dev);
1434 }
1435
1436 static void rtase_enable_eem_write(const struct rtase_private *tp)
1437 {
1438 u8 val;
1439
1440 val = rtase_r8(tp, RTASE_EEM);
1441 rtase_w8(tp, RTASE_EEM, val | RTASE_EEM_UNLOCK);
1442 }
1443
1444 static void rtase_disable_eem_write(const struct rtase_private *tp)
1445 {
1446 u8 val;
1447
1448 val = rtase_r8(tp, RTASE_EEM);
1449 rtase_w8(tp, RTASE_EEM, val & ~RTASE_EEM_UNLOCK);
1450 }
1451
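/* Program the station address. The MAC0/MAC4 registers are guarded by the
 * EEM lock, so the write is bracketed by rtase_enable_eem_write() and
 * rtase_disable_eem_write(); the LBK_CTRL write afterwards appears to
 * reload the new address into the loopback/address-filter logic.
 */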
1452 static void rtase_rar_set(const struct rtase_private *tp, const u8 *addr)
1453 {
1454 u32 rar_low, rar_high;
1455
1456 rar_low = (u32)addr[0] | ((u32)addr[1] << 8) |
1457 ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
1458
1459 rar_high = (u32)addr[4] | ((u32)addr[5] << 8);
1460
1461 rtase_enable_eem_write(tp);
1462 rtase_w32(tp, RTASE_MAC0, rar_low);
1463 rtase_w32(tp, RTASE_MAC4, rar_high);
1464 rtase_disable_eem_write(tp);
1465 rtase_w16(tp, RTASE_LBK_CTRL, RTASE_LBK_ATLD | RTASE_LBK_CLR);
1466 }
1467
1468 static int rtase_set_mac_address(struct net_device *dev, void *p)
1469 {
1470 struct rtase_private *tp = netdev_priv(dev);
1471 int ret;
1472
1473 ret = eth_mac_addr(dev, p);
1474 if (ret)
1475 return ret;
1476
1477 rtase_rar_set(tp, dev->dev_addr);
1478
1479 return 0;
1480 }
1481
1482 static int rtase_change_mtu(struct net_device *dev, int new_mtu)
1483 {
1484 dev->mtu = new_mtu;
1485
1486 netdev_update_features(dev);
1487
1488 return 0;
1489 }
1490
1491 static void rtase_wait_for_quiescence(const struct net_device *dev)
1492 {
1493 struct rtase_private *tp = netdev_priv(dev);
1494 struct rtase_int_vector *ivec;
1495 u32 i;
1496
1497 for (i = 0; i < tp->int_nums; i++) {
1498 ivec = &tp->int_vector[i];
1499 synchronize_irq(ivec->irq);
1500 /* wait for any pending NAPI task to complete */
1501 napi_disable(&ivec->napi);
1502 }
1503
1504 rtase_irq_dis_and_clear(tp);
1505
1506 for (i = 0; i < tp->int_nums; i++) {
1507 ivec = &tp->int_vector[i];
1508 napi_enable(&ivec->napi);
1509 }
1510 }
1511
1512 static void rtase_sw_reset(struct net_device *dev)
1513 {
1514 struct rtase_private *tp = netdev_priv(dev);
1515 struct rtase_ring *ring, *tmp;
1516 struct rtase_int_vector *ivec;
1517 int ret;
1518 u32 i;
1519
1520 netif_stop_queue(dev);
1521 netif_carrier_off(dev);
1522 rtase_hw_reset(dev);
1523
1524 /* wait until any in-flight (async) irq has been handled */
1525 rtase_wait_for_quiescence(dev);
1526 rtase_tx_clear(tp);
1527 rtase_rx_clear(tp);
1528
1529 for (i = 0; i < tp->int_nums; i++) {
1530 ivec = &tp->int_vector[i];
1531 list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
1532 ring_entry) {
1533 netif_queue_set_napi(tp->dev, ring->index,
1534 ring->type, NULL);
1535
1536 list_del(&ring->ring_entry);
1537 }
1538 }
1539
1540 ret = rtase_init_ring(dev);
1541 if (ret) {
1542 netdev_err(dev, "unable to init ring\n");
1543 rtase_free_desc(tp);
1544 return;
1545 }
1546
1547 rtase_hw_config(dev);
1548 /* the link is always up, so start transmitting and receiving */
1549 rtase_hw_start(dev);
1550
1551 netif_carrier_on(dev);
1552 netif_wake_queue(dev);
1553 }
1554
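/* Ask the NIC to DMA the tally counters into the buffer at tally_paddr by
 * setting RTASE_COUNTER_DUMP, then poll until the hardware clears the bit
 * to signal that the dump has completed.
 */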
1555 static void rtase_dump_tally_counter(const struct rtase_private *tp)
1556 {
1557 dma_addr_t paddr = tp->tally_paddr;
1558 u32 cmd = lower_32_bits(paddr);
1559 u32 val;
1560 int err;
1561
1562 rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(paddr));
1563 rtase_w32(tp, RTASE_DTCCR0, cmd);
1564 rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_DUMP);
1565
1566 err = read_poll_timeout(rtase_r32, val, !(val & RTASE_COUNTER_DUMP),
1567 10, 250, false, tp, RTASE_DTCCR0);
1568
1569 if (err == -ETIMEDOUT)
1570 netdev_err(tp->dev, "error occurred in dump tally counter\n");
1571 }
1572
1573 static void rtase_dump_state(const struct net_device *dev)
1574 {
1575 const struct rtase_private *tp = netdev_priv(dev);
1576 int max_reg_size = RTASE_PCI_REGS_SIZE;
1577 const struct rtase_counters *counters;
1578 const struct rtase_ring *ring;
1579 u32 dword_rd;
1580 int n = 0;
1581
1582 ring = &tp->tx_ring[0];
1583 netdev_err(dev, "Tx descriptor info:\n");
1584 netdev_err(dev, "Tx curIdx = 0x%x\n", ring->cur_idx);
1585 netdev_err(dev, "Tx dirtyIdx = 0x%x\n", ring->dirty_idx);
1586 netdev_err(dev, "Tx phyAddr = %pad\n", &ring->phy_addr);
1587
1588 ring = &tp->rx_ring[0];
1589 netdev_err(dev, "Rx descriptor info:\n");
1590 netdev_err(dev, "Rx curIdx = 0x%x\n", ring->cur_idx);
1591 netdev_err(dev, "Rx dirtyIdx = 0x%x\n", ring->dirty_idx);
1592 netdev_err(dev, "Rx phyAddr = %pad\n", &ring->phy_addr);
1593
1594 netdev_err(dev, "Device Registers:\n");
1595 netdev_err(dev, "Chip Command = 0x%02x\n",
1596 rtase_r8(tp, RTASE_CHIP_CMD));
1597 netdev_err(dev, "IMR = %08x\n", rtase_r32(tp, RTASE_IMR0));
1598 netdev_err(dev, "ISR = %08x\n", rtase_r32(tp, RTASE_ISR0));
1599 netdev_err(dev, "Boot Ctrl Reg(0xE004) = %04x\n",
1600 rtase_r16(tp, RTASE_BOOT_CTL));
1601 netdev_err(dev, "EPHY ISR(0xE014) = %04x\n",
1602 rtase_r16(tp, RTASE_EPHY_ISR));
1603 netdev_err(dev, "EPHY IMR(0xE016) = %04x\n",
1604 rtase_r16(tp, RTASE_EPHY_IMR));
1605 netdev_err(dev, "CLKSW SET REG(0xE018) = %04x\n",
1606 rtase_r16(tp, RTASE_CLKSW_SET));
1607
1608 netdev_err(dev, "Dump PCI Registers:\n");
1609
1610 while (n < max_reg_size) {
1611 if ((n % RTASE_DWORD_MOD) == 0)
1612 netdev_err(tp->dev, "0x%03x:\n", n);
1613
1614 pci_read_config_dword(tp->pdev, n, &dword_rd);
1615 netdev_err(tp->dev, "%08x\n", dword_rd);
1616 n += 4;
1617 }
1618
1619 netdev_err(dev, "Dump tally counter:\n");
1620 counters = tp->tally_vaddr;
1621 rtase_dump_tally_counter(tp);
1622
1623 netdev_err(dev, "tx_packets %lld\n",
1624 le64_to_cpu(counters->tx_packets));
1625 netdev_err(dev, "rx_packets %lld\n",
1626 le64_to_cpu(counters->rx_packets));
1627 netdev_err(dev, "tx_errors %lld\n",
1628 le64_to_cpu(counters->tx_errors));
1629 netdev_err(dev, "rx_errors %d\n",
1630 le32_to_cpu(counters->rx_errors));
1631 netdev_err(dev, "rx_missed %d\n",
1632 le16_to_cpu(counters->rx_missed));
1633 netdev_err(dev, "align_errors %d\n",
1634 le16_to_cpu(counters->align_errors));
1635 netdev_err(dev, "tx_one_collision %d\n",
1636 le32_to_cpu(counters->tx_one_collision));
1637 netdev_err(dev, "tx_multi_collision %d\n",
1638 le32_to_cpu(counters->tx_multi_collision));
1639 netdev_err(dev, "rx_unicast %lld\n",
1640 le64_to_cpu(counters->rx_unicast));
1641 netdev_err(dev, "rx_broadcast %lld\n",
1642 le64_to_cpu(counters->rx_broadcast));
1643 netdev_err(dev, "rx_multicast %d\n",
1644 le32_to_cpu(counters->rx_multicast));
1645 netdev_err(dev, "tx_aborted %d\n",
1646 le16_to_cpu(counters->tx_aborted));
1647 netdev_err(dev, "tx_underrun %d\n",
1648 le16_to_cpu(counters->tx_underrun));
1649 }
1650
1651 static void rtase_tx_timeout(struct net_device *dev, unsigned int txqueue)
1652 {
1653 rtase_dump_state(dev);
1654 rtase_sw_reset(dev);
1655 }
1656
1657 static void rtase_get_stats64(struct net_device *dev,
1658 struct rtnl_link_stats64 *stats)
1659 {
1660 const struct rtase_private *tp = netdev_priv(dev);
1661 const struct rtase_counters *counters;
1662
1663 counters = tp->tally_vaddr;
1664
1665 dev_fetch_sw_netstats(stats, dev->tstats);
1666
1667 /* fetch, from the hardware tally counter, the values that are not
1668 * collected by the driver itself
1669 */
1670 rtase_dump_tally_counter(tp);
1671 stats->rx_errors = tp->stats.rx_errors;
1672 stats->tx_errors = le64_to_cpu(counters->tx_errors);
1673 stats->rx_dropped = tp->stats.rx_dropped;
1674 stats->tx_dropped = tp->stats.tx_dropped;
1675 stats->multicast = tp->stats.multicast;
1676 stats->rx_length_errors = tp->stats.rx_length_errors;
1677 }
1678
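/* Program the credit-based shaper for one TX queue. The configured
 * idleslope is converted to a fixed-point credit value: the integer part of
 * idle / RTASE_1T_POWER is encoded through RTASE_IDLESLOPE_INT_MASK, and
 * the loop expands the remainder into a binary fraction one bit per
 * iteration (double the remainder and emit a 1 whenever it reaches
 * RTASE_1T_POWER).
 */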
1679 static void rtase_set_hw_cbs(const struct rtase_private *tp, u32 queue)
1680 {
1681 u32 idle = tp->tx_qos[queue].idleslope * RTASE_1T_CLOCK;
1682 u32 val, i;
1683
1684 val = u32_encode_bits(idle / RTASE_1T_POWER, RTASE_IDLESLOPE_INT_MASK);
1685 idle %= RTASE_1T_POWER;
1686
1687 for (i = 1; i <= RTASE_IDLESLOPE_INT_SHIFT; i++) {
1688 idle *= 2;
1689 if ((idle / RTASE_1T_POWER) == 1)
1690 val |= BIT(RTASE_IDLESLOPE_INT_SHIFT - i);
1691
1692 idle %= RTASE_1T_POWER;
1693 }
1694
1695 rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, val);
1696 }
1697
1698 static int rtase_setup_tc_cbs(struct rtase_private *tp,
1699 const struct tc_cbs_qopt_offload *qopt)
1700 {
1701 int queue = qopt->queue;
1702
1703 if (queue < 0 || queue >= tp->func_tx_queue_num)
1704 return -EINVAL;
1705
1706 if (!qopt->enable) {
1707 tp->tx_qos[queue].hicredit = 0;
1708 tp->tx_qos[queue].locredit = 0;
1709 tp->tx_qos[queue].idleslope = 0;
1710 tp->tx_qos[queue].sendslope = 0;
1711
1712 rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, 0);
1713 } else {
1714 tp->tx_qos[queue].hicredit = qopt->hicredit;
1715 tp->tx_qos[queue].locredit = qopt->locredit;
1716 tp->tx_qos[queue].idleslope = qopt->idleslope;
1717 tp->tx_qos[queue].sendslope = qopt->sendslope;
1718
1719 rtase_set_hw_cbs(tp, queue);
1720 }
1721
1722 return 0;
1723 }
1724
1725 static int rtase_setup_tc(struct net_device *dev, enum tc_setup_type type,
1726 void *type_data)
1727 {
1728 struct rtase_private *tp = netdev_priv(dev);
1729
1730 switch (type) {
1731 case TC_SETUP_QDISC_CBS:
1732 return rtase_setup_tc_cbs(tp, type_data);
1733 default:
1734 return -EOPNOTSUPP;
1735 }
1736 }
1737
1738 static netdev_features_t rtase_fix_features(struct net_device *dev,
1739 netdev_features_t features)
1740 {
1741 netdev_features_t features_fix = features;
1742
1743 /* TSO is not supported for jumbo frames */
1744 if (dev->mtu > ETH_DATA_LEN)
1745 features_fix &= ~NETIF_F_ALL_TSO;
1746
1747 return features_fix;
1748 }
1749
1750 static int rtase_set_features(struct net_device *dev,
1751 netdev_features_t features)
1752 {
1753 netdev_features_t features_set = features;
1754
1755 features_set &= NETIF_F_RXALL | NETIF_F_RXCSUM |
1756 NETIF_F_HW_VLAN_CTAG_RX;
1757
1758 if (features_set ^ dev->features)
1759 rtase_hw_set_features(dev, features_set);
1760
1761 return 0;
1762 }
1763
1764 static const struct net_device_ops rtase_netdev_ops = {
1765 .ndo_open = rtase_open,
1766 .ndo_stop = rtase_close,
1767 .ndo_start_xmit = rtase_start_xmit,
1768 .ndo_set_rx_mode = rtase_set_rx_mode,
1769 .ndo_set_mac_address = rtase_set_mac_address,
1770 .ndo_change_mtu = rtase_change_mtu,
1771 .ndo_tx_timeout = rtase_tx_timeout,
1772 .ndo_get_stats64 = rtase_get_stats64,
1773 .ndo_setup_tc = rtase_setup_tc,
1774 .ndo_fix_features = rtase_fix_features,
1775 .ndo_set_features = rtase_set_features,
1776 };
1777
1778 static void rtase_get_mac_address(struct net_device *dev)
1779 {
1780 struct rtase_private *tp = netdev_priv(dev);
1781 u8 mac_addr[ETH_ALEN] __aligned(2) = {};
1782 u32 i;
1783
1784 for (i = 0; i < ETH_ALEN; i++)
1785 mac_addr[i] = rtase_r8(tp, RTASE_MAC0 + i);
1786
1787 if (!is_valid_ether_addr(mac_addr)) {
1788 eth_hw_addr_random(dev);
1789 netdev_warn(dev, "Random ether addr %pM\n", dev->dev_addr);
1790 } else {
1791 eth_hw_addr_set(dev, mac_addr);
1792 ether_addr_copy(dev->perm_addr, dev->dev_addr);
1793 }
1794
1795 rtase_rar_set(tp, dev->dev_addr);
1796 }
1797
1798 static int rtase_get_settings(struct net_device *dev,
1799 struct ethtool_link_ksettings *cmd)
1800 {
1801 u32 supported = SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1802 const struct rtase_private *tp = netdev_priv(dev);
1803
1804 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1805 supported);
1806
1807 switch (tp->hw_ver) {
1808 case RTASE_HW_VER_906X_7XA:
1809 case RTASE_HW_VER_906X_7XC:
1810 cmd->base.speed = SPEED_5000;
1811 break;
1812 case RTASE_HW_VER_907XD_V1:
1813 case RTASE_HW_VER_907XD_VA:
1814 cmd->base.speed = SPEED_10000;
1815 break;
1816 }
1817
1818 cmd->base.duplex = DUPLEX_FULL;
1819 cmd->base.port = PORT_MII;
1820 cmd->base.autoneg = AUTONEG_DISABLE;
1821
1822 return 0;
1823 }
1824
1825 static void rtase_get_pauseparam(struct net_device *dev,
1826 struct ethtool_pauseparam *pause)
1827 {
1828 const struct rtase_private *tp = netdev_priv(dev);
1829 u16 value = rtase_r16(tp, RTASE_CPLUS_CMD);
1830
1831 pause->autoneg = AUTONEG_DISABLE;
1832 pause->tx_pause = !!(value & RTASE_FORCE_TXFLOW_EN);
1833 pause->rx_pause = !!(value & RTASE_FORCE_RXFLOW_EN);
1834 }
1835
1836 static int rtase_set_pauseparam(struct net_device *dev,
1837 struct ethtool_pauseparam *pause)
1838 {
1839 const struct rtase_private *tp = netdev_priv(dev);
1840 u16 value = rtase_r16(tp, RTASE_CPLUS_CMD);
1841
1842 if (pause->autoneg)
1843 return -EOPNOTSUPP;
1844
1845 value &= ~(RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN);
1846
1847 if (pause->tx_pause)
1848 value |= RTASE_FORCE_TXFLOW_EN;
1849
1850 if (pause->rx_pause)
1851 value |= RTASE_FORCE_RXFLOW_EN;
1852
1853 rtase_w16(tp, RTASE_CPLUS_CMD, value);
1854 return 0;
1855 }
1856
1857 static void rtase_get_eth_mac_stats(struct net_device *dev,
1858 struct ethtool_eth_mac_stats *stats)
1859 {
1860 struct rtase_private *tp = netdev_priv(dev);
1861 const struct rtase_counters *counters;
1862
1863 counters = tp->tally_vaddr;
1864
1865 rtase_dump_tally_counter(tp);
1866
1867 stats->FramesTransmittedOK = le64_to_cpu(counters->tx_packets);
1868 stats->FramesReceivedOK = le64_to_cpu(counters->rx_packets);
1869 stats->FramesLostDueToIntMACXmitError =
1870 le64_to_cpu(counters->tx_errors);
1871 stats->BroadcastFramesReceivedOK = le64_to_cpu(counters->rx_broadcast);
1872 }
1873
1874 static const struct ethtool_ops rtase_ethtool_ops = {
1875 .get_link = ethtool_op_get_link,
1876 .get_link_ksettings = rtase_get_settings,
1877 .get_pauseparam = rtase_get_pauseparam,
1878 .set_pauseparam = rtase_set_pauseparam,
1879 .get_eth_mac_stats = rtase_get_eth_mac_stats,
1880 .get_ts_info = ethtool_op_get_ts_info,
1881 };
1882
1883 static void rtase_init_netdev_ops(struct net_device *dev)
1884 {
1885 dev->netdev_ops = &rtase_netdev_ops;
1886 dev->ethtool_ops = &rtase_ethtool_ops;
1887 }
1888
1889 static void rtase_init_napi(struct rtase_private *tp)
1890 {
1891 u16 i;
1892
1893 for (i = 0; i < tp->int_nums; i++) {
1894 netif_napi_add_config(tp->dev, &tp->int_vector[i].napi,
1895 tp->int_vector[i].poll, i);
1896 netif_napi_set_irq(&tp->int_vector[i].napi,
1897 tp->int_vector[i].irq);
1898 }
1899 }
1900
1901 static void rtase_reset_interrupt(struct pci_dev *pdev,
1902 const struct rtase_private *tp)
1903 {
1904 if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED)
1905 pci_disable_msix(pdev);
1906 else
1907 pci_disable_msi(pdev);
1908 }
1909
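/*
 * Enable exactly tp->int_nums MSI-X vectors and record the Linux IRQ
 * number of each vector in the matching interrupt vector structure; if an
 * IRQ cannot be resolved, the already enabled vectors are released again.
 */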
1910 static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
1911 {
1912 int ret, irq;
1913 u16 i;
1914
1915 memset(tp->msix_entry, 0x0, RTASE_NUM_MSIX *
1916 sizeof(struct msix_entry));
1917
1918 for (i = 0; i < RTASE_NUM_MSIX; i++)
1919 tp->msix_entry[i].entry = i;
1920
1921 ret = pci_enable_msix_exact(pdev, tp->msix_entry, tp->int_nums);
1922
1923 if (ret)
1924 return ret;
1925
1926 for (i = 0; i < tp->int_nums; i++) {
1927 irq = pci_irq_vector(pdev, i);
1928 if (irq < 0) {
1929 pci_disable_msix(pdev);
1930 return irq;
1931 }
1932
1933 tp->int_vector[i].irq = irq;
1934 }
1935
1936 return 0;
1937 }
1938
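/*
 * Prefer MSI-X; if it cannot be enabled, fall back to a single MSI
 * interrupt. The mode that was enabled is recorded in sw_flag so that
 * teardown can disable it the same way.
 */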
1939 static int rtase_alloc_interrupt(struct pci_dev *pdev,
1940 struct rtase_private *tp)
1941 {
1942 int ret;
1943
1944 ret = rtase_alloc_msix(pdev, tp);
1945 if (ret) {
1946 ret = pci_enable_msi(pdev);
1947 if (ret) {
1948 dev_err(&pdev->dev,
1949 "unable to alloc interrupt.(MSI)\n");
1950 return ret;
1951 }
1952
1953 tp->sw_flag |= RTASE_SWF_MSI_ENABLED;
1954 } else {
1955 tp->sw_flag |= RTASE_SWF_MSIX_ENABLED;
1956 }
1957
1958 return 0;
1959 }
1960
1961 static void rtase_init_hardware(const struct rtase_private *tp)
1962 {
1963 u16 i;
1964
1965 for (i = 0; i < RTASE_VLAN_FILTER_ENTRY_NUM; i++)
1966 rtase_w32(tp, RTASE_VLAN_ENTRY_0 + i * 4, 0);
1967 }
1968
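/*
 * Vector 0 uses IMR0/ISR0 and is unmasked for ROK/RDU/TOK plus the
 * TOK4-TOK7 events; vectors 1 and up use IMR1/ISR1 and the following
 * registers with the per-queue ROK/RDU/TOK bits. All vectors share the
 * same NAPI poll routine.
 */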
1969 static void rtase_init_int_vector(struct rtase_private *tp)
1970 {
1971 u16 i;
1972
1973 /* interrupt vector 0 */
1974 tp->int_vector[0].tp = tp;
1975 tp->int_vector[0].index = 0;
1976 tp->int_vector[0].imr_addr = RTASE_IMR0;
1977 tp->int_vector[0].isr_addr = RTASE_ISR0;
1978 tp->int_vector[0].imr = RTASE_ROK | RTASE_RDU | RTASE_TOK |
1979 RTASE_TOK4 | RTASE_TOK5 | RTASE_TOK6 |
1980 RTASE_TOK7;
1981 tp->int_vector[0].poll = rtase_poll;
1982
1983 memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name));
1984 INIT_LIST_HEAD(&tp->int_vector[0].ring_list);
1985
1986 /* interrupt vector 1 ~ 3 */
1987 for (i = 1; i < tp->int_nums; i++) {
1988 tp->int_vector[i].tp = tp;
1989 tp->int_vector[i].index = i;
1990 tp->int_vector[i].imr_addr = RTASE_IMR1 + (i - 1) * 4;
1991 tp->int_vector[i].isr_addr = RTASE_ISR1 + (i - 1) * 4;
1992 tp->int_vector[i].imr = RTASE_Q_ROK | RTASE_Q_RDU |
1993 RTASE_Q_TOK;
1994 tp->int_vector[i].poll = rtase_poll;
1995
1996 memset(tp->int_vector[i].name, 0x0,
1997 sizeof(tp->int_vector[0].name));
1998 INIT_LIST_HEAD(&tp->int_vector[i].ring_list);
1999 }
2000 }
2001
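/*
 * The moderation timer is encoded as a (count, unit) pair where the
 * effective time is roughly time_count << time_unit microseconds: values
 * that fit in the count field are used as-is, larger values keep only the
 * top RTASE_MITI_COUNT_BIT_NUM bits and move the remaining shift into the
 * unit field. (This interpretation is inferred from the encoding below,
 * not from a datasheet.)
 */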
2002 static u16 rtase_calc_time_mitigation(u32 time_us)
2003 {
2004 u8 msb, time_count, time_unit;
2005 u16 int_miti;
2006
2007 time_us = min(time_us, RTASE_MITI_MAX_TIME);
2008
2009 if (time_us > RTASE_MITI_TIME_COUNT_MASK) {
2010 msb = fls(time_us);
2011 time_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
2012 time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM);
2013 } else {
2014 time_unit = 0;
2015 time_count = time_us;
2016 }
2017
2018 int_miti = u16_encode_bits(time_count, RTASE_MITI_TIME_COUNT_MASK) |
2019 u16_encode_bits(time_unit, RTASE_MITI_TIME_UNIT_MASK);
2020
2021 return int_miti;
2022 }
2023
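/*
 * Packet-count moderation uses the same (count, unit) encoding as the
 * timer: counts above 60 are divided by RTASE_MITI_MAX_PKT_NUM_UNIT and
 * use the maximum unit index, while smaller counts keep their top
 * RTASE_MITI_COUNT_BIT_NUM bits with the shift stored in the unit field.
 */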
2024 static u16 rtase_calc_packet_num_mitigation(u16 pkt_num)
2025 {
2026 u8 msb, pkt_num_count, pkt_num_unit;
2027 u16 int_miti;
2028
2029 pkt_num = min(pkt_num, RTASE_MITI_MAX_PKT_NUM);
2030
2031 if (pkt_num > 60) {
2032 pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX;
2033 pkt_num_count = pkt_num / RTASE_MITI_MAX_PKT_NUM_UNIT;
2034 } else {
2035 msb = fls(pkt_num);
2036 if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
2037 pkt_num_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
2038 pkt_num_count = pkt_num >> (msb -
2039 RTASE_MITI_COUNT_BIT_NUM);
2040 } else {
2041 pkt_num_unit = 0;
2042 pkt_num_count = pkt_num;
2043 }
2044 }
2045
2046 int_miti = u16_encode_bits(pkt_num_count,
2047 RTASE_MITI_PKT_NUM_COUNT_MASK) |
2048 u16_encode_bits(pkt_num_unit,
2049 RTASE_MITI_PKT_NUM_UNIT_MASK);
2050
2051 return int_miti;
2052 }
2053
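/*
 * Set the default queue and interrupt vector counts, program the default
 * TX/RX interrupt moderation (time and packet count combined), set up the
 * interrupt vectors and advertise the supported MTU range to the stack.
 */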
2054 static void rtase_init_software_variable(struct pci_dev *pdev,
2055 struct rtase_private *tp)
2056 {
2057 u16 int_miti;
2058
2059 tp->tx_queue_ctrl = RTASE_TXQ_CTRL;
2060 tp->func_tx_queue_num = RTASE_FUNC_TXQ_NUM;
2061 tp->func_rx_queue_num = RTASE_FUNC_RXQ_NUM;
2062 tp->int_nums = RTASE_INTERRUPT_NUM;
2063
2064 int_miti = rtase_calc_time_mitigation(RTASE_MITI_DEFAULT_TIME) |
2065 rtase_calc_packet_num_mitigation(RTASE_MITI_DEFAULT_PKT_NUM);
2066 tp->tx_int_mit = int_miti;
2067 tp->rx_int_mit = int_miti;
2068
2069 tp->sw_flag = 0;
2070
2071 rtase_init_int_vector(tp);
2072
2073 /* MTU range: 60 - hw-specific max */
2074 tp->dev->min_mtu = ETH_ZLEN;
2075 tp->dev->max_mtu = RTASE_MAX_JUMBO_SIZE;
2076 }
2077
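/*
 * The hardware version is read from the version field of the first TX
 * config register; probing is refused for any version this driver does
 * not recognize.
 */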
2078 static int rtase_check_mac_version_valid(struct rtase_private *tp)
2079 {
2080 int ret = -ENODEV;
2081
2082 tp->hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK;
2083
2084 switch (tp->hw_ver) {
2085 case RTASE_HW_VER_906X_7XA:
2086 case RTASE_HW_VER_906X_7XC:
2087 case RTASE_HW_VER_907XD_V1:
2088 case RTASE_HW_VER_907XD_VA:
2089 ret = 0;
2090 break;
2091 }
2092
2093 return ret;
2094 }
2095
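/*
 * Basic PCI bring-up for the probe path: allocate the multi-queue net
 * device, enable the PCI device, check that BAR 2 is an MMIO region of at
 * least RTASE_REGS_SIZE, claim the regions, require 64-bit DMA, enable
 * bus mastering and map the register window.
 */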
2096 static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
2097 void __iomem **ioaddr_out)
2098 {
2099 struct net_device *dev;
2100 void __iomem *ioaddr;
2101 int ret = -ENOMEM;
2102
2103 /* dev zeroed in alloc_etherdev */
2104 dev = alloc_etherdev_mq(sizeof(struct rtase_private),
2105 RTASE_FUNC_TXQ_NUM);
2106 if (!dev)
2107 goto err_out;
2108
2109 SET_NETDEV_DEV(dev, &pdev->dev);
2110
2111 ret = pci_enable_device(pdev);
2112 if (ret)
2113 goto err_out_free_dev;
2114
2115 /* make sure PCI BAR 2 is MMIO */
2116 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
2117 ret = -ENODEV;
2118 goto err_out_disable;
2119 }
2120
2121 /* check for weird/broken PCI region reporting */
2122 if (pci_resource_len(pdev, 2) < RTASE_REGS_SIZE) {
2123 ret = -ENODEV;
2124 goto err_out_disable;
2125 }
2126
2127 ret = pci_request_regions(pdev, KBUILD_MODNAME);
2128 if (ret)
2129 goto err_out_disable;
2130
2131 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2132 if (ret) {
2133 dev_err(&pdev->dev, "no usable dma addressing method\n");
2134 goto err_out_free_res;
2135 }
2136
2137 pci_set_master(pdev);
2138
2139 /* ioremap MMIO region */
2140 ioaddr = ioremap(pci_resource_start(pdev, 2),
2141 pci_resource_len(pdev, 2));
2142 if (!ioaddr) {
2143 ret = -EIO;
2144 goto err_out_free_res;
2145 }
2146
2147 *ioaddr_out = ioaddr;
2148 *dev_out = dev;
2149
2150 return ret;
2151
2152 err_out_free_res:
2153 pci_release_regions(pdev);
2154
2155 err_out_disable:
2156 pci_disable_device(pdev);
2157
2158 err_out_free_dev:
2159 free_netdev(dev);
2160
2161 err_out:
2162 *ioaddr_out = NULL;
2163 *dev_out = NULL;
2164
2165 return ret;
2166 }
2167
2168 static void rtase_release_board(struct pci_dev *pdev, struct net_device *dev,
2169 void __iomem *ioaddr)
2170 {
2171 const struct rtase_private *tp = netdev_priv(dev);
2172
2173 rtase_rar_set(tp, tp->dev->perm_addr);
2174 iounmap(ioaddr);
2175
2176 if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED)
2177 pci_disable_msix(pdev);
2178 else
2179 pci_disable_msi(pdev);
2180
2181 pci_release_regions(pdev);
2182 pci_disable_device(pdev);
2183 free_netdev(dev);
2184 }
2185
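/*
 * Probe entry point: bring up the PCI device and register window, verify
 * the chip version, initialize software state, interrupts and NAPI,
 * advertise offload features, read the MAC address, allocate the
 * DMA-coherent tally counter block and register the net device. Errors
 * unwind in reverse order.
 */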
2186 static int rtase_init_one(struct pci_dev *pdev,
2187 const struct pci_device_id *ent)
2188 {
2189 struct net_device *dev = NULL;
2190 struct rtase_int_vector *ivec;
2191 void __iomem *ioaddr = NULL;
2192 struct rtase_private *tp;
2193 int ret, i;
2194
2195 if (!pdev->is_physfn && pdev->is_virtfn) {
2196 dev_err(&pdev->dev,
2197 "This module does not support a virtual function.");
2198 return -EINVAL;
2199 }
2200
2201 dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n");
2202
2203 ret = rtase_init_board(pdev, &dev, &ioaddr);
2204 if (ret)
2205 return ret;
2206
2207 tp = netdev_priv(dev);
2208 tp->mmio_addr = ioaddr;
2209 tp->dev = dev;
2210 tp->pdev = pdev;
2211
2212 /* identify chip attached to board */
2213 ret = rtase_check_mac_version_valid(tp);
2214 if (ret) {
2215 dev_err(&pdev->dev,
2216 "unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n",
2217 tp->hw_ver);
2218 goto err_out_release_board;
2219 }
2220
2221 rtase_init_software_variable(pdev, tp);
2222 rtase_init_hardware(tp);
2223
2224 ret = rtase_alloc_interrupt(pdev, tp);
2225 if (ret) {
2226 dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
2227 goto err_out_del_napi;
2228 }
2229
2230 rtase_init_napi(tp);
2231
2232 rtase_init_netdev_ops(dev);
2233
2234 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
2235
2236 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2237 NETIF_F_IP_CSUM | NETIF_F_HIGHDMA |
2238 NETIF_F_RXCSUM | NETIF_F_SG |
2239 NETIF_F_TSO | NETIF_F_IPV6_CSUM |
2240 NETIF_F_TSO6;
2241
2242 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
2243 NETIF_F_TSO | NETIF_F_RXCSUM |
2244 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2245 NETIF_F_RXALL | NETIF_F_RXFCS |
2246 NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
2247
2248 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2249 NETIF_F_HIGHDMA;
2250 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2251 netif_set_tso_max_size(dev, RTASE_LSO_64K);
2252 netif_set_tso_max_segs(dev, RTASE_NIC_MAX_PHYS_BUF_COUNT_LSO2);
2253
2254 rtase_get_mac_address(dev);
2255
2256 tp->tally_vaddr = dma_alloc_coherent(&pdev->dev,
2257 sizeof(*tp->tally_vaddr),
2258 &tp->tally_paddr,
2259 GFP_KERNEL);
2260 if (!tp->tally_vaddr) {
2261 ret = -ENOMEM;
2262 goto err_out_free_dma;
2263 }
2264
2265 rtase_tally_counter_clear(tp);
2266
2267 pci_set_drvdata(pdev, dev);
2268
2269 netif_carrier_off(dev);
2270
2271 ret = register_netdev(dev);
2272 if (ret)
2273 goto err_out_free_dma;
2274
2275 netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
2276
2277 return 0;
2278
2279 err_out_free_dma:
2280 if (tp->tally_vaddr) {
2281 dma_free_coherent(&pdev->dev,
2282 sizeof(*tp->tally_vaddr),
2283 tp->tally_vaddr,
2284 tp->tally_paddr);
2285
2286 tp->tally_vaddr = NULL;
2287 }
2288
2289 err_out_del_napi:
2290 for (i = 0; i < tp->int_nums; i++) {
2291 ivec = &tp->int_vector[i];
2292 netif_napi_del(&ivec->napi);
2293 }
2294
2295 err_out_release_board:
2296 rtase_release_board(pdev, dev, ioaddr);
2297
2298 return ret;
2299 }
2300
2301 static void rtase_remove_one(struct pci_dev *pdev)
2302 {
2303 struct net_device *dev = pci_get_drvdata(pdev);
2304 struct rtase_private *tp = netdev_priv(dev);
2305 struct rtase_int_vector *ivec;
2306 u32 i;
2307
2308 unregister_netdev(dev);
2309
2310 for (i = 0; i < tp->int_nums; i++) {
2311 ivec = &tp->int_vector[i];
2312 netif_napi_del(&ivec->napi);
2313 }
2314
2315 rtase_reset_interrupt(pdev, tp);
2316 if (tp->tally_vaddr) {
2317 dma_free_coherent(&pdev->dev,
2318 sizeof(*tp->tally_vaddr),
2319 tp->tally_vaddr,
2320 tp->tally_paddr);
2321 tp->tally_vaddr = NULL;
2322 }
2323
2324 rtase_release_board(pdev, dev, tp->mmio_addr);
2325 pci_set_drvdata(pdev, NULL);
2326 }
2327
2328 static void rtase_shutdown(struct pci_dev *pdev)
2329 {
2330 struct net_device *dev = pci_get_drvdata(pdev);
2331 const struct rtase_private *tp;
2332
2333 tp = netdev_priv(dev);
2334
2335 if (netif_running(dev))
2336 rtase_close(dev);
2337
2338 rtase_reset_interrupt(pdev, tp);
2339 }
2340
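/*
 * System sleep support: suspend detaches the interface and resets the
 * hardware; resume restores the MAC address and, if the interface was
 * running, rebuilds the descriptor rings and restarts the datapath.
 */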
2341 static int rtase_suspend(struct device *device)
2342 {
2343 struct net_device *dev = dev_get_drvdata(device);
2344
2345 if (netif_running(dev)) {
2346 netif_device_detach(dev);
2347 rtase_hw_reset(dev);
2348 }
2349
2350 return 0;
2351 }
2352
2353 static int rtase_resume(struct device *device)
2354 {
2355 struct net_device *dev = dev_get_drvdata(device);
2356 struct rtase_private *tp = netdev_priv(dev);
2357 int ret;
2358
2359 /* restore last modified mac address */
2360 rtase_rar_set(tp, dev->dev_addr);
2361
2362 if (!netif_running(dev))
2363 goto out;
2364
2365 rtase_wait_for_quiescence(dev);
2366
2367 rtase_tx_clear(tp);
2368 rtase_rx_clear(tp);
2369
2370 ret = rtase_init_ring(dev);
2371 if (ret) {
2372 netdev_err(dev, "unable to init ring\n");
2373 rtase_free_desc(tp);
2374 return -ENOMEM;
2375 }
2376
2377 rtase_hw_config(dev);
2378 /* the link is always up, so start transmitting and receiving */
2379 rtase_hw_start(dev);
2380
2381 netif_device_attach(dev);
2382 out:
2383
2384 return 0;
2385 }
2386
2387 static const struct dev_pm_ops rtase_pm_ops = {
2388 SYSTEM_SLEEP_PM_OPS(rtase_suspend, rtase_resume)
2389 };
2390
2391 static struct pci_driver rtase_pci_driver = {
2392 .name = KBUILD_MODNAME,
2393 .id_table = rtase_pci_tbl,
2394 .probe = rtase_init_one,
2395 .remove = rtase_remove_one,
2396 .shutdown = rtase_shutdown,
2397 .driver.pm = pm_ptr(&rtase_pm_ops),
2398 };
2399
2400 module_pci_driver(rtase_pci_driver);
2401