// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
 *	      Keyur Chudgar <kchudgar@apm.com>
 */

#include "main.h"

static int xge_get_resources(struct xge_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	int phy_mode, ret = 0;
	struct resource *res;
	struct device *dev;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}

	pdata->resources.base_addr = devm_ioremap(dev, res->start,
						  resource_size(res));
	if (!pdata->resources.base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	if (device_get_ethdev_address(dev, ndev))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	phy_mode = device_get_phy_mode(dev);
	if (phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return phy_mode;
	}
	pdata->resources.phy_mode = phy_mode;

	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	pdata->resources.irq = ret;

	return 0;
}

static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	u8 tail = ring->tail;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 len;
	int i;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   SET_BITS(PKT_ADDRH,
						    upper_32_bits(dma_addr)));

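		/*
		 * Publish the buffer address in m1 before m0's empty (E)
		 * bit hands the descriptor back to the hardware.
		 */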
		dma_wmb();
		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
					   SET_BITS(E, 1));
		tail = (tail + 1) & slots;
	}

	ring->tail = tail;

	return 0;
}

static int xge_init_hw(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_port_reset(ndev);
	if (ret)
		return ret;

	xge_port_init(ndev);
	pdata->nbufs = NUM_BUFS;

	return 0;
}

static irqreturn_t xge_irq(const int irq, void *data)
{
	struct xge_pdata *pdata = data;

	if (napi_schedule_prep(&pdata->napi)) {
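		/* Mask the interrupt until NAPI has drained the rings. */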
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
	}

	return IRQ_HANDLED;
}

static int xge_request_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

	ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
			  pdata);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);

	return ret;
}

static void xge_free_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	free_irq(pdata->resources.irq, pdata);
}

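/*
 * A TX slot is reusable once the hardware has set the empty (E) bit and the
 * completion handler has restored the SLOT_EMPTY size sentinel; a zero size
 * with E set means the slot completed but has not been reclaimed yet.
 */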
static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
		return true;

	return false;
}

static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u64 addr_lo, addr_hi;
	void *pkt_buf;
	u8 tail;
	u16 len;

	tx_ring = pdata->tx_ring;
	tail = tx_ring->tail;
	len = skb_headlen(skb);
	raw_desc = &tx_ring->raw_desc[tail];

	if (!is_tx_slot_available(raw_desc)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* Packet buffers should be 64B aligned */
	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
				     GFP_ATOMIC);
	if (unlikely(!pkt_buf)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
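	/*
	 * Only the linear part of the skb is bounced into the coherent
	 * buffer; NETIF_F_SG is not advertised, so there are no fragments
	 * to handle.
	 */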
	memcpy(pkt_buf, skb->data, len);

	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
				   SET_BITS(PKT_ADDRH,
					    upper_32_bits(dma_addr)));

	tx_ring->pkt_info[tail].skb = skb;
	tx_ring->pkt_info[tail].dma_addr = dma_addr;
	tx_ring->pkt_info[tail].pkt_buf = pkt_buf;

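	/*
	 * Order the m1 address and bookkeeping writes before m0 clears the
	 * empty (E) bit and DMATXCTRL kicks the DMA engine.
	 */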
	dma_wmb();

	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
				   SET_BITS(PKT_SIZE, len) |
				   SET_BITS(E, 0));
	skb_tx_timestamp(skb);
	xge_wr_csr(pdata, DMATXCTRL, 1);

	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);

	return NETDEV_TX_OK;
}

static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
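		/* Acknowledge one completed frame in the TX status counter. */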
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}

static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
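		/*
		 * Replenish the slot just consumed, then acknowledge one
		 * packet in the RX status counter and re-arm RX DMA.
		 */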
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}

static void xge_delete_desc_ring(struct net_device *ndev,
				 struct xge_desc_ring *ring)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	u16 size;

	if (!ring)
		return;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	if (ring->desc_addr)
		dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);

	kfree(ring->pkt_info);
	kfree(ring);
}

static void xge_free_buffers(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	struct device *dev = &pdata->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		skb = ring->pkt_info[i].skb;
		dma_addr = ring->pkt_info[i].dma_addr;

		if (!skb)
			continue;

		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void xge_delete_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	xge_delete_desc_ring(ndev, pdata->tx_ring);

	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
}

static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *ring;
	u16 size;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->desc_addr)
		goto err;

	ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
				 GFP_KERNEL);
	if (!ring->pkt_info)
		goto err;

	xge_setup_desc(ring);

	return ring;

err:
	xge_delete_desc_ring(ndev, ring);

	return NULL;
}

static int xge_create_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring;
	int ret;

	/* create tx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->tx_ring = ring;
	xge_update_tx_desc_addr(pdata);

	/* create rx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->rx_ring = ring;
	xge_update_rx_desc_addr(pdata);

	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
	if (ret)
		goto err;

	return 0;
err:
	xge_delete_desc_rings(ndev);

	return -ENOMEM;
}

static int xge_open(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_create_desc_rings(ndev);
	if (ret)
		return ret;

	napi_enable(&pdata->napi);
	ret = xge_request_irq(ndev);
	if (ret) {
		napi_disable(&pdata->napi);
		xge_delete_desc_rings(ndev);
		return ret;
	}

	xge_intr_enable(pdata);
	xge_wr_csr(pdata, DMARXCTRL, 1);

	phy_start(ndev->phydev);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

	return 0;
}

static int xge_close(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);
	xge_mac_disable(pdata);
	phy_stop(ndev->phydev);

	xge_intr_disable(pdata);
	xge_free_irq(ndev);
	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);

	return 0;
}

static int xge_napi(struct napi_struct *napi, const int budget)
{
	struct net_device *ndev = napi->dev;
	struct xge_pdata *pdata;
	int processed;

	pdata = netdev_priv(ndev);

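	/*
	 * Reclaim TX completions first so the queue can be woken, then
	 * receive up to the NAPI budget; interrupts are re-enabled only
	 * when the budget was not exhausted.
	 */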
	xge_txc_poll(ndev);
	processed = xge_rx_poll(ndev, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		xge_intr_enable(pdata);
	}

	return processed;
}

static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	xge_mac_set_station_addr(pdata);

	return 0;
}

static bool is_tx_pending(struct xge_raw_desc *raw_desc)
{
	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

static void xge_free_pending_skb(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	int i;

	tx_ring = pdata->tx_ring;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		raw_desc = &tx_ring->raw_desc[i];

		if (!is_tx_pending(raw_desc))
			continue;

		skb = tx_ring->pkt_info[i].skb;
		dma_addr = tx_ring->pkt_info[i].dma_addr;
		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);
	}
}

static void xge_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	rtnl_lock();

	if (!netif_running(ndev))
		goto out;

	netif_stop_queue(ndev);
	xge_intr_disable(pdata);
	napi_disable(&pdata->napi);

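	/*
	 * Stop TX DMA, reclaim anything already completed, drop frames
	 * still pending, then rebuild the TX ring and reinitialize the MAC.
	 */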
	xge_wr_csr(pdata, DMATXCTRL, 0);
	xge_txc_poll(ndev);
	xge_free_pending_skb(ndev);
	xge_wr_csr(pdata, DMATXSTATUS, ~0U);

	xge_setup_desc(pdata->tx_ring);
	xge_update_tx_desc_addr(pdata);
	xge_mac_init(pdata);

	napi_enable(&pdata->napi);
	xge_intr_enable(pdata);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

out:
	rtnl_unlock();
}

static void xge_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *storage)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_stats *stats = &pdata->stats;

	storage->tx_packets += stats->tx_packets;
	storage->tx_bytes += stats->tx_bytes;

	storage->rx_packets += stats->rx_packets;
	storage->rx_bytes += stats->rx_bytes;
	storage->rx_errors += stats->rx_errors;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
};

static int xge_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct xge_pdata *pdata;
	int ret;

	ndev = alloc_etherdev(sizeof(*pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;

	ndev->features |= NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xge_get_resources(pdata);
	if (ret)
		goto err;

	ndev->hw_features = ndev->features;
	xge_set_ethtool_ops(ndev);

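	/*
	 * Descriptors carry full 64-bit buffer addresses (PKT_ADDRL/H),
	 * so a 64-bit DMA mask is required.
	 */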
	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xge_init_hw(ndev);
	if (ret)
		goto err;

	ret = xge_mdio_config(ndev);
	if (ret)
		goto err;

	netif_napi_add(ndev, &pdata->napi, xge_napi);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err_mdio_remove;
	}

	return 0;

err_mdio_remove:
	xge_mdio_remove(ndev);
err:
	free_netdev(ndev);

	return ret;
}

static void xge_remove(struct platform_device *pdev)
{
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	xge_mdio_remove(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);
}

static void xge_shutdown(struct platform_device *pdev)
{
	struct xge_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xge_remove(pdev);
}

static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);

static struct platform_driver xge_driver = {
	.driver = {
		   .name = "xgene-enet-v2",
		   .acpi_match_table = xge_acpi_match,
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_LICENSE("GPL");