xref: /linux/drivers/net/ethernet/apm/xgene-v2/main.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Applied Micro X-Gene SoC Ethernet v2 Driver
4  *
5  * Copyright (c) 2017, Applied Micro Circuits Corporation
6  * Author(s): Iyappan Subramanian <isubramanian@apm.com>
7  *	      Keyur Chudgar <kchudgar@apm.com>
8  */
9 
10 #include "main.h"
11 
xge_get_resources(struct xge_pdata * pdata)12 static int xge_get_resources(struct xge_pdata *pdata)
13 {
14 	struct platform_device *pdev;
15 	struct net_device *ndev;
16 	int phy_mode, ret = 0;
17 	struct resource *res;
18 	struct device *dev;
19 
20 	pdev = pdata->pdev;
21 	dev = &pdev->dev;
22 	ndev = pdata->ndev;
23 
24 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
25 	if (!res) {
26 		dev_err(dev, "Resource enet_csr not defined\n");
27 		return -ENODEV;
28 	}
29 
30 	pdata->resources.base_addr = devm_ioremap(dev, res->start,
31 						  resource_size(res));
32 	if (!pdata->resources.base_addr) {
33 		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
34 		return -ENOMEM;
35 	}
36 
37 	if (device_get_ethdev_address(dev, ndev))
38 		eth_hw_addr_random(ndev);
39 
40 	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
41 
42 	phy_mode = device_get_phy_mode(dev);
43 	if (phy_mode < 0) {
44 		dev_err(dev, "Unable to get phy-connection-type\n");
45 		return phy_mode;
46 	}
47 	pdata->resources.phy_mode = phy_mode;
48 
49 	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
50 		dev_err(dev, "Incorrect phy-connection-type specified\n");
51 		return -ENODEV;
52 	}
53 
54 	ret = platform_get_irq(pdev, 0);
55 	if (ret < 0)
56 		return ret;
57 	pdata->resources.irq = ret;
58 
59 	return 0;
60 }
61 
/* Post @nbuf fresh RX buffers starting at the ring tail.
 *
 * For each slot: allocate an skb, DMA-map it for device writes, record
 * both in pkt_info[], patch the buffer-address bits into the descriptor,
 * then set the E (empty) bit to hand the slot to hardware.
 *
 * Returns 0 on success, -ENOMEM on skb allocation failure or -EINVAL on
 * a DMA mapping error.
 *
 * NOTE(review): on a mid-loop failure, slots already posted in this call
 * keep their buffers but ring->tail is not advanced past them — verify
 * callers treat a partial refill as fatal and tear the ring down.
 */
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;	/* ring size is a power of two */
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	u8 tail = ring->tail;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 len;
	int i;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		/* Preserve the pre-programmed next-descriptor link while
		 * writing the high bits of the new buffer address.
		 */
		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   SET_BITS(PKT_ADDRH,
						    upper_32_bits(dma_addr)));

		/* Order the m1/pkt_info writes before the E bit that gives
		 * the descriptor to hardware.
		 */
		dma_wmb();
		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
					   SET_BITS(E, 1));
		tail = (tail + 1) & slots;
	}

	ring->tail = tail;

	return 0;
}
111 
xge_init_hw(struct net_device * ndev)112 static int xge_init_hw(struct net_device *ndev)
113 {
114 	struct xge_pdata *pdata = netdev_priv(ndev);
115 	int ret;
116 
117 	ret = xge_port_reset(ndev);
118 	if (ret)
119 		return ret;
120 
121 	xge_port_init(ndev);
122 	pdata->nbufs = NUM_BUFS;
123 
124 	return 0;
125 }
126 
/* Hard interrupt handler: mask the device interrupt and schedule NAPI.
 * When napi_schedule_prep() fails a poll is already pending, so the
 * in-flight poll will pick up the new events.
 */
static irqreturn_t xge_irq(const int irq, void *data)
{
	struct xge_pdata *pdata = data;

	if (napi_schedule_prep(&pdata->napi)) {
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
	}

	return IRQ_HANDLED;
}
138 
/* Build a per-device IRQ name and request the interrupt line with
 * xge_irq() as handler and pdata as the cookie.
 *
 * Returns 0 or the negative errno from request_irq().
 */
static int xge_request_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

	ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
			  pdata);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);

	return ret;
}
153 
/* Release the interrupt line obtained in xge_request_irq(). */
static void xge_free_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	free_irq(pdata->resources.irq, pdata);
}
160 
is_tx_slot_available(struct xge_raw_desc * raw_desc)161 static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
162 {
163 	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
164 	    (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
165 		return true;
166 
167 	return false;
168 }
169 
xge_start_xmit(struct sk_buff * skb,struct net_device * ndev)170 static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
171 {
172 	struct xge_pdata *pdata = netdev_priv(ndev);
173 	struct device *dev = &pdata->pdev->dev;
174 	struct xge_desc_ring *tx_ring;
175 	struct xge_raw_desc *raw_desc;
176 	static dma_addr_t dma_addr;
177 	u64 addr_lo, addr_hi;
178 	void *pkt_buf;
179 	u8 tail;
180 	u16 len;
181 
182 	tx_ring = pdata->tx_ring;
183 	tail = tx_ring->tail;
184 	len = skb_headlen(skb);
185 	raw_desc = &tx_ring->raw_desc[tail];
186 
187 	if (!is_tx_slot_available(raw_desc)) {
188 		netif_stop_queue(ndev);
189 		return NETDEV_TX_BUSY;
190 	}
191 
192 	/* Packet buffers should be 64B aligned */
193 	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
194 				     GFP_ATOMIC);
195 	if (unlikely(!pkt_buf)) {
196 		dev_kfree_skb_any(skb);
197 		return NETDEV_TX_OK;
198 	}
199 	memcpy(pkt_buf, skb->data, len);
200 
201 	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
202 	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
203 	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
204 				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
205 				   SET_BITS(PKT_ADDRH,
206 					    upper_32_bits(dma_addr)));
207 
208 	tx_ring->pkt_info[tail].skb = skb;
209 	tx_ring->pkt_info[tail].dma_addr = dma_addr;
210 	tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
211 
212 	dma_wmb();
213 
214 	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
215 				   SET_BITS(PKT_SIZE, len) |
216 				   SET_BITS(E, 0));
217 	skb_tx_timestamp(skb);
218 	xge_wr_csr(pdata, DMATXCTRL, 1);
219 
220 	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
221 
222 	return NETDEV_TX_OK;
223 }
224 
is_tx_hw_done(struct xge_raw_desc * raw_desc)225 static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
226 {
227 	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
228 	    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
229 		return true;
230 
231 	return false;
232 }
233 
/* Reap completed TX descriptors starting at the ring head.
 *
 * For each descriptor the hardware has finished, free the coherent
 * packet buffer and the skb, account the stats, and mark the slot empty
 * again for xge_start_xmit().  Wakes the queue if it had been stopped
 * for lack of slots.
 */
static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	/* nothing to do when the HW completion counter is zero */
	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		/* read pkt_info only after observing HW's descriptor update */
		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		/* ack one completion to HW — presumably decrements
		 * TXPKTCOUNT; confirm against the register spec
		 */
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}
282 
/* Receive up to @budget packets from the RX ring.
 *
 * Each consumed slot is immediately refilled with a fresh buffer via
 * xge_refill_buffers() and acknowledged to the hardware; errored frames
 * are dropped (rx_errors is bumped) but their slot is still recycled.
 *
 * Returns the number of packets handed to the stack.
 */
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	/* bail out early when the HW packet counter shows nothing pending */
	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		/* E still set means hardware has not filled this slot yet */
		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		/* read descriptor fields/pkt_info only after the E check */
		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			/* drop the frame but still refill and ack the slot */
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}
348 
xge_delete_desc_ring(struct net_device * ndev,struct xge_desc_ring * ring)349 static void xge_delete_desc_ring(struct net_device *ndev,
350 				 struct xge_desc_ring *ring)
351 {
352 	struct xge_pdata *pdata = netdev_priv(ndev);
353 	struct device *dev = &pdata->pdev->dev;
354 	u16 size;
355 
356 	if (!ring)
357 		return;
358 
359 	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
360 	if (ring->desc_addr)
361 		dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);
362 
363 	kfree(ring->pkt_info);
364 	kfree(ring);
365 }
366 
xge_free_buffers(struct net_device * ndev)367 static void xge_free_buffers(struct net_device *ndev)
368 {
369 	struct xge_pdata *pdata = netdev_priv(ndev);
370 	struct xge_desc_ring *ring = pdata->rx_ring;
371 	struct device *dev = &pdata->pdev->dev;
372 	struct sk_buff *skb;
373 	dma_addr_t dma_addr;
374 	int i;
375 
376 	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
377 		skb = ring->pkt_info[i].skb;
378 		dma_addr = ring->pkt_info[i].dma_addr;
379 
380 		if (!skb)
381 			continue;
382 
383 		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
384 				 DMA_FROM_DEVICE);
385 		dev_kfree_skb_any(skb);
386 	}
387 }
388 
/* Drain and free both descriptor rings: reap outstanding TX
 * completions, flush pending RX packets, then release the skbs and
 * ring memory.
 */
static void xge_delete_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	xge_delete_desc_ring(ndev, pdata->tx_ring);

	/* process up to 64 pending RX frames before freeing the rest */
	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
}
400 
xge_create_desc_ring(struct net_device * ndev)401 static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
402 {
403 	struct xge_pdata *pdata = netdev_priv(ndev);
404 	struct device *dev = &pdata->pdev->dev;
405 	struct xge_desc_ring *ring;
406 	u16 size;
407 
408 	ring = kzalloc_obj(*ring);
409 	if (!ring)
410 		return NULL;
411 
412 	ring->ndev = ndev;
413 
414 	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
415 	ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
416 					     GFP_KERNEL);
417 	if (!ring->desc_addr)
418 		goto err;
419 
420 	ring->pkt_info = kzalloc_objs(*ring->pkt_info, XGENE_ENET_NUM_DESC);
421 	if (!ring->pkt_info)
422 		goto err;
423 
424 	xge_setup_desc(ring);
425 
426 	return ring;
427 
428 err:
429 	xge_delete_desc_ring(ndev, ring);
430 
431 	return NULL;
432 }
433 
/* Create the TX and RX descriptor rings, program their base addresses
 * into the hardware and prime the RX ring with buffers.
 *
 * Returns 0 on success or -ENOMEM, with everything created so far
 * deleted again on failure.
 */
static int xge_create_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring;
	int ret;

	/* create tx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->tx_ring = ring;
	xge_update_tx_desc_addr(pdata);

	/* create rx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->rx_ring = ring;
	xge_update_rx_desc_addr(pdata);

	/* post a full ring's worth of RX buffers */
	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
	if (ret)
		goto err;

	return 0;
err:
	xge_delete_desc_rings(ndev);

	return -ENOMEM;
}
466 
xge_open(struct net_device * ndev)467 static int xge_open(struct net_device *ndev)
468 {
469 	struct xge_pdata *pdata = netdev_priv(ndev);
470 	int ret;
471 
472 	ret = xge_create_desc_rings(ndev);
473 	if (ret)
474 		return ret;
475 
476 	napi_enable(&pdata->napi);
477 	ret = xge_request_irq(ndev);
478 	if (ret)
479 		return ret;
480 
481 	xge_intr_enable(pdata);
482 	xge_wr_csr(pdata, DMARXCTRL, 1);
483 
484 	phy_start(ndev->phydev);
485 	xge_mac_enable(pdata);
486 	netif_start_queue(ndev);
487 
488 	return 0;
489 }
490 
/* ndo_stop: quiesce the data path in roughly the reverse order of
 * xge_open(), then release the IRQ and descriptor rings.
 */
static int xge_close(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);
	xge_mac_disable(pdata);
	phy_stop(ndev->phydev);

	xge_intr_disable(pdata);
	xge_free_irq(ndev);
	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);

	return 0;
}
506 
xge_napi(struct napi_struct * napi,const int budget)507 static int xge_napi(struct napi_struct *napi, const int budget)
508 {
509 	struct net_device *ndev = napi->dev;
510 	struct xge_pdata *pdata;
511 	int processed;
512 
513 	pdata = netdev_priv(ndev);
514 
515 	xge_txc_poll(ndev);
516 	processed = xge_rx_poll(ndev, budget);
517 
518 	if (processed < budget) {
519 		napi_complete_done(napi, processed);
520 		xge_intr_enable(pdata);
521 	}
522 
523 	return processed;
524 }
525 
/* ndo_set_mac_address: validate and store the new address via
 * eth_mac_addr(), then program the MAC station-address registers.
 */
static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
	int ret = eth_mac_addr(ndev, addr);

	if (ret)
		return ret;

	xge_mac_set_station_addr(netdev_priv(ndev));

	return 0;
}
539 
is_tx_pending(struct xge_raw_desc * raw_desc)540 static bool is_tx_pending(struct xge_raw_desc *raw_desc)
541 {
542 	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
543 		return true;
544 
545 	return false;
546 }
547 
xge_free_pending_skb(struct net_device * ndev)548 static void xge_free_pending_skb(struct net_device *ndev)
549 {
550 	struct xge_pdata *pdata = netdev_priv(ndev);
551 	struct device *dev = &pdata->pdev->dev;
552 	struct xge_desc_ring *tx_ring;
553 	struct xge_raw_desc *raw_desc;
554 	dma_addr_t dma_addr;
555 	struct sk_buff *skb;
556 	void *pkt_buf;
557 	int i;
558 
559 	tx_ring = pdata->tx_ring;
560 
561 	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
562 		raw_desc = &tx_ring->raw_desc[i];
563 
564 		if (!is_tx_pending(raw_desc))
565 			continue;
566 
567 		skb = tx_ring->pkt_info[i].skb;
568 		dma_addr = tx_ring->pkt_info[i].dma_addr;
569 		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
570 		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
571 		dev_kfree_skb_any(skb);
572 	}
573 }
574 
/* ndo_tx_timeout: reset only the TX side of the device.
 *
 * Stops the queue, interrupts and NAPI; halts TX DMA; reclaims every
 * completed or pending TX buffer; reinitialises the TX ring and MAC;
 * then restarts everything.
 */
static void xge_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	rtnl_lock();

	if (!netif_running(ndev))
		goto out;

	netif_stop_queue(ndev);
	xge_intr_disable(pdata);
	napi_disable(&pdata->napi);

	/* halt TX DMA, reap what completed, free what did not */
	xge_wr_csr(pdata, DMATXCTRL, 0);
	xge_txc_poll(ndev);
	xge_free_pending_skb(ndev);
	/* clear the whole TX status register */
	xge_wr_csr(pdata, DMATXSTATUS, ~0U);

	/* rebuild the TX ring and reprogram its base address */
	xge_setup_desc(pdata->tx_ring);
	xge_update_tx_desc_addr(pdata);
	xge_mac_init(pdata);

	napi_enable(&pdata->napi);
	xge_intr_enable(pdata);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

out:
	rtnl_unlock();
}
605 
xge_get_stats64(struct net_device * ndev,struct rtnl_link_stats64 * storage)606 static void xge_get_stats64(struct net_device *ndev,
607 			    struct rtnl_link_stats64 *storage)
608 {
609 	struct xge_pdata *pdata = netdev_priv(ndev);
610 	struct xge_stats *stats = &pdata->stats;
611 
612 	storage->tx_packets += stats->tx_packets;
613 	storage->tx_bytes += stats->tx_bytes;
614 
615 	storage->rx_packets += stats->rx_packets;
616 	storage->rx_bytes += stats->rx_bytes;
617 	storage->rx_errors += stats->rx_errors;
618 }
619 
/* Net-device callbacks wired into the stack at probe time. */
static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
};
628 
/* Probe: allocate the netdev, map resources, initialise the hardware
 * and MDIO bus, then register the interface.
 *
 * Returns 0 on success or a negative errno; on failure the netdev and
 * (if already configured) the MDIO bus are released again.
 */
static int xge_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct xge_pdata *pdata;
	int ret;

	ndev = alloc_etherdev(sizeof(*pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;

	ndev->features |= NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xge_get_resources(pdata);
	if (ret)
		goto err;

	ndev->hw_features = ndev->features;
	xge_set_ethtool_ops(ndev);

	/* device performs 64-bit DMA to descriptor and buffer memory */
	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xge_init_hw(ndev);
	if (ret)
		goto err;

	ret = xge_mdio_config(ndev);
	if (ret)
		goto err;

	netif_napi_add(ndev, &pdata->napi, xge_napi);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err_mdio_remove;
	}

	return 0;

err_mdio_remove:
	xge_mdio_remove(ndev);
err:
	free_netdev(ndev);

	return ret;
}
689 
/* Remove: close the interface if it is up, then tear down the MDIO
 * bus and unregister/free the netdev.
 */
static void xge_remove(struct platform_device *pdev)
{
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	/* NOTE(review): MDIO is removed before unregister_netdev() —
	 * confirm nothing can touch the PHY between these two calls.
	 */
	xge_mdio_remove(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);
}
707 
xge_shutdown(struct platform_device * pdev)708 static void xge_shutdown(struct platform_device *pdev)
709 {
710 	struct xge_pdata *pdata;
711 
712 	pdata = platform_get_drvdata(pdev);
713 	if (!pdata)
714 		return;
715 
716 	if (!pdata->ndev)
717 		return;
718 
719 	xge_remove(pdev);
720 }
721 
/* ACPI ID this driver binds against (X-Gene v2 ENET port). */
static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
727 
/* Platform driver glue; devices are matched via the ACPI table above. */
static struct platform_driver xge_driver = {
	.driver = {
		   .name = "xgene-enet-v2",
		   .acpi_match_table = xge_acpi_match,
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_LICENSE("GPL");
742