xref: /linux/drivers/net/ethernet/apm/xgene/xgene_enet_main.c (revision 4f58e6dceb0e44ca8f21568ed81e1df24e55964c)
1 /* Applied Micro X-Gene SoC Ethernet Driver
2  *
3  * Copyright (c) 2014, Applied Micro Circuits Corporation
4  * Authors: Iyappan Subramanian <isubramanian@apm.com>
5  *	    Ravi Patel <rapatel@apm.com>
6  *	    Keyur Chudgar <kchudgar@apm.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
20  */
21 
22 #include <linux/gpio.h>
23 #include "xgene_enet_main.h"
24 #include "xgene_enet_hw.h"
25 #include "xgene_enet_sgmac.h"
26 #include "xgene_enet_xgmac.h"
27 
28 #define RES_ENET_CSR	0
29 #define RES_RING_CSR	1
30 #define RES_RING_CMD	2
31 
32 static const struct of_device_id xgene_enet_of_match[];
33 static const struct acpi_device_id xgene_enet_acpi_match[];
34 
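/* Pre-fill every 16-byte descriptor in the buffer pool with its slot
 * index, the destination free-pool queue number (FPQNUM) and a stash
 * hint, in the little-endian layout consumed by the hardware.
 */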
35 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
36 {
37 	struct xgene_enet_raw_desc16 *raw_desc;
38 	int i;
39 
40 	for (i = 0; i < buf_pool->slots; i++) {
41 		raw_desc = &buf_pool->raw_desc16[i];
42 
43 		/* Hardware expects descriptor in little endian format */
44 		raw_desc->m0 = cpu_to_le64(i |
45 				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
46 				SET_VAL(STASH, 3));
47 	}
48 }
49 
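/* Allocate @nbuf receive SKBs, DMA-map their data buffers and publish
 * the addresses into the free-pool descriptors starting at ->tail; the
 * final wr_cmd hands the freshly filled slots back to the hardware.
 */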
50 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
51 				     u32 nbuf)
52 {
53 	struct sk_buff *skb;
54 	struct xgene_enet_raw_desc16 *raw_desc;
55 	struct xgene_enet_pdata *pdata;
56 	struct net_device *ndev;
57 	struct device *dev;
58 	dma_addr_t dma_addr;
59 	u32 tail = buf_pool->tail;
60 	u32 slots = buf_pool->slots - 1;
61 	u16 bufdatalen, len;
62 	int i;
63 
64 	ndev = buf_pool->ndev;
65 	dev = ndev_to_dev(buf_pool->ndev);
66 	pdata = netdev_priv(ndev);
67 	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
68 	len = XGENE_ENET_MAX_MTU;
69 
70 	for (i = 0; i < nbuf; i++) {
71 		raw_desc = &buf_pool->raw_desc16[tail];
72 
73 		skb = netdev_alloc_skb_ip_align(ndev, len);
74 		if (unlikely(!skb))
75 			return -ENOMEM;
76 
77 		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
78 		if (dma_mapping_error(dev, dma_addr)) {
79 			netdev_err(ndev, "DMA mapping error\n");
80 			dev_kfree_skb_any(skb);
81 			return -EINVAL;
82 		}
83 
84 		buf_pool->rx_skb[tail] = skb;
85 
86 		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
87 					   SET_VAL(BUFDATALEN, bufdatalen) |
88 					   SET_BIT(COHERENT));
89 		tail = (tail + 1) & slots;
90 	}
91 
92 	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
93 	buf_pool->tail = tail;
94 
95 	return 0;
96 }
97 
98 static u8 xgene_enet_hdr_len(const void *data)
99 {
100 	const struct ethhdr *eth = data;
101 
102 	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
103 }
104 
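/* Release the receive buffers still owned by hardware: free each SKB
 * left in the pool and unmap the DMA address recorded in its
 * descriptor.
 */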
105 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
106 {
107 	struct device *dev = ndev_to_dev(buf_pool->ndev);
108 	struct xgene_enet_raw_desc16 *raw_desc;
109 	dma_addr_t dma_addr;
110 	int i;
111 
112 	/* Free up the buffers held by hardware */
113 	for (i = 0; i < buf_pool->slots; i++) {
114 		if (buf_pool->rx_skb[i]) {
115 			dev_kfree_skb_any(buf_pool->rx_skb[i]);
116 
117 			raw_desc = &buf_pool->raw_desc16[i];
118 			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
119 			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
120 					 DMA_FROM_DEVICE);
121 		}
122 	}
123 }
124 
125 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
126 {
127 	struct xgene_enet_desc_ring *rx_ring = data;
128 
129 	if (napi_schedule_prep(&rx_ring->napi)) {
130 		disable_irq_nosync(irq);
131 		__napi_schedule(&rx_ring->napi);
132 	}
133 
134 	return IRQ_HANDLED;
135 }
136 
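/* Handle one TX completion: unmap the linear part and all fragments of
 * the transmitted SKB, drop the reference on the shared MSS register
 * slot if TSO was used (ET bit set), report any LERR status and free
 * the SKB.
 */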
137 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
138 				    struct xgene_enet_raw_desc *raw_desc)
139 {
140 	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
141 	struct sk_buff *skb;
142 	struct device *dev;
143 	skb_frag_t *frag;
144 	dma_addr_t *frag_dma_addr;
145 	u16 skb_index;
146 	u8 status;
147 	int i, ret = 0;
148 	u8 mss_index;
149 
150 	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
151 	skb = cp_ring->cp_skb[skb_index];
152 	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
153 
154 	dev = ndev_to_dev(cp_ring->ndev);
155 	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
156 			 skb_headlen(skb),
157 			 DMA_TO_DEVICE);
158 
159 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
160 		frag = &skb_shinfo(skb)->frags[i];
161 		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
162 			       DMA_TO_DEVICE);
163 	}
164 
165 	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
166 		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
167 		spin_lock(&pdata->mss_lock);
168 		pdata->mss_refcnt[mss_index]--;
169 		spin_unlock(&pdata->mss_lock);
170 	}
171 
172 	/* Checking for error */
173 	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
174 	if (unlikely(status > 2)) {
175 		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
176 				       status);
177 		ret = -EIO;
178 	}
179 
180 	if (likely(skb)) {
181 		dev_kfree_skb_any(skb);
182 	} else {
183 		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
184 		ret = -EIO;
185 	}
186 
187 	return ret;
188 }
189 
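/* Only NUM_MSS_REG shared MSS registers are available for TSO. Reuse a
 * slot that already holds @mss, otherwise claim a slot whose refcount
 * has dropped to zero; returns the slot index, or -EBUSY when every
 * slot is in use with a different MSS.
 */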
190 static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
191 {
192 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
193 	bool mss_index_found = false;
194 	int mss_index;
195 	int i;
196 
197 	spin_lock(&pdata->mss_lock);
198 
199 	/* Reuse the slot if MSS matches */
200 	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
201 		if (pdata->mss[i] == mss) {
202 			pdata->mss_refcnt[i]++;
203 			mss_index = i;
204 			mss_index_found = true;
205 		}
206 	}
207 
208 	/* Overwrite the slot with ref_count = 0 */
209 	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
210 		if (!pdata->mss_refcnt[i]) {
211 			pdata->mss_refcnt[i]++;
212 			pdata->mac_ops->set_mss(pdata, mss, i);
213 			pdata->mss[i] = mss;
214 			mss_index = i;
215 			mss_index_found = true;
216 		}
217 	}
218 
219 	spin_unlock(&pdata->mss_lock);
220 
221 	/* No slots with ref_count = 0 available, return busy */
222 	if (!mss_index_found)
223 		return -EBUSY;
224 
225 	return mss_index;
226 }
227 
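/* Build the hopinfo flags of the TX work message: Ethernet/IP/TCP
 * header lengths, checksum-enable and protocol bits, and the MSS slot
 * index when TSO applies. May linearize the SKB if the headers do not
 * fit in the first three buffers; returns -EBUSY when no MSS slot is
 * free.
 */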
228 static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
229 {
230 	struct net_device *ndev = skb->dev;
231 	struct iphdr *iph;
232 	u8 l3hlen = 0, l4hlen = 0;
233 	u8 ethhdr, proto = 0, csum_enable = 0;
234 	u32 hdr_len, mss = 0;
235 	u32 i, len, nr_frags;
236 	int mss_index;
237 
238 	ethhdr = xgene_enet_hdr_len(skb->data);
239 
240 	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
241 	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
242 		goto out;
243 
244 	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
245 		goto out;
246 
247 	iph = ip_hdr(skb);
248 	if (unlikely(ip_is_fragment(iph)))
249 		goto out;
250 
251 	if (likely(iph->protocol == IPPROTO_TCP)) {
252 		l4hlen = tcp_hdrlen(skb) >> 2;
253 		csum_enable = 1;
254 		proto = TSO_IPPROTO_TCP;
255 		if (ndev->features & NETIF_F_TSO) {
256 			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
257 			mss = skb_shinfo(skb)->gso_size;
258 
259 			if (skb_is_nonlinear(skb)) {
260 				len = skb_headlen(skb);
261 				nr_frags = skb_shinfo(skb)->nr_frags;
262 
263 				for (i = 0; i < 2 && i < nr_frags; i++)
264 					len += skb_shinfo(skb)->frags[i].size;
265 
266 				/* HW requires the header to reside within the first 3 buffers */
267 				if (unlikely(hdr_len > len)) {
268 					if (skb_linearize(skb))
269 						return 0;
270 				}
271 			}
272 
273 			if (!mss || ((skb->len - hdr_len) <= mss))
274 				goto out;
275 
276 			mss_index = xgene_enet_setup_mss(ndev, mss);
277 			if (unlikely(mss_index < 0))
278 				return -EBUSY;
279 
280 			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
281 		}
282 	} else if (iph->protocol == IPPROTO_UDP) {
283 		l4hlen = UDP_HDR_SIZE;
284 		csum_enable = 1;
285 	}
286 out:
287 	l3hlen = ip_hdrlen(skb) >> 2;
288 	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
289 		    SET_VAL(IPHDR, l3hlen) |
290 		    SET_VAL(ETHHDR, ethhdr) |
291 		    SET_VAL(EC, csum_enable) |
292 		    SET_VAL(IS, proto) |
293 		    SET_BIT(IC) |
294 		    SET_BIT(TYPE_ETH_WORK_MESSAGE);
295 
296 	return 0;
297 }
298 
299 static u16 xgene_enet_encode_len(u16 len)
300 {
301 	return (len == BUFLEN_16K) ? 0 : len;
302 }
303 
304 static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
305 {
306 	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
307 				    SET_VAL(BUFDATALEN, len));
308 }
309 
310 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
311 {
312 	__le64 *exp_bufs;
313 
314 	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
315 	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
316 	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
317 
318 	return exp_bufs;
319 }
320 
321 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
322 {
323 	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
324 }
325 
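/* Fill the TX descriptor(s) for @skb. The first descriptor covers the
 * linear data; for nonlinear SKBs a second (NV) descriptor carries the
 * first few fragment pointers, and remaining fragments or >16KB splits
 * spill into an external buffer list (LL) that is DMA-mapped
 * separately. Returns the number of ring slots consumed or a negative
 * errno.
 */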
326 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
327 				    struct sk_buff *skb)
328 {
329 	struct device *dev = ndev_to_dev(tx_ring->ndev);
330 	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
331 	struct xgene_enet_raw_desc *raw_desc;
332 	__le64 *exp_desc = NULL, *exp_bufs = NULL;
333 	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
334 	skb_frag_t *frag;
335 	u16 tail = tx_ring->tail;
336 	u64 hopinfo = 0;
337 	u32 len, hw_len;
338 	u8 ll = 0, nv = 0, idx = 0;
339 	bool split = false;
340 	u32 size, offset, ell_bytes = 0;
341 	u32 i, fidx, nr_frags, count = 1;
342 	int ret;
343 
344 	raw_desc = &tx_ring->raw_desc[tail];
345 	tail = (tail + 1) & (tx_ring->slots - 1);
346 	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
347 
348 	ret = xgene_enet_work_msg(skb, &hopinfo);
349 	if (ret)
350 		return ret;
351 
352 	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
353 				   hopinfo);
354 
355 	len = skb_headlen(skb);
356 	hw_len = xgene_enet_encode_len(len);
357 
358 	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
359 	if (dma_mapping_error(dev, dma_addr)) {
360 		netdev_err(tx_ring->ndev, "DMA mapping error\n");
361 		return -EINVAL;
362 	}
363 
364 	/* Hardware expects descriptor in little endian format */
365 	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
366 				   SET_VAL(BUFDATALEN, hw_len) |
367 				   SET_BIT(COHERENT));
368 
369 	if (!skb_is_nonlinear(skb))
370 		goto out;
371 
372 	/* scatter gather */
373 	nv = 1;
374 	exp_desc = (void *)&tx_ring->raw_desc[tail];
375 	tail = (tail + 1) & (tx_ring->slots - 1);
376 	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
377 
378 	nr_frags = skb_shinfo(skb)->nr_frags;
379 	for (i = nr_frags; i < 4; i++)
380 		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
381 
382 	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
383 
384 	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
385 		if (!split) {
386 			frag = &skb_shinfo(skb)->frags[fidx];
387 			size = skb_frag_size(frag);
388 			offset = 0;
389 
390 			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
391 						     DMA_TO_DEVICE);
392 			if (dma_mapping_error(dev, pbuf_addr))
393 				return -EINVAL;
394 
395 			frag_dma_addr[fidx] = pbuf_addr;
396 			fidx++;
397 
398 			if (size > BUFLEN_16K)
399 				split = true;
400 		}
401 
402 		if (size > BUFLEN_16K) {
403 			len = BUFLEN_16K;
404 			size -= BUFLEN_16K;
405 		} else {
406 			len = size;
407 			split = false;
408 		}
409 
410 		dma_addr = pbuf_addr + offset;
411 		hw_len = xgene_enet_encode_len(len);
412 
413 		switch (i) {
414 		case 0:
415 		case 1:
416 		case 2:
417 			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
418 			break;
419 		case 3:
420 			if (split || (fidx != nr_frags)) {
421 				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
422 				xgene_set_addr_len(exp_bufs, idx, dma_addr,
423 						   hw_len);
424 				idx++;
425 				ell_bytes += len;
426 			} else {
427 				xgene_set_addr_len(exp_desc, i, dma_addr,
428 						   hw_len);
429 			}
430 			break;
431 		default:
432 			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
433 			idx++;
434 			ell_bytes += len;
435 			break;
436 		}
437 
438 		if (split)
439 			offset += BUFLEN_16K;
440 	}
441 	count++;
442 
443 	if (idx) {
444 		ll = 1;
445 		dma_addr = dma_map_single(dev, exp_bufs,
446 					  sizeof(u64) * MAX_EXP_BUFFS,
447 					  DMA_TO_DEVICE);
448 		if (dma_mapping_error(dev, dma_addr))
450 			return -EINVAL;
452 		i = ell_bytes >> LL_BYTES_LSB_LEN;
453 		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
454 					  SET_VAL(LL_BYTES_MSB, i) |
455 					  SET_VAL(LL_LEN, idx));
456 		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
457 	}
458 
459 out:
460 	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
461 				   SET_VAL(USERINFO, tx_ring->tail));
462 	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
463 	pdata->tx_level[tx_ring->cp_ring->index] += count;
464 	tx_ring->tail = tail;
465 
466 	return count;
467 }
468 
469 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
470 					 struct net_device *ndev)
471 {
472 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
473 	struct xgene_enet_desc_ring *tx_ring;
474 	int index = skb->queue_mapping;
475 	u32 tx_level = pdata->tx_level[index];
476 	int count;
477 
478 	tx_ring = pdata->tx_ring[index];
479 	if (tx_level < pdata->txc_level[index])
480 		tx_level += ((typeof(pdata->tx_level[index]))~0U);
481 
482 	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
483 		netif_stop_subqueue(ndev, index);
484 		return NETDEV_TX_BUSY;
485 	}
486 
487 	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
488 		return NETDEV_TX_OK;
489 
490 	count = xgene_enet_setup_tx_desc(tx_ring, skb);
491 	if (count == -EBUSY)
492 		return NETDEV_TX_BUSY;
493 
494 	if (count <= 0) {
495 		dev_kfree_skb_any(skb);
496 		return NETDEV_TX_OK;
497 	}
498 
499 	skb_tx_timestamp(skb);
500 
501 	tx_ring->tx_packets++;
502 	tx_ring->tx_bytes += skb->len;
503 
504 	pdata->ring_ops->wr_cmd(tx_ring, count);
505 	return NETDEV_TX_OK;
506 }
507 
508 static void xgene_enet_skip_csum(struct sk_buff *skb)
509 {
510 	struct iphdr *iph = ip_hdr(skb);
511 
512 	if (!ip_is_fragment(iph) ||
513 	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
514 		skb->ip_summed = CHECKSUM_UNNECESSARY;
515 	}
516 }
517 
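/* Receive one frame: unmap its buffer, check the ELERR/LERR status,
 * trim the 4-byte CRC the hardware leaves in place, set the checksum
 * state and hand the SKB to GRO. The buffer pool is refilled once
 * every NUM_BUFPOOL processed frames.
 */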
518 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
519 			       struct xgene_enet_raw_desc *raw_desc)
520 {
521 	struct net_device *ndev;
522 	struct device *dev;
523 	struct xgene_enet_desc_ring *buf_pool;
524 	u32 datalen, skb_index;
525 	struct sk_buff *skb;
526 	u8 status;
527 	int ret = 0;
528 
529 	ndev = rx_ring->ndev;
530 	dev = ndev_to_dev(rx_ring->ndev);
531 	buf_pool = rx_ring->buf_pool;
532 
533 	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
534 			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
535 	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
536 	skb = buf_pool->rx_skb[skb_index];
537 	buf_pool->rx_skb[skb_index] = NULL;
538 
539 	/* checking for error */
540 	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
541 		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
542 	if (unlikely(status > 2)) {
543 		dev_kfree_skb_any(skb);
544 		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
545 				       status);
546 		ret = -EIO;
547 		goto out;
548 	}
549 
550 	/* strip off CRC as HW isn't doing this */
551 	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
552 	datalen = (datalen & DATALEN_MASK) - 4;
553 	prefetch(skb->data - NET_IP_ALIGN);
554 	skb_put(skb, datalen);
555 
556 	skb_checksum_none_assert(skb);
557 	skb->protocol = eth_type_trans(skb, ndev);
558 	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
559 		   skb->protocol == htons(ETH_P_IP))) {
560 		xgene_enet_skip_csum(skb);
561 	}
562 
563 	rx_ring->rx_packets++;
564 	rx_ring->rx_bytes += datalen;
565 	napi_gro_receive(&rx_ring->napi, skb);
566 out:
567 	if (--rx_ring->nbufpool == 0) {
568 		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
569 		rx_ring->nbufpool = NUM_BUFPOOL;
570 	}
571 
572 	return ret;
573 }
574 
575 static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
576 {
577 	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
578 }
579 
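/* Drain up to @budget descriptors from a CPU-owned ring. An element may
 * span two slots when the NV bit is set; RX descriptors go to
 * xgene_enet_rx_frame(), everything else is treated as a TX completion.
 * Consumed slots are returned to hardware with a negative wr_cmd count.
 */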
580 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
581 				   int budget)
582 {
583 	struct net_device *ndev = ring->ndev;
584 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
585 	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
586 	u16 head = ring->head;
587 	u16 slots = ring->slots - 1;
588 	int ret, desc_count, count = 0, processed = 0;
589 	bool is_completion;
590 
591 	do {
592 		raw_desc = &ring->raw_desc[head];
593 		desc_count = 0;
594 		is_completion = false;
595 		exp_desc = NULL;
596 		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
597 			break;
598 
599 		/* read fpqnum field after dataaddr field */
600 		dma_rmb();
601 		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
602 			head = (head + 1) & slots;
603 			exp_desc = &ring->raw_desc[head];
604 
605 			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
606 				head = (head - 1) & slots;
607 				break;
608 			}
609 			dma_rmb();
610 			count++;
611 			desc_count++;
612 		}
613 		if (is_rx_desc(raw_desc)) {
614 			ret = xgene_enet_rx_frame(ring, raw_desc);
615 		} else {
616 			ret = xgene_enet_tx_completion(ring, raw_desc);
617 			is_completion = true;
618 		}
619 		xgene_enet_mark_desc_slot_empty(raw_desc);
620 		if (exp_desc)
621 			xgene_enet_mark_desc_slot_empty(exp_desc);
622 
623 		head = (head + 1) & slots;
624 		count++;
625 		desc_count++;
626 		processed++;
627 		if (is_completion)
628 			pdata->txc_level[ring->index] += desc_count;
629 
630 		if (ret)
631 			break;
632 	} while (--budget);
633 
634 	if (likely(count)) {
635 		pdata->ring_ops->wr_cmd(ring, -count);
636 		ring->head = head;
637 
638 		if (__netif_subqueue_stopped(ndev, ring->index))
639 			netif_start_subqueue(ndev, ring->index);
640 	}
641 
642 	return processed;
643 }
644 
645 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
646 {
647 	struct xgene_enet_desc_ring *ring;
648 	int processed;
649 
650 	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
651 	processed = xgene_enet_process_ring(ring, budget);
652 
653 	if (processed != budget) {
654 		napi_complete(napi);
655 		enable_irq(ring->irq);
656 	}
657 
658 	return processed;
659 }
660 
661 static void xgene_enet_timeout(struct net_device *ndev)
662 {
663 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
664 	struct netdev_queue *txq;
665 	int i;
666 
667 	pdata->mac_ops->reset(pdata);
668 
669 	for (i = 0; i < pdata->txq_cnt; i++) {
670 		txq = netdev_get_tx_queue(ndev, i);
671 		txq->trans_start = jiffies;
672 		netif_tx_start_queue(txq);
673 	}
674 }
675 
676 static void xgene_enet_set_irq_name(struct net_device *ndev)
677 {
678 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
679 	struct xgene_enet_desc_ring *ring;
680 	int i;
681 
682 	for (i = 0; i < pdata->rxq_cnt; i++) {
683 		ring = pdata->rx_ring[i];
684 		if (!pdata->cq_cnt) {
685 			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
686 				 ndev->name);
687 		} else {
688 			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
689 				 ndev->name, i);
690 		}
691 	}
692 
693 	for (i = 0; i < pdata->cq_cnt; i++) {
694 		ring = pdata->tx_ring[i]->cp_ring;
695 		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
696 			 ndev->name, i);
697 	}
698 }
699 
700 static int xgene_enet_register_irq(struct net_device *ndev)
701 {
702 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
703 	struct device *dev = ndev_to_dev(ndev);
704 	struct xgene_enet_desc_ring *ring;
705 	int ret = 0, i;
706 
707 	xgene_enet_set_irq_name(ndev);
708 	for (i = 0; i < pdata->rxq_cnt; i++) {
709 		ring = pdata->rx_ring[i];
710 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
711 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
712 				       0, ring->irq_name, ring);
713 		if (ret) {
714 			netdev_err(ndev, "Failed to request irq %s\n",
715 				   ring->irq_name);
716 		}
717 	}
718 
719 	for (i = 0; i < pdata->cq_cnt; i++) {
720 		ring = pdata->tx_ring[i]->cp_ring;
721 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
722 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
723 				       0, ring->irq_name, ring);
724 		if (ret) {
725 			netdev_err(ndev, "Failed to request irq %s\n",
726 				   ring->irq_name);
727 		}
728 	}
729 
730 	return ret;
731 }
732 
733 static void xgene_enet_free_irq(struct net_device *ndev)
734 {
735 	struct xgene_enet_pdata *pdata;
736 	struct xgene_enet_desc_ring *ring;
737 	struct device *dev;
738 	int i;
739 
740 	pdata = netdev_priv(ndev);
741 	dev = ndev_to_dev(ndev);
742 
743 	for (i = 0; i < pdata->rxq_cnt; i++) {
744 		ring = pdata->rx_ring[i];
745 		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
746 		devm_free_irq(dev, ring->irq, ring);
747 	}
748 
749 	for (i = 0; i < pdata->cq_cnt; i++) {
750 		ring = pdata->tx_ring[i]->cp_ring;
751 		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
752 		devm_free_irq(dev, ring->irq, ring);
753 	}
754 }
755 
756 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
757 {
758 	struct napi_struct *napi;
759 	int i;
760 
761 	for (i = 0; i < pdata->rxq_cnt; i++) {
762 		napi = &pdata->rx_ring[i]->napi;
763 		napi_enable(napi);
764 	}
765 
766 	for (i = 0; i < pdata->cq_cnt; i++) {
767 		napi = &pdata->tx_ring[i]->cp_ring->napi;
768 		napi_enable(napi);
769 	}
770 }
771 
772 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
773 {
774 	struct napi_struct *napi;
775 	int i;
776 
777 	for (i = 0; i < pdata->rxq_cnt; i++) {
778 		napi = &pdata->rx_ring[i]->napi;
779 		napi_disable(napi);
780 	}
781 
782 	for (i = 0; i < pdata->cq_cnt; i++) {
783 		napi = &pdata->tx_ring[i]->cp_ring->napi;
784 		napi_disable(napi);
785 	}
786 }
787 
788 static int xgene_enet_open(struct net_device *ndev)
789 {
790 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
791 	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
792 	int ret;
793 
794 	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
795 	if (ret)
796 		return ret;
797 
798 	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
799 	if (ret)
800 		return ret;
801 
802 	xgene_enet_napi_enable(pdata);
803 	ret = xgene_enet_register_irq(ndev);
804 	if (ret)
805 		return ret;
806 
807 	if (ndev->phydev) {
808 		phy_start(ndev->phydev);
809 	} else {
810 		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
811 		netif_carrier_off(ndev);
812 	}
813 
814 	mac_ops->tx_enable(pdata);
815 	mac_ops->rx_enable(pdata);
816 	netif_tx_start_all_queues(ndev);
817 
818 	return ret;
819 }
820 
821 static int xgene_enet_close(struct net_device *ndev)
822 {
823 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
824 	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
825 	int i;
826 
827 	netif_tx_stop_all_queues(ndev);
828 	mac_ops->tx_disable(pdata);
829 	mac_ops->rx_disable(pdata);
830 
831 	if (ndev->phydev)
832 		phy_stop(ndev->phydev);
833 	else
834 		cancel_delayed_work_sync(&pdata->link_work);
835 
836 	xgene_enet_free_irq(ndev);
837 	xgene_enet_napi_disable(pdata);
838 	for (i = 0; i < pdata->rxq_cnt; i++)
839 		xgene_enet_process_ring(pdata->rx_ring[i], -1);
840 
841 	return 0;
842 }

843 static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
844 {
845 	struct xgene_enet_pdata *pdata;
846 	struct device *dev;
847 
848 	pdata = netdev_priv(ring->ndev);
849 	dev = ndev_to_dev(ring->ndev);
850 
851 	pdata->ring_ops->clear(ring);
852 	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
853 }
854 
855 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
856 {
857 	struct xgene_enet_desc_ring *buf_pool;
858 	struct xgene_enet_desc_ring *ring;
859 	int i;
860 
861 	for (i = 0; i < pdata->txq_cnt; i++) {
862 		ring = pdata->tx_ring[i];
863 		if (ring) {
864 			xgene_enet_delete_ring(ring);
865 			pdata->port_ops->clear(pdata, ring);
866 			if (pdata->cq_cnt)
867 				xgene_enet_delete_ring(ring->cp_ring);
868 			pdata->tx_ring[i] = NULL;
869 		}
870 	}
871 
872 	for (i = 0; i < pdata->rxq_cnt; i++) {
873 		ring = pdata->rx_ring[i];
874 		if (ring) {
875 			buf_pool = ring->buf_pool;
876 			xgene_enet_delete_bufpool(buf_pool);
877 			xgene_enet_delete_ring(buf_pool);
878 			pdata->port_ops->clear(pdata, buf_pool);
879 			xgene_enet_delete_ring(ring);
880 			pdata->rx_ring[i] = NULL;
881 		}
882 	}
883 }
884 
885 static int xgene_enet_get_ring_size(struct device *dev,
886 				    enum xgene_enet_ring_cfgsize cfgsize)
887 {
888 	int size = -EINVAL;
889 
890 	switch (cfgsize) {
891 	case RING_CFGSIZE_512B:
892 		size = 0x200;
893 		break;
894 	case RING_CFGSIZE_2KB:
895 		size = 0x800;
896 		break;
897 	case RING_CFGSIZE_16KB:
898 		size = 0x4000;
899 		break;
900 	case RING_CFGSIZE_64KB:
901 		size = 0x10000;
902 		break;
903 	case RING_CFGSIZE_512KB:
904 		size = 0x80000;
905 		break;
906 	default:
907 		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
908 		break;
909 	}
910 
911 	return size;
912 }
913 
914 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
915 {
916 	struct xgene_enet_pdata *pdata;
917 	struct device *dev;
918 
919 	if (!ring)
920 		return;
921 
922 	dev = ndev_to_dev(ring->ndev);
923 	pdata = netdev_priv(ring->ndev);
924 
925 	if (ring->desc_addr) {
926 		pdata->ring_ops->clear(ring);
927 		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
928 	}
929 	devm_kfree(dev, ring);
930 }
931 
932 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
933 {
934 	struct device *dev = &pdata->pdev->dev;
935 	struct xgene_enet_desc_ring *ring;
936 	int i;
937 
938 	for (i = 0; i < pdata->txq_cnt; i++) {
939 		ring = pdata->tx_ring[i];
940 		if (ring) {
941 			if (ring->cp_ring && ring->cp_ring->cp_skb)
942 				devm_kfree(dev, ring->cp_ring->cp_skb);
943 			if (ring->cp_ring && pdata->cq_cnt)
944 				xgene_enet_free_desc_ring(ring->cp_ring);
945 			xgene_enet_free_desc_ring(ring);
946 		}
947 	}
948 
949 	for (i = 0; i < pdata->rxq_cnt; i++) {
950 		ring = pdata->rx_ring[i];
951 		if (ring) {
952 			if (ring->buf_pool) {
953 				if (ring->buf_pool->rx_skb)
954 					devm_kfree(dev, ring->buf_pool->rx_skb);
955 				xgene_enet_free_desc_ring(ring->buf_pool);
956 			}
957 			xgene_enet_free_desc_ring(ring);
958 		}
959 	}
960 }
961 
962 static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
963 				 struct xgene_enet_desc_ring *ring)
964 {
965 	if ((pdata->enet_id == XGENE_ENET2) &&
966 	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
967 		return true;
968 	}
969 
970 	return false;
971 }
972 
973 static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
974 					      struct xgene_enet_desc_ring *ring)
975 {
976 	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
977 
978 	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
979 }
980 
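/* Allocate and set up one descriptor ring: coherent DMA memory for the
 * descriptors, an interrupt mailbox when required (ENET2 rings owned by
 * the CPU) and the per-ring command register window.
 */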
981 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
982 			struct net_device *ndev, u32 ring_num,
983 			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
984 {
985 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
986 	struct device *dev = ndev_to_dev(ndev);
987 	struct xgene_enet_desc_ring *ring;
988 	void *irq_mbox_addr;
989 	int size;
990 
991 	size = xgene_enet_get_ring_size(dev, cfgsize);
992 	if (size < 0)
993 		return NULL;
994 
995 	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
996 			    GFP_KERNEL);
997 	if (!ring)
998 		return NULL;
999 
1000 	ring->ndev = ndev;
1001 	ring->num = ring_num;
1002 	ring->cfgsize = cfgsize;
1003 	ring->id = ring_id;
1004 
1005 	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
1006 					      GFP_KERNEL | __GFP_ZERO);
1007 	if (!ring->desc_addr) {
1008 		devm_kfree(dev, ring);
1009 		return NULL;
1010 	}
1011 	ring->size = size;
1012 
1013 	if (is_irq_mbox_required(pdata, ring)) {
1014 		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
1015 						    &ring->irq_mbox_dma,
1016 						    GFP_KERNEL | __GFP_ZERO);
1017 		if (!irq_mbox_addr) {
1018 			dmam_free_coherent(dev, size, ring->desc_addr,
1019 					   ring->dma);
1020 			devm_kfree(dev, ring);
1021 			return NULL;
1022 		}
1023 		ring->irq_mbox_addr = irq_mbox_addr;
1024 	}
1025 
1026 	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
1027 	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
1028 	ring = pdata->ring_ops->setup(ring);
1029 	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
1030 		   ring->num, ring->size, ring->id, ring->slots);
1031 
1032 	return ring;
1033 }
1034 
1035 static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
1036 {
1037 	return (owner << 6) | (bufnum & GENMASK(5, 0));
1038 }
1039 
1040 static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
1041 {
1042 	enum xgene_ring_owner owner;
1043 
1044 	if (p->enet_id == XGENE_ENET1) {
1045 		switch (p->phy_mode) {
1046 		case PHY_INTERFACE_MODE_SGMII:
1047 			owner = RING_OWNER_ETH0;
1048 			break;
1049 		default:
1050 			owner = (!p->port_id) ? RING_OWNER_ETH0 :
1051 						RING_OWNER_ETH1;
1052 			break;
1053 		}
1054 	} else {
1055 		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
1056 	}
1057 
1058 	return owner;
1059 }
1060 
1061 static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
1062 {
1063 	struct device *dev = &pdata->pdev->dev;
1064 	u32 cpu_bufnum;
1065 	int ret;
1066 
1067 	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
1068 
1069 	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
1070 }
1071 
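/* Create the RX rings together with their buffer pools, then the TX
 * rings with their expanded-buffer areas and the TX completion rings
 * (the RX ring doubles as completion ring when cq_cnt is zero).
 */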
1072 static int xgene_enet_create_desc_rings(struct net_device *ndev)
1073 {
1074 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1075 	struct device *dev = ndev_to_dev(ndev);
1076 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
1077 	struct xgene_enet_desc_ring *buf_pool = NULL;
1078 	enum xgene_ring_owner owner;
1079 	dma_addr_t dma_exp_bufs;
1080 	u8 cpu_bufnum;
1081 	u8 eth_bufnum = pdata->eth_bufnum;
1082 	u8 bp_bufnum = pdata->bp_bufnum;
1083 	u16 ring_num = pdata->ring_num;
1084 	__le64 *exp_bufs;
1085 	u16 ring_id;
1086 	int i, ret, size;
1087 
1088 	cpu_bufnum = xgene_start_cpu_bufnum(pdata);
1089 
1090 	for (i = 0; i < pdata->rxq_cnt; i++) {
1091 		/* allocate rx descriptor ring */
1092 		owner = xgene_derive_ring_owner(pdata);
1093 		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
1094 		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1095 						      RING_CFGSIZE_16KB,
1096 						      ring_id);
1097 		if (!rx_ring) {
1098 			ret = -ENOMEM;
1099 			goto err;
1100 		}
1101 
1102 		/* allocate buffer pool for receiving packets */
1103 		owner = xgene_derive_ring_owner(pdata);
1104 		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1105 		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1106 						       RING_CFGSIZE_2KB,
1107 						       ring_id);
1108 		if (!buf_pool) {
1109 			ret = -ENOMEM;
1110 			goto err;
1111 		}
1112 
1113 		rx_ring->nbufpool = NUM_BUFPOOL;
1114 		rx_ring->buf_pool = buf_pool;
1115 		rx_ring->irq = pdata->irqs[i];
1116 		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
1117 						sizeof(struct sk_buff *),
1118 						GFP_KERNEL);
1119 		if (!buf_pool->rx_skb) {
1120 			ret = -ENOMEM;
1121 			goto err;
1122 		}
1123 
1124 		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
1125 		rx_ring->buf_pool = buf_pool;
1126 		pdata->rx_ring[i] = rx_ring;
1127 	}
1128 
1129 	for (i = 0; i < pdata->txq_cnt; i++) {
1130 		/* allocate tx descriptor ring */
1131 		owner = xgene_derive_ring_owner(pdata);
1132 		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
1133 		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1134 						      RING_CFGSIZE_16KB,
1135 						      ring_id);
1136 		if (!tx_ring) {
1137 			ret = -ENOMEM;
1138 			goto err;
1139 		}
1140 
1141 		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
1142 		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
1143 					       GFP_KERNEL | __GFP_ZERO);
1144 		if (!exp_bufs) {
1145 			ret = -ENOMEM;
1146 			goto err;
1147 		}
1148 		tx_ring->exp_bufs = exp_bufs;
1149 
1150 		pdata->tx_ring[i] = tx_ring;
1151 
1152 		if (!pdata->cq_cnt) {
1153 			cp_ring = pdata->rx_ring[i];
1154 		} else {
1155 			/* allocate tx completion descriptor ring */
1156 			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
1157 							 cpu_bufnum++);
1158 			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1159 							      RING_CFGSIZE_16KB,
1160 							      ring_id);
1161 			if (!cp_ring) {
1162 				ret = -ENOMEM;
1163 				goto err;
1164 			}
1165 
1166 			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
1167 			cp_ring->index = i;
1168 		}
1169 
1170 		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
1171 					       sizeof(struct sk_buff *),
1172 					       GFP_KERNEL);
1173 		if (!cp_ring->cp_skb) {
1174 			ret = -ENOMEM;
1175 			goto err;
1176 		}
1177 
1178 		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
1179 		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
1180 						      size, GFP_KERNEL);
1181 		if (!cp_ring->frag_dma_addr) {
1182 			devm_kfree(dev, cp_ring->cp_skb);
1183 			ret = -ENOMEM;
1184 			goto err;
1185 		}
1186 
1187 		tx_ring->cp_ring = cp_ring;
1188 		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1189 	}
1190 
1191 	pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1192 	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1193 
1194 	return 0;
1195 
1196 err:
1197 	xgene_enet_free_desc_rings(pdata);
1198 	return ret;
1199 }
1200 
1201 static struct rtnl_link_stats64 *xgene_enet_get_stats64(
1202 			struct net_device *ndev,
1203 			struct rtnl_link_stats64 *storage)
1204 {
1205 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1206 	struct rtnl_link_stats64 *stats = &pdata->stats;
1207 	struct xgene_enet_desc_ring *ring;
1208 	int i;
1209 
1210 	memset(stats, 0, sizeof(struct rtnl_link_stats64));
1211 	for (i = 0; i < pdata->txq_cnt; i++) {
1212 		ring = pdata->tx_ring[i];
1213 		if (ring) {
1214 			stats->tx_packets += ring->tx_packets;
1215 			stats->tx_bytes += ring->tx_bytes;
1216 		}
1217 	}
1218 
1219 	for (i = 0; i < pdata->rxq_cnt; i++) {
1220 		ring = pdata->rx_ring[i];
1221 		if (ring) {
1222 			stats->rx_packets += ring->rx_packets;
1223 			stats->rx_bytes += ring->rx_bytes;
1224 			stats->rx_errors += ring->rx_length_errors +
1225 				ring->rx_crc_errors +
1226 				ring->rx_frame_errors +
1227 				ring->rx_fifo_errors;
1228 			stats->rx_dropped += ring->rx_dropped;
1229 		}
1230 	}
1231 	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
1232 
1233 	return storage;
1234 }
1235 
1236 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
1237 {
1238 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1239 	int ret;
1240 
1241 	ret = eth_mac_addr(ndev, addr);
1242 	if (ret)
1243 		return ret;
1244 	pdata->mac_ops->set_mac_addr(pdata);
1245 
1246 	return ret;
1247 }
1248 
1249 static const struct net_device_ops xgene_ndev_ops = {
1250 	.ndo_open = xgene_enet_open,
1251 	.ndo_stop = xgene_enet_close,
1252 	.ndo_start_xmit = xgene_enet_start_xmit,
1253 	.ndo_tx_timeout = xgene_enet_timeout,
1254 	.ndo_get_stats64 = xgene_enet_get_stats64,
1255 	.ndo_set_mac_address = xgene_enet_set_mac_address,
1256 };
1257 
1258 #ifdef CONFIG_ACPI
1259 static void xgene_get_port_id_acpi(struct device *dev,
1260 				  struct xgene_enet_pdata *pdata)
1261 {
1262 	acpi_status status;
1263 	u64 temp;
1264 
1265 	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
1266 	if (ACPI_FAILURE(status)) {
1267 		pdata->port_id = 0;
1268 	} else {
1269 		pdata->port_id = temp;
1270 	}
1271 
1272 	return;
1273 }
1274 #endif
1275 
1276 static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
1277 {
1278 	u32 id = 0;
1279 
1280 	of_property_read_u32(dev->of_node, "port-id", &id);
1281 
1282 	pdata->port_id = id & BIT(0);
1283 
1284 	return;
1285 }
1286 
1287 static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
1288 {
1289 	struct device *dev = &pdata->pdev->dev;
1290 	int delay, ret;
1291 
1292 	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
1293 	if (ret) {
1294 		pdata->tx_delay = 4;
1295 		return 0;
1296 	}
1297 
1298 	if (delay < 0 || delay > 7) {
1299 		dev_err(dev, "Invalid tx-delay specified\n");
1300 		return -EINVAL;
1301 	}
1302 
1303 	pdata->tx_delay = delay;
1304 
1305 	return 0;
1306 }
1307 
1308 static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
1309 {
1310 	struct device *dev = &pdata->pdev->dev;
1311 	int delay, ret;
1312 
1313 	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
1314 	if (ret) {
1315 		pdata->rx_delay = 2;
1316 		return 0;
1317 	}
1318 
1319 	if (delay < 0 || delay > 7) {
1320 		dev_err(dev, "Invalid rx-delay specified\n");
1321 		return -EINVAL;
1322 	}
1323 
1324 	pdata->rx_delay = delay;
1325 
1326 	return 0;
1327 }
1328 
1329 static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1330 {
1331 	struct platform_device *pdev = pdata->pdev;
1332 	struct device *dev = &pdev->dev;
1333 	int i, ret, max_irqs;
1334 
1335 	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1336 		max_irqs = 1;
1337 	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
1338 		max_irqs = 2;
1339 	else
1340 		max_irqs = XGENE_MAX_ENET_IRQ;
1341 
1342 	for (i = 0; i < max_irqs; i++) {
1343 		ret = platform_get_irq(pdev, i);
1344 		if (ret <= 0) {
1345 			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1346 				max_irqs = i;
1347 				pdata->rxq_cnt = max_irqs / 2;
1348 				pdata->txq_cnt = max_irqs / 2;
1349 				pdata->cq_cnt = max_irqs / 2;
1350 				break;
1351 			}
1352 			dev_err(dev, "Unable to get ENET IRQ\n");
1353 			ret = ret ? : -ENXIO;
1354 			return ret;
1355 		}
1356 		pdata->irqs[i] = ret;
1357 	}
1358 
1359 	return 0;
1360 }
1361 
1362 static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
1363 {
1364 	int ret;
1365 
1366 	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
1367 		return 0;
1368 
1369 	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
1370 		return 0;
1371 
1372 	ret = xgene_enet_phy_connect(pdata->ndev);
1373 	if (!ret)
1374 		pdata->mdio_driver = true;
1375 
1376 	return 0;
1377 }
1378 
1379 static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
1380 {
1381 	struct device *dev = &pdata->pdev->dev;
1382 
1383 	pdata->sfp_gpio_en = false;
1384 	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
1385 	    (!device_property_present(dev, "sfp-gpios") &&
1386 	     !device_property_present(dev, "rxlos-gpios")))
1387 		return;
1388 
1389 	pdata->sfp_gpio_en = true;
1390 	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
1391 	if (IS_ERR(pdata->sfp_rdy))
1392 		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
1393 }
1394 
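/* Map the ENET CSR, ring CSR and ring command regions, read the port
 * id, MAC address, PHY mode, tx/rx delays and IRQs from DT or ACPI, and
 * derive the per-block register offsets for this port.
 */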
1395 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1396 {
1397 	struct platform_device *pdev;
1398 	struct net_device *ndev;
1399 	struct device *dev;
1400 	struct resource *res;
1401 	void __iomem *base_addr;
1402 	u32 offset;
1403 	int ret = 0;
1404 
1405 	pdev = pdata->pdev;
1406 	dev = &pdev->dev;
1407 	ndev = pdata->ndev;
1408 
1409 	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
1410 	if (!res) {
1411 		dev_err(dev, "Resource enet_csr not defined\n");
1412 		return -ENODEV;
1413 	}
1414 	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
1415 	if (!pdata->base_addr) {
1416 		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
1417 		return -ENOMEM;
1418 	}
1419 
1420 	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
1421 	if (!res) {
1422 		dev_err(dev, "Resource ring_csr not defined\n");
1423 		return -ENODEV;
1424 	}
1425 	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
1426 							resource_size(res));
1427 	if (!pdata->ring_csr_addr) {
1428 		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
1429 		return -ENOMEM;
1430 	}
1431 
1432 	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
1433 	if (!res) {
1434 		dev_err(dev, "Resource ring_cmd not defined\n");
1435 		return -ENODEV;
1436 	}
1437 	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
1438 							resource_size(res));
1439 	if (!pdata->ring_cmd_addr) {
1440 		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
1441 		return -ENOMEM;
1442 	}
1443 
1444 	if (dev->of_node)
1445 		xgene_get_port_id_dt(dev, pdata);
1446 #ifdef CONFIG_ACPI
1447 	else
1448 		xgene_get_port_id_acpi(dev, pdata);
1449 #endif
1450 
1451 	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
1452 		eth_hw_addr_random(ndev);
1453 
1454 	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
1455 
1456 	pdata->phy_mode = device_get_phy_mode(dev);
1457 	if (pdata->phy_mode < 0) {
1458 		dev_err(dev, "Unable to get phy-connection-type\n");
1459 		return pdata->phy_mode;
1460 	}
1461 	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
1462 	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1463 	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1464 		dev_err(dev, "Incorrect phy-connection-type specified\n");
1465 		return -ENODEV;
1466 	}
1467 
1468 	ret = xgene_get_tx_delay(pdata);
1469 	if (ret)
1470 		return ret;
1471 
1472 	ret = xgene_get_rx_delay(pdata);
1473 	if (ret)
1474 		return ret;
1475 
1476 	ret = xgene_enet_get_irqs(pdata);
1477 	if (ret)
1478 		return ret;
1479 
1480 	ret = xgene_enet_check_phy_handle(pdata);
1481 	if (ret)
1482 		return ret;
1483 
1484 	xgene_enet_gpiod_get(pdata);
1485 
1486 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
1487 	if (IS_ERR(pdata->clk)) {
1488 		/* Firmware may have set up the clock already. */
1489 		dev_info(dev, "clocks have been set up already\n");
1490 	}
1491 
1492 	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1493 		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
1494 	else
1495 		base_addr = pdata->base_addr;
1496 	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
1497 	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
1498 	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
1499 	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
1500 	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
1501 	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
1502 		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
1503 		offset = (pdata->enet_id == XGENE_ENET1) ?
1504 			  BLOCK_ETH_MAC_CSR_OFFSET :
1505 			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
1506 		pdata->mcx_mac_csr_addr = base_addr + offset;
1507 	} else {
1508 		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
1509 		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
1510 		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
1511 	}
1512 	pdata->rx_buff_cnt = NUM_PKT_BUF;
1513 
1514 	return 0;
1515 }
1516 
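/* Bring up the hardware: reset the port, create the descriptor rings,
 * fill the buffer pools, and either initialize the classifier
 * pre-parser tree (XGMII mode) or program the CLE bypass for the other
 * modes.
 */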
1517 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
1518 {
1519 	struct xgene_enet_cle *enet_cle = &pdata->cle;
1520 	struct net_device *ndev = pdata->ndev;
1521 	struct xgene_enet_desc_ring *buf_pool;
1522 	u16 dst_ring_num;
1523 	int i, ret;
1524 
1525 	ret = pdata->port_ops->reset(pdata);
1526 	if (ret)
1527 		return ret;
1528 
1529 	ret = xgene_enet_create_desc_rings(ndev);
1530 	if (ret) {
1531 		netdev_err(ndev, "Error in ring configuration\n");
1532 		return ret;
1533 	}
1534 
1535 	/* setup buffer pool */
1536 	for (i = 0; i < pdata->rxq_cnt; i++) {
1537 		buf_pool = pdata->rx_ring[i]->buf_pool;
1538 		xgene_enet_init_bufpool(buf_pool);
1539 		ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
1540 		if (ret)
1541 			goto err;
1542 	}
1543 
1544 	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1545 	buf_pool = pdata->rx_ring[0]->buf_pool;
1546 	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1547 		/* Initialize and enable the pre-classifier tree */
1548 		enet_cle->max_nodes = 512;
1549 		enet_cle->max_dbptrs = 1024;
1550 		enet_cle->parsers = 3;
1551 		enet_cle->active_parser = PARSER_ALL;
1552 		enet_cle->ptree.start_node = 0;
1553 		enet_cle->ptree.start_dbptr = 0;
1554 		enet_cle->jump_bytes = 8;
1555 		ret = pdata->cle_ops->cle_init(pdata);
1556 		if (ret) {
1557 			netdev_err(ndev, "Preclass Tree init error\n");
1558 			goto err;
1559 		}
1560 	} else {
1561 		pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
1562 	}
1563 
1564 	pdata->phy_speed = SPEED_UNKNOWN;
1565 	pdata->mac_ops->init(pdata);
1566 
1567 	return ret;
1568 
1569 err:
1570 	xgene_enet_delete_desc_rings(pdata);
1571 	return ret;
1572 }
1573 
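/* Pick the MAC, port, CLE and ring operations plus the queue counts and
 * buffer/ring numbering ranges based on the PHY interface mode, the
 * ENET generation and the port id.
 */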
1574 static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
1575 {
1576 	switch (pdata->phy_mode) {
1577 	case PHY_INTERFACE_MODE_RGMII:
1578 		pdata->mac_ops = &xgene_gmac_ops;
1579 		pdata->port_ops = &xgene_gport_ops;
1580 		pdata->rm = RM3;
1581 		pdata->rxq_cnt = 1;
1582 		pdata->txq_cnt = 1;
1583 		pdata->cq_cnt = 0;
1584 		break;
1585 	case PHY_INTERFACE_MODE_SGMII:
1586 		pdata->mac_ops = &xgene_sgmac_ops;
1587 		pdata->port_ops = &xgene_sgport_ops;
1588 		pdata->rm = RM1;
1589 		pdata->rxq_cnt = 1;
1590 		pdata->txq_cnt = 1;
1591 		pdata->cq_cnt = 1;
1592 		break;
1593 	default:
1594 		pdata->mac_ops = &xgene_xgmac_ops;
1595 		pdata->port_ops = &xgene_xgport_ops;
1596 		pdata->cle_ops = &xgene_cle3in_ops;
1597 		pdata->rm = RM0;
1598 		if (!pdata->rxq_cnt) {
1599 			pdata->rxq_cnt = XGENE_NUM_RX_RING;
1600 			pdata->txq_cnt = XGENE_NUM_TX_RING;
1601 			pdata->cq_cnt = XGENE_NUM_TXC_RING;
1602 		}
1603 		break;
1604 	}
1605 
1606 	if (pdata->enet_id == XGENE_ENET1) {
1607 		switch (pdata->port_id) {
1608 		case 0:
1609 			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1610 				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1611 				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1612 				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1613 				pdata->ring_num = START_RING_NUM_0;
1614 			} else {
1615 				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
1616 				pdata->eth_bufnum = START_ETH_BUFNUM_0;
1617 				pdata->bp_bufnum = START_BP_BUFNUM_0;
1618 				pdata->ring_num = START_RING_NUM_0;
1619 			}
1620 			break;
1621 		case 1:
1622 			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1623 				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
1624 				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
1625 				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
1626 				pdata->ring_num = XG_START_RING_NUM_1;
1627 			} else {
1628 				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
1629 				pdata->eth_bufnum = START_ETH_BUFNUM_1;
1630 				pdata->bp_bufnum = START_BP_BUFNUM_1;
1631 				pdata->ring_num = START_RING_NUM_1;
1632 			}
1633 			break;
1634 		default:
1635 			break;
1636 		}
1637 		pdata->ring_ops = &xgene_ring1_ops;
1638 	} else {
1639 		switch (pdata->port_id) {
1640 		case 0:
1641 			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1642 			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1643 			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1644 			pdata->ring_num = X2_START_RING_NUM_0;
1645 			break;
1646 		case 1:
1647 			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
1648 			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
1649 			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
1650 			pdata->ring_num = X2_START_RING_NUM_1;
1651 			break;
1652 		default:
1653 			break;
1654 		}
1655 		pdata->rm = RM0;
1656 		pdata->ring_ops = &xgene_ring2_ops;
1657 	}
1658 }
1659 
1660 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
1661 {
1662 	struct napi_struct *napi;
1663 	int i;
1664 
1665 	for (i = 0; i < pdata->rxq_cnt; i++) {
1666 		napi = &pdata->rx_ring[i]->napi;
1667 		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1668 			       NAPI_POLL_WEIGHT);
1669 	}
1670 
1671 	for (i = 0; i < pdata->cq_cnt; i++) {
1672 		napi = &pdata->tx_ring[i]->cp_ring->napi;
1673 		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1674 			       NAPI_POLL_WEIGHT);
1675 	}
1676 }
1677 
1678 static int xgene_enet_probe(struct platform_device *pdev)
1679 {
1680 	struct net_device *ndev;
1681 	struct xgene_enet_pdata *pdata;
1682 	struct device *dev = &pdev->dev;
1683 	void (*link_state)(struct work_struct *);
1684 	const struct of_device_id *of_id;
1685 	int ret;
1686 
1687 	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
1688 				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
1689 	if (!ndev)
1690 		return -ENOMEM;
1691 
1692 	pdata = netdev_priv(ndev);
1693 
1694 	pdata->pdev = pdev;
1695 	pdata->ndev = ndev;
1696 	SET_NETDEV_DEV(ndev, dev);
1697 	platform_set_drvdata(pdev, pdata);
1698 	ndev->netdev_ops = &xgene_ndev_ops;
1699 	xgene_enet_set_ethtool_ops(ndev);
1700 	ndev->features |= NETIF_F_IP_CSUM |
1701 			  NETIF_F_GSO |
1702 			  NETIF_F_GRO |
1703 			  NETIF_F_SG;
1704 
1705 	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
1706 	if (of_id) {
1707 		pdata->enet_id = (enum xgene_enet_id)of_id->data;
1708 	}
1709 #ifdef CONFIG_ACPI
1710 	else {
1711 		const struct acpi_device_id *acpi_id;
1712 
1713 		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
1714 		if (acpi_id)
1715 			pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
1716 	}
1717 #endif
1718 	if (!pdata->enet_id) {
1719 		ret = -ENODEV;
1720 		goto err;
1721 	}
1722 
1723 	ret = xgene_enet_get_resources(pdata);
1724 	if (ret)
1725 		goto err;
1726 
1727 	xgene_enet_setup_ops(pdata);
1728 
1729 	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1730 		ndev->features |= NETIF_F_TSO;
1731 		spin_lock_init(&pdata->mss_lock);
1732 	}
1733 	ndev->hw_features = ndev->features;
1734 
1735 	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
1736 	if (ret) {
1737 		netdev_err(ndev, "No usable DMA configuration\n");
1738 		goto err;
1739 	}
1740 
1741 	ret = xgene_enet_init_hw(pdata);
1742 	if (ret)
1743 		goto err;
1744 
1745 	link_state = pdata->mac_ops->link_state;
1746 	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1747 		INIT_DELAYED_WORK(&pdata->link_work, link_state);
1748 	} else if (!pdata->mdio_driver) {
1749 		if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1750 			ret = xgene_enet_mdio_config(pdata);
1751 		else
1752 			INIT_DELAYED_WORK(&pdata->link_work, link_state);
1753 
1754 		if (ret)
1755 			goto err1;
1756 	}
1757 
1758 	xgene_enet_napi_add(pdata);
1759 	ret = register_netdev(ndev);
1760 	if (ret) {
1761 		netdev_err(ndev, "Failed to register netdev\n");
1762 		goto err2;
1763 	}
1764 
1765 	return 0;
1766 
1767 err2:
1768 	/*
1769 	 * If necessary, free_netdev() will call netif_napi_del() and undo
1770 	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
1771 	 */
1772 
1773 	if (pdata->mdio_driver)
1774 		xgene_enet_phy_disconnect(pdata);
1775 	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1776 		xgene_enet_mdio_remove(pdata);
1777 err1:
1778 	xgene_enet_delete_desc_rings(pdata);
1779 err:
1780 	free_netdev(ndev);
1781 	return ret;
1782 }
1783 
1784 static int xgene_enet_remove(struct platform_device *pdev)
1785 {
1786 	struct xgene_enet_pdata *pdata;
1787 	struct net_device *ndev;
1788 
1789 	pdata = platform_get_drvdata(pdev);
1790 	ndev = pdata->ndev;
1791 
1792 	rtnl_lock();
1793 	if (netif_running(ndev))
1794 		dev_close(ndev);
1795 	rtnl_unlock();
1796 
1797 	if (pdata->mdio_driver)
1798 		xgene_enet_phy_disconnect(pdata);
1799 	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1800 		xgene_enet_mdio_remove(pdata);
1801 
1802 	unregister_netdev(ndev);
1803 	pdata->port_ops->shutdown(pdata);
1804 	xgene_enet_delete_desc_rings(pdata);
1805 	free_netdev(ndev);
1806 
1807 	return 0;
1808 }
1809 
1810 static void xgene_enet_shutdown(struct platform_device *pdev)
1811 {
1812 	struct xgene_enet_pdata *pdata;
1813 
1814 	pdata = platform_get_drvdata(pdev);
1815 	if (!pdata)
1816 		return;
1817 
1818 	if (!pdata->ndev)
1819 		return;
1820 
1821 	xgene_enet_remove(pdev);
1822 }
1823 
1824 #ifdef CONFIG_ACPI
1825 static const struct acpi_device_id xgene_enet_acpi_match[] = {
1826 	{ "APMC0D05", XGENE_ENET1},
1827 	{ "APMC0D30", XGENE_ENET1},
1828 	{ "APMC0D31", XGENE_ENET1},
1829 	{ "APMC0D3F", XGENE_ENET1},
1830 	{ "APMC0D26", XGENE_ENET2},
1831 	{ "APMC0D25", XGENE_ENET2},
1832 	{ }
1833 };
1834 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
1835 #endif
1836 
1837 #ifdef CONFIG_OF
1838 static const struct of_device_id xgene_enet_of_match[] = {
1839 	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
1840 	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
1841 	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
1842 	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
1843 	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
1844 	{},
1845 };
1846 
1847 MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
1848 #endif
1849 
1850 static struct platform_driver xgene_enet_driver = {
1851 	.driver = {
1852 		   .name = "xgene-enet",
1853 		   .of_match_table = of_match_ptr(xgene_enet_of_match),
1854 		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
1855 	},
1856 	.probe = xgene_enet_probe,
1857 	.remove = xgene_enet_remove,
1858 	.shutdown = xgene_enet_shutdown,
1859 };
1860 
1861 module_platform_driver(xgene_enet_driver);
1862 
1863 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
1864 MODULE_VERSION(XGENE_DRV_VERSION);
1865 MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
1866 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
1867 MODULE_LICENSE("GPL");
1868