xref: /linux/drivers/net/ethernet/xilinx/xilinx_axienet_main.c (revision a634dda26186cf9a51567020fcce52bcba5e1e59)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet, which is used in the Virtex-6
 * and Spartan-6 device families.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/circ_buf.h>
#include <net/netdev_queues.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
#define DMA_NUM_APP_WORDS		5
#define LEN_APP				4
#define RX_BUF_NUM_DEFAULT		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

static void axienet_rx_submit_desc(struct net_device *ndev);

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

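/* The skb rings indexed below are sized to powers of two (TX_BD_NUM_MAX,
 * RX_BUF_NUM_DEFAULT), so a free-running ring index can be wrapped by
 * simply masking it with (size - 1): e.g. with the 128-entry Rx ring,
 * index 130 maps to slot 130 & 127 = 2.
 */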
static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
{
	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
}

static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
{
	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
}

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

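	/* The MSB is shifted in two 16-bit steps rather than once by 32:
	 * if dma_addr_t is only 32 bits wide, a single 32-bit shift would
	 * be undefined behaviour, while the double shift safely yields 0.
	 */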
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi
 * Ethernet driver's stop routine is invoked.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp:		Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
	if (result > 255)
		result = 255;

	return result;
}
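
/* Example: with the default 125 MHz AXI clock, one timeout interval is
 * 125 / 125000000 s = 1 us, so a 50 us coalesce delay programs a timer
 * value of 50; requests beyond 255 us saturate at the 8-bit maximum.
 */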

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp:		Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
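	/* Channel control register layout, per the field definitions used
	 * below: the interrupt coalesce count occupies the byte at
	 * XAXIDMA_COALESCE_SHIFT (bits 23:16) and the delay timeout the byte
	 * at XAXIDMA_DELAY_SHIFT (bits 31:24), alongside the interrupt-enable
	 * and run/stop bits in the low bits of the register.
	 */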
	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is then ready to run, but it will only start
	 * transmitting once the tail pointer register is written.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0 on success; -ENOMEM on failure.
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. It initializes the descriptors with the required default values
 * and is invoked when the Axi Ethernet driver is reset.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

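	/* Pre-link the Tx descriptors into a ring: each BD's next pointer
	 * references the following BD, and the last one wraps back to the
	 * first, so the DMA engine can walk the ring indefinitely. The Rx
	 * ring below is linked the same way.
	 */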
	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		     (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function initializes the multicast table. The Axi Ethernet basic
 * multicast support has a four-entry multicast table which is set up here.
 * Additionally, this function goes into the net_device_ops structure entry
 * ndo_set_multicast_list, so it is called whenever the multicast table
 * entries need to be updated.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	reg = axienet_ior(lp, XAE_FMI_OFFSET);
	reg &= ~XAE_FMI_PM_MASK;
	if (ndev->flags & IFF_PROMISC)
		reg |= XAE_FMI_PM_MASK;
	axienet_iow(lp, XAE_FMI_OFFSET, reg);

	if (ndev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		reg &= 0xFFFFFF00;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
		axienet_iow(lp, XAE_AF1_OFFSET, 0);
		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
		axienet_iow(lp, XAE_AM1_OFFSET, 0);
		axienet_iow(lp, XAE_FFE_OFFSET, 1);
		i = 1;
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg &= 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	}

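	/* Disable the remaining entries in the filter: the low byte of FMI
	 * selects which CAM entry the AF0/AF1/AM0/AM1/FFE registers address,
	 * and clearing FFE turns the selected entry off.
	 */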
	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg &= 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

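/**
 * axienet_stat - Read a statistics counter, extended to 64 bits in software
 * @lp:		Pointer to the axienet_local structure
 * @stat:	Counter to read
 *
 * Software keeps a 64-bit base value plus the last raw 32-bit counter
 * reading for each statistic; the current value is the base plus the delta
 * accumulated since the last refresh. While a reset is in progress the
 * frozen base value is returned, since the hardware counter cannot be
 * trusted at that point.
 */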
static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
{
	u32 counter;

	if (lp->reset_in_progress)
		return lp->hw_stat_base[stat];

	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
}

static void axienet_stats_update(struct axienet_local *lp, bool reset)
{
	enum temac_stat stat;

	write_seqcount_begin(&lp->hw_stats_seqcount);
	lp->reset_in_progress = reset;
	for (stat = 0; stat < STAT_COUNT; stat++) {
		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
		lp->hw_last_counter[stat] = counter;
	}
	write_seqcount_end(&lp->hw_stats_seqcount);
}

static void axienet_refresh_stats(struct work_struct *work)
{
	struct axienet_local *lp = container_of(work, struct axienet_local,
						stats_work.work);

	mutex_lock(&lp->stats_lock);
	axienet_stats_update(lp, false);
	mutex_unlock(&lp->stats_lock);

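	/* The hardware statistics counters are only 32 bits wide: at
	 * 2.5 Gbit/s a byte counter accumulates 2^32 bytes (~4.3 GB) in
	 * roughly 13.7 seconds, so refresh often enough that no counter can
	 * wrap more than once between two updates.
	 */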
	/* Just less than 2^32 bytes at 2.5 GBit/s */
	schedule_delayed_work(&lp->stats_work, 13 * HZ);
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Save statistics counters in case they will be reset */
	mutex_lock(&lp->stats_lock);
	if (lp->features & XAE_FEATURE_STATS)
		axienet_stats_update(lp, true);

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		goto out;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		goto out;
	}

	/* Update statistics counters with new values */
	if (lp->features & XAE_FEATURE_STATS) {
		enum temac_stat stat;

		write_seqcount_begin(&lp->hw_stats_seqcount);
		lp->reset_in_progress = false;
		for (stat = 0; stat < STAT_COUNT; stat++) {
			u32 counter =
				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

			lp->hw_stat_base[stat] +=
				lp->hw_last_counter[stat] - counter;
			lp->hw_last_counter[stat] = counter;
		}
		write_seqcount_end(&lp->hw_stats_seqcount);
	}

out:
	mutex_unlock(&lp->stats_lock);
	return ret;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp:		Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp:		Pointer to the axienet_local structure
 * @first_bd:	Index of first descriptor to clean up
 * @nr_bds:	Max number of descriptors to clean up
 * @force:	Whether to clean descriptors even if not complete
 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 *		in all cleaned-up descriptors. Ignored if NULL.
 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 *
 * Called either after a successful transmit operation, or after there was
 * an error when setting up the chain.
 * Returns the number of packets handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	int i, packets = 0;
	dma_addr_t phys;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
			napi_consume_skb(cur_p->skb, budget);
			packets++;
		}

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	if (!force) {
		lp->tx_bd_ci += i;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;
	}

	return packets;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
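	/* Descriptors are consumed and cleaned up in ring order, so the slot
	 * num_frag entries ahead of the tail is the last of a would-be
	 * submission to become free; if it is free, every slot before it
	 * must be free as well.
	 */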
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data:       Pointer to the axienet_local structure.
 * @result:     error reporting through dmaengine_result.
 *
 * This function is called by the dmaengine driver for the TX channel to
 * notify that the transmit is done.
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
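	/* Wake the queue once at least 2 * MAX_SKB_FRAGS slots are free,
	 * enough for a further worst-case (1 + MAX_SKB_FRAGS fragment) skb.
	 * CIRC_SPACE() derives the free slot count from the free-running
	 * head/tail indices: e.g. head = 5, tail = 2 and a 4096-entry ring
	 * give (2 - 5 - 1) & 4095 = 4092 free slots.
	 */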
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2 * MAX_SKB_FRAGS);
}

/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb:        sk_buff pointer that contains data to be Txed.
 * @ndev:       Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success (also returned for non-space errors,
 *         in which case the skb is dropped).
 *         NETDEV_TX_BUSY when no free element is available in the TX skb
 *         ring buffer.
 *
 * This function is invoked to initiate transmission. It sets up the skb,
 * registers the DMA callback and submits the DMA transaction.
 * Additionally, if checksum offloading is supported, it populates AXI
 * Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
903 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
904 			/* Tx Full Checksum Offload Enabled */
905 			app_metadata[0] |= 2;
906 		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
907 			csum_start_off = skb_transport_offset(skb);
908 			csum_index_off = csum_start_off + skb->csum_offset;
909 			/* Tx Partial Checksum Offload Enabled */
910 			app_metadata[0] |= 1;
911 			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
912 		}
913 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
914 		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
915 	}
916 
917 	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
918 			sg_len, DMA_MEM_TO_DEV,
919 			DMA_PREP_INTERRUPT, (void *)app_metadata);
920 	if (!dma_tx_desc)
921 		goto xmit_error_unmap_sg;
922 
923 	skbuf_dma->skb = skb;
924 	skbuf_dma->sg_len = sg_len;
925 	dma_tx_desc->callback_param = lp;
926 	dma_tx_desc->callback_result = axienet_dma_tx_cb;
927 	txq = skb_get_tx_queue(lp->ndev, skb);
928 	netdev_tx_sent_queue(txq, skb->len);
929 	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
930 			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
931 
932 	dmaengine_submit(dma_tx_desc);
933 	dma_async_issue_pending(lp->tx_chan);
934 	return NETDEV_TX_OK;
935 
936 xmit_error_unmap_sg:
937 	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
938 xmit_error_drop_skb:
939 	dev_kfree_skb_any(skb);
940 	return NETDEV_TX_OK;
941 }
942 
943 /**
944  * axienet_tx_poll - Invoked once a transmit is completed by the
945  * Axi DMA Tx channel.
946  * @napi:	Pointer to NAPI structure.
947  * @budget:	Max number of TX packets to process.
948  *
949  * Return: Number of TX packets processed.
950  *
 * This function is invoked from NAPI processing to handle completion of
 * transmit operations. It clears the fields in the corresponding Tx BDs
 * and unmaps the corresponding buffers so that the CPU can regain
 * ownership of the buffers. It finally invokes "netif_wake_queue" to
 * restart transmission if required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
					&size, budget);

	if (packets) {
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data:       Pointer to the axienet_local structure.
 * @result:     error reporting through dmaengine_result.
 *
 * This function is called by the dmaengine driver for the RX channel to
 * notify that a packet has been received.
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);
	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);
	axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi:	Pointer to NAPI structure.
 * @budget:	Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		u32 cr = lp->tx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		if (napi_schedule_prep(&lp->napi_tx)) {
			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
			__napi_schedule(&lp->napi_tx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		if (napi_schedule_prep(&lp->napi_rx)) {
			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
			__napi_schedule(&lp->napi_rx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_dropped++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_rx_submit_desc - Submit an Rx descriptor to the dmaengine
 * @ndev:	net_device pointer
 *
 * Allocate an skbuff, map it for DMA, obtain a DMA descriptor, then set
 * the callback information and submit the descriptor.
 *
 */
static void axienet_rx_submit_desc(struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	struct sk_buff *skb;
	dma_addr_t addr;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
	if (!skbuf_dma)
		return;

	lp->rx_ring_head++;
	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
	if (!skb)
		return;

	sg_init_table(skbuf_dma->sgl, 1);
	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, addr))) {
		if (net_ratelimit())
			netdev_err(ndev, "DMA mapping error\n");
		goto rx_submit_err_free_skb;
	}
	sg_dma_address(skbuf_dma->sgl) = addr;
	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
					      1, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
	if (!dma_rx_desc)
		goto rx_submit_err_unmap_skb;

	skbuf_dma->skb = skb;
	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
	skbuf_dma->desc = dma_rx_desc;
	dma_rx_desc->callback_param = lp;
	dma_rx_desc->callback_result = axienet_dma_rx_cb;
	dmaengine_submit(dma_rx_desc);

	return;

rx_submit_err_unmap_skb:
	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
rx_submit_err_free_skb:
	dev_kfree_skb(skb);
}

/**
 * axienet_init_dmaengine - init the dmaengine code.
 * @ndev:       Pointer to net_device structure
 *
 * Return: 0 on success, or a negative error value on failure.
 *
 * This is the dmaengine initialization code.
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
				  GFP_KERNEL);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
				  GFP_KERNEL);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}

/**
 * axienet_init_legacy_dma - init the legacy DMA code
 * @ndev:       Pointer to net_device structure
 *
 * Return: 0 on success, or a negative error value on failure.
 *
 * This is the legacy DMA initialization code. It also registers the
 * interrupt service routines and enables the interrupt lines and ISR
 * handling.
 *
 */
static int axienet_init_legacy_dma(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Enable worker thread for Axi DMA error handling */
	lp->stopping = false;
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It connects the PHY and calls
 * phylink_start to start it. It also registers the interrupt service
 * routines and enables interrupt handling. The Axi Ethernet core is reset
 * through the Axi DMA core, and the buffer descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);
	if (ret)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Start the statistics refresh work */
	schedule_delayed_work(&lp->stats_work, 0);

	if (lp->use_dmaengine) {
		/* Enable interrupts for Axi Ethernet core (if defined) */
		if (lp->eth_irq > 0) {
			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
					  ndev->name, ndev);
			if (ret)
				goto err_phy;
		}

		ret = axienet_init_dmaengine(ndev);
		if (ret < 0)
			goto err_free_eth_irq;
	} else {
		ret = axienet_init_legacy_dma(ndev);
		if (ret)
			goto err_phy;
	}

	return 0;

err_free_eth_irq:
	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
err_phy:
	cancel_delayed_work_sync(&lp->stats_work);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It stops the PHY via phylink and then
 * disconnects it. It also removes the interrupt handlers and disables the
 * interrupts. The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	int i;

	if (!lp->use_dmaengine) {
		WRITE_ONCE(lp->stopping, true);
		flush_work(&lp->dma_err_task);

		napi_disable(&lp->napi_tx);
		napi_disable(&lp->napi_rx);
	}

	cancel_delayed_work_sync(&lp->stats_work);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	if (!lp->use_dmaengine) {
		axienet_dma_stop(lp);
		cancel_work_sync(&lp->dma_err_task);
		free_irq(lp->tx_irq, ndev);
		free_irq(lp->rx_irq, ndev);
		axienet_dma_bd_release(ndev);
	} else {
		dmaengine_terminate_sync(lp->tx_chan);
		dmaengine_synchronize(lp->tx_chan);
		dmaengine_terminate_sync(lp->rx_chan);
		dmaengine_synchronize(lp->rx_chan);

		for (i = 0; i < TX_BD_NUM_MAX; i++)
			kfree(lp->tx_skb_ring[i]);
		kfree(lp->tx_skb_ring);
		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
			kfree(lp->rx_skb_ring[i]);
		kfree(lp->rx_skb_ring);

		dma_release_channel(lp->rx_chan);
		dma_release_channel(lp->tx_chan);
	}

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
1703  * Return: 0 on success, -EBUSY if the device is up, or -EINVAL if the new
1704  *	    MTU does not fit in the configured Rx memory (lp->rxmem).
1705  *
1706  * This is the change mtu driver routine. It verifies that the new MTU fits in
1707  * the hardware Rx memory; the MTU can only be changed while the device is down.
1708  */
1709 static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1710 {
1711 	struct axienet_local *lp = netdev_priv(ndev);
1712 
1713 	if (netif_running(ndev))
1714 		return -EBUSY;
1715 
1716 	if (new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE > lp->rxmem)
1717 		return -EINVAL;
1719 
1720 	WRITE_ONCE(ndev->mtu, new_mtu);
1721 
1722 	return 0;
1723 }
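
/* Worked example of the check above, assuming XAE_TRL_SIZE is the 4-byte
 * FCS trailer and VLAN_ETH_HLEN the 18-byte VLAN Ethernet header: with
 * "xlnx,rxmem" = 4096, the largest MTU accepted is 4096 - 18 - 4 = 4074.
 */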
1724 
1725 #ifdef CONFIG_NET_POLL_CONTROLLER
1726 /**
1727  * axienet_poll_controller - Axi Ethernet poll mechanism.
1728  * @ndev:	Pointer to net_device structure
1729  *
1730  * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1731  * to polling the ISRs and are enabled back after the polling is done.
1732  */
1733 static void axienet_poll_controller(struct net_device *ndev)
1734 {
1735 	struct axienet_local *lp = netdev_priv(ndev);
1736 
1737 	disable_irq(lp->tx_irq);
1738 	disable_irq(lp->rx_irq);
1739 	axienet_rx_irq(lp->rx_irq, ndev);
1740 	axienet_tx_irq(lp->tx_irq, ndev);
1741 	enable_irq(lp->tx_irq);
1742 	enable_irq(lp->rx_irq);
1743 }
1744 #endif
1745 
1746 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1747 {
1748 	struct axienet_local *lp = netdev_priv(dev);
1749 
1750 	if (!netif_running(dev))
1751 		return -EINVAL;
1752 
1753 	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1754 }
1755 
1756 static void
1757 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1758 {
1759 	struct axienet_local *lp = netdev_priv(dev);
1760 	unsigned int start;
1761 
1762 	netdev_stats_to_stats64(stats, &dev->stats);
1763 
1764 	do {
1765 		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1766 		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1767 		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1768 	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1769 
1770 	do {
1771 		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1772 		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1773 		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1774 	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1775 
1776 	if (!(lp->features & XAE_FEATURE_STATS))
1777 		return;
1778 
1779 	do {
1780 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
1781 		stats->rx_length_errors =
1782 			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1783 		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1784 		stats->rx_frame_errors =
1785 			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1786 		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1787 				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1788 				   stats->rx_length_errors +
1789 				   stats->rx_crc_errors +
1790 				   stats->rx_frame_errors;
1791 		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1792 
1793 		stats->tx_aborted_errors =
1794 			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1795 		stats->tx_fifo_errors =
1796 			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1797 		stats->tx_window_errors =
1798 			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1799 		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1800 				   stats->tx_aborted_errors +
1801 				   stats->tx_fifo_errors +
1802 				   stats->tx_window_errors;
1803 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1804 }
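
/* The loops above follow the kernel's u64_stats lockless-reader pattern:
 * writers bump a sequence count around each update, and the reader retries
 * whenever it raced with a writer. A minimal reader sketch (the "sync" and
 * "packets" names are illustrative, not fields of this driver):
 *
 *	unsigned int start;
 *	u64 val;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&sync);
 *		val = u64_stats_read(&packets);
 *	} while (u64_stats_fetch_retry(&sync, start));
 */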
1805 
1806 static const struct net_device_ops axienet_netdev_ops = {
1807 	.ndo_open = axienet_open,
1808 	.ndo_stop = axienet_stop,
1809 	.ndo_start_xmit = axienet_start_xmit,
1810 	.ndo_get_stats64 = axienet_get_stats64,
1811 	.ndo_change_mtu	= axienet_change_mtu,
1812 	.ndo_set_mac_address = netdev_set_mac_address,
1813 	.ndo_validate_addr = eth_validate_addr,
1814 	.ndo_eth_ioctl = axienet_ioctl,
1815 	.ndo_set_rx_mode = axienet_set_multicast_list,
1816 #ifdef CONFIG_NET_POLL_CONTROLLER
1817 	.ndo_poll_controller = axienet_poll_controller,
1818 #endif
1819 };
1820 
1821 static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1822 	.ndo_open = axienet_open,
1823 	.ndo_stop = axienet_stop,
1824 	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1825 	.ndo_get_stats64 = axienet_get_stats64,
1826 	.ndo_change_mtu	= axienet_change_mtu,
1827 	.ndo_set_mac_address = netdev_set_mac_address,
1828 	.ndo_validate_addr = eth_validate_addr,
1829 	.ndo_eth_ioctl = axienet_ioctl,
1830 	.ndo_set_rx_mode = axienet_set_multicast_list,
1831 };
1832 
1833 /**
1834  * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1835  * @ndev:	Pointer to net_device structure
1836  * @ed:		Pointer to ethtool_drvinfo structure
1837  *
1838  * This implements ethtool command for getting the driver information.
1839  * Issue "ethtool -i ethX" under linux prompt to execute this function.
1840  */
1841 static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1842 					 struct ethtool_drvinfo *ed)
1843 {
1844 	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1845 	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1846 }
1847 
1848 /**
1849  * axienet_ethtools_get_regs_len - Get the total regs length present in the
1850  *				   AxiEthernet core.
1851  * @ndev:	Pointer to net_device structure
1852  *
1853  * This implements ethtool command for getting the total register length
1854  * information.
1855  *
1856  * Return: the total regs length
1857  */
1858 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1859 {
1860 	return sizeof(u32) * AXIENET_REGS_N;
1861 }
1862 
1863 /**
1864  * axienet_ethtools_get_regs - Dump the contents of all registers present
1865  *			       in AxiEthernet core.
1866  * @ndev:	Pointer to net_device structure
1867  * @regs:	Pointer to ethtool_regs structure
1868  * @ret:	Void pointer used to return the contents of the registers.
1869  *
1870  * This implements ethtool command for getting the Axi Ethernet register dump.
1871  * Issue "ethtool -d ethX" to execute this function.
1872  */
1873 static void axienet_ethtools_get_regs(struct net_device *ndev,
1874 				      struct ethtool_regs *regs, void *ret)
1875 {
1876 	u32 *data = (u32 *)ret;
1877 	size_t len = sizeof(u32) * AXIENET_REGS_N;
1878 	struct axienet_local *lp = netdev_priv(ndev);
1879 
1880 	regs->version = 0;
1881 	regs->len = len;
1882 
1883 	memset(data, 0, len);
1884 	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1885 	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1886 	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1887 	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1888 	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1889 	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1890 	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1891 	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1892 	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1893 	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1894 	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1895 	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1896 	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1897 	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1898 	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1899 	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1900 	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1901 	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1902 	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1903 	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1904 	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1905 	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1906 	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
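	/* data[23] through data[26] are not written below; they stay zero
	 * from the memset() above.
	 */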
1907 	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1908 	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1909 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1910 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1911 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1912 	if (!lp->use_dmaengine) {
1913 		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1914 		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1915 		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1916 		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1917 		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1918 		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1919 		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1920 		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1921 	}
1922 }
1923 
1924 static void
1925 axienet_ethtools_get_ringparam(struct net_device *ndev,
1926 			       struct ethtool_ringparam *ering,
1927 			       struct kernel_ethtool_ringparam *kernel_ering,
1928 			       struct netlink_ext_ack *extack)
1929 {
1930 	struct axienet_local *lp = netdev_priv(ndev);
1931 
1932 	ering->rx_max_pending = RX_BD_NUM_MAX;
1933 	ering->rx_mini_max_pending = 0;
1934 	ering->rx_jumbo_max_pending = 0;
1935 	ering->tx_max_pending = TX_BD_NUM_MAX;
1936 	ering->rx_pending = lp->rx_bd_num;
1937 	ering->rx_mini_pending = 0;
1938 	ering->rx_jumbo_pending = 0;
1939 	ering->tx_pending = lp->tx_bd_num;
1940 }
1941 
1942 static int
1943 axienet_ethtools_set_ringparam(struct net_device *ndev,
1944 			       struct ethtool_ringparam *ering,
1945 			       struct kernel_ethtool_ringparam *kernel_ering,
1946 			       struct netlink_ext_ack *extack)
1947 {
1948 	struct axienet_local *lp = netdev_priv(ndev);
1949 
1950 	if (ering->rx_pending > RX_BD_NUM_MAX ||
1951 	    ering->rx_mini_pending ||
1952 	    ering->rx_jumbo_pending ||
1953 	    ering->tx_pending < TX_BD_NUM_MIN ||
1954 	    ering->tx_pending > TX_BD_NUM_MAX)
1955 		return -EINVAL;
1956 
1957 	if (netif_running(ndev))
1958 		return -EBUSY;
1959 
1960 	lp->rx_bd_num = ering->rx_pending;
1961 	lp->tx_bd_num = ering->tx_pending;
1962 	return 0;
1963 }
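
/* Ring sizes can only be changed while the interface is down, e.g.
 * (interface name and sizes are illustrative):
 *
 *	# ip link set eth0 down
 *	# ethtool -G eth0 rx 512 tx 256
 *	# ip link set eth0 up
 */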
1964 
1965 /**
1966  * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1967  *				     Tx and Rx paths.
1968  * @ndev:	Pointer to net_device structure
1969  * @epauseparm:	Pointer to ethtool_pauseparam structure.
1970  *
1971  * This implements ethtool command for getting axi ethernet pause frame
1972  * setting. Issue "ethtool -a ethX" to execute this function.
1973  */
1974 static void
1975 axienet_ethtools_get_pauseparam(struct net_device *ndev,
1976 				struct ethtool_pauseparam *epauseparm)
1977 {
1978 	struct axienet_local *lp = netdev_priv(ndev);
1979 
1980 	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1981 }
1982 
1983 /**
1984  * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1985  *				     settings.
1986  * @ndev:	Pointer to net_device structure
1987  * @epauseparm:	Pointer to ethtool_pauseparam structure
1988  *
1989  * This implements ethtool command for enabling flow control on Rx and Tx
1990  * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1991  * function.
1992  *
1993  * Return: 0 on success, or a negative error code from phylink on failure.
1994  */
1995 static int
1996 axienet_ethtools_set_pauseparam(struct net_device *ndev,
1997 				struct ethtool_pauseparam *epauseparm)
1998 {
1999 	struct axienet_local *lp = netdev_priv(ndev);
2000 
2001 	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2002 }
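
/* Pause frame negotiation is delegated to phylink; the resolved tx/rx
 * pause state is programmed into the XAE_FCC register by
 * axienet_mac_link_up() when the link comes up.
 */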
2003 
2004 /**
2005  * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2006  * @ndev:	Pointer to net_device structure
2007  * @ecoalesce:	Pointer to ethtool_coalesce structure
2008  * @kernel_coal: ethtool CQE mode setting structure
2009  * @extack:	extack for reporting error messages
2010  *
2011  * This implements ethtool command for getting the DMA interrupt coalescing
2012  * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2013  * execute this function.
2014  *
2015  * Return: 0 always
2016  */
2017 static int
2018 axienet_ethtools_get_coalesce(struct net_device *ndev,
2019 			      struct ethtool_coalesce *ecoalesce,
2020 			      struct kernel_ethtool_coalesce *kernel_coal,
2021 			      struct netlink_ext_ack *extack)
2022 {
2023 	struct axienet_local *lp = netdev_priv(ndev);
2024 
2025 	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2026 	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2027 	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2028 	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
2029 	return 0;
2030 }
2031 
2032 /**
2033  * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2034  * @ndev:	Pointer to net_device structure
2035  * @ecoalesce:	Pointer to ethtool_coalesce structure
2036  * @kernel_coal: ethtool CQE mode setting structure
2037  * @extack:	extack for reporting error messages
2038  *
2039  * This implements ethtool command for setting the DMA interrupt coalescing
2040  * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2041  * prompt to execute this function.
2042  *
2043  * Return: 0, on success, Non-zero error value on failure.
2044  */
2045 static int
2046 axienet_ethtools_set_coalesce(struct net_device *ndev,
2047 			      struct ethtool_coalesce *ecoalesce,
2048 			      struct kernel_ethtool_coalesce *kernel_coal,
2049 			      struct netlink_ext_ack *extack)
2050 {
2051 	struct axienet_local *lp = netdev_priv(ndev);
2052 
2053 	if (netif_running(ndev)) {
2054 		NL_SET_ERR_MSG(extack,
2055 			       "Please stop netif before applying configuration");
2056 		return -EBUSY;
2057 	}
2058 
2059 	if (ecoalesce->rx_max_coalesced_frames > 255 ||
2060 	    ecoalesce->tx_max_coalesced_frames > 255) {
2061 		NL_SET_ERR_MSG(extack, "frames must be less than 256");
2062 		return -EINVAL;
2063 	}
2064 
2065 	if (ecoalesce->rx_max_coalesced_frames)
2066 		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2067 	if (ecoalesce->rx_coalesce_usecs)
2068 		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2069 	if (ecoalesce->tx_max_coalesced_frames)
2070 		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2071 	if (ecoalesce->tx_coalesce_usecs)
2072 		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2073 
2074 	return 0;
2075 }
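
/* Example (interface name and values are illustrative):
 *
 *	# ethtool -C eth0 rx-frames 16 rx-usecs 50 tx-frames 32 tx-usecs 100
 *
 * Note that zero values are skipped by the code above, so a parameter can
 * be changed but not reset to zero.
 */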
2076 
2077 static int
2078 axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2079 				    struct ethtool_link_ksettings *cmd)
2080 {
2081 	struct axienet_local *lp = netdev_priv(ndev);
2082 
2083 	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2084 }
2085 
2086 static int
2087 axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2088 				    const struct ethtool_link_ksettings *cmd)
2089 {
2090 	struct axienet_local *lp = netdev_priv(ndev);
2091 
2092 	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2093 }
2094 
2095 static int axienet_ethtools_nway_reset(struct net_device *dev)
2096 {
2097 	struct axienet_local *lp = netdev_priv(dev);
2098 
2099 	return phylink_ethtool_nway_reset(lp->phylink);
2100 }
2101 
2102 static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2103 					       struct ethtool_stats *stats,
2104 					       u64 *data)
2105 {
2106 	struct axienet_local *lp = netdev_priv(dev);
2107 	unsigned int start;
2108 
2109 	do {
2110 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2111 		data[0] = axienet_stat(lp, STAT_RX_BYTES);
2112 		data[1] = axienet_stat(lp, STAT_TX_BYTES);
2113 		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2114 		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2115 		data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2116 		data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2117 		data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
2118 		data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
2119 		data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
2120 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2121 }
2122 
2123 static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2124 	"Received bytes",
2125 	"Transmitted bytes",
2126 	"RX Good VLAN Tagged Frames",
2127 	"TX Good VLAN Tagged Frames",
2128 	"TX Good PFC Frames",
2129 	"RX Good PFC Frames",
2130 	"User Defined Counter 0",
2131 	"User Defined Counter 1",
2132 	"User Defined Counter 2",
2133 };
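
/* The order of these strings must match the data[] indexes written by
 * axienet_ethtools_get_ethtool_stats() above.
 */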
2134 
2135 static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2136 {
2137 	switch (stringset) {
2138 	case ETH_SS_STATS:
2139 		memcpy(data, axienet_ethtool_stats_strings,
2140 		       sizeof(axienet_ethtool_stats_strings));
2141 		break;
2142 	}
2143 }
2144 
2145 static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2146 {
2147 	struct axienet_local *lp = netdev_priv(dev);
2148 
2149 	switch (sset) {
2150 	case ETH_SS_STATS:
2151 		if (lp->features & XAE_FEATURE_STATS)
2152 			return ARRAY_SIZE(axienet_ethtool_stats_strings);
2153 		fallthrough;
2154 	default:
2155 		return -EOPNOTSUPP;
2156 	}
2157 }
2158 
2159 static void
2160 axienet_ethtools_get_pause_stats(struct net_device *dev,
2161 				 struct ethtool_pause_stats *pause_stats)
2162 {
2163 	struct axienet_local *lp = netdev_priv(dev);
2164 	unsigned int start;
2165 
2166 	if (!(lp->features & XAE_FEATURE_STATS))
2167 		return;
2168 
2169 	do {
2170 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2171 		pause_stats->tx_pause_frames =
2172 			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2173 		pause_stats->rx_pause_frames =
2174 			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2175 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2176 }
2177 
2178 static void
2179 axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2180 				  struct ethtool_eth_mac_stats *mac_stats)
2181 {
2182 	struct axienet_local *lp = netdev_priv(dev);
2183 	unsigned int start;
2184 
2185 	if (!(lp->features & XAE_FEATURE_STATS))
2186 		return;
2187 
2188 	do {
2189 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2190 		mac_stats->FramesTransmittedOK =
2191 			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2192 		mac_stats->SingleCollisionFrames =
2193 			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2194 		mac_stats->MultipleCollisionFrames =
2195 			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2196 		mac_stats->FramesReceivedOK =
2197 			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2198 		mac_stats->FrameCheckSequenceErrors =
2199 			axienet_stat(lp, STAT_RX_FCS_ERRORS);
2200 		mac_stats->AlignmentErrors =
2201 			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2202 		mac_stats->FramesWithDeferredXmissions =
2203 			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2204 		mac_stats->LateCollisions =
2205 			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2206 		mac_stats->FramesAbortedDueToXSColls =
2207 			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2208 		mac_stats->MulticastFramesXmittedOK =
2209 			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2210 		mac_stats->BroadcastFramesXmittedOK =
2211 			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2212 		mac_stats->FramesWithExcessiveDeferral =
2213 			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2214 		mac_stats->MulticastFramesReceivedOK =
2215 			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2216 		mac_stats->BroadcastFramesReceivedOK =
2217 			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2218 		mac_stats->InRangeLengthErrors =
2219 			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2220 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2221 }
2222 
2223 static void
2224 axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2225 				   struct ethtool_eth_ctrl_stats *ctrl_stats)
2226 {
2227 	struct axienet_local *lp = netdev_priv(dev);
2228 	unsigned int start;
2229 
2230 	if (!(lp->features & XAE_FEATURE_STATS))
2231 		return;
2232 
2233 	do {
2234 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2235 		ctrl_stats->MACControlFramesTransmitted =
2236 			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2237 		ctrl_stats->MACControlFramesReceived =
2238 			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2239 		ctrl_stats->UnsupportedOpcodesReceived =
2240 			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2241 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2242 }
2243 
2244 static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2245 	{   64,    64 },
2246 	{   65,   127 },
2247 	{  128,   255 },
2248 	{  256,   511 },
2249 	{  512,  1023 },
2250 	{ 1024,  1518 },
2251 	{ 1519, 16384 },
2252 	{ },
2253 };
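
/* Each hist[]/hist_tx[] bucket filled in below corresponds positionally to
 * one of the ranges above; the last Rx bucket (1519-16384) is filled from
 * the oversize-frame counter.
 */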
2254 
2255 static void
2256 axienet_ethtool_get_rmon_stats(struct net_device *dev,
2257 			       struct ethtool_rmon_stats *rmon_stats,
2258 			       const struct ethtool_rmon_hist_range **ranges)
2259 {
2260 	struct axienet_local *lp = netdev_priv(dev);
2261 	unsigned int start;
2262 
2263 	if (!(lp->features & XAE_FEATURE_STATS))
2264 		return;
2265 
2266 	do {
2267 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2268 		rmon_stats->undersize_pkts =
2269 			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2270 		rmon_stats->oversize_pkts =
2271 			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2272 		rmon_stats->fragments =
2273 			axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2274 
2275 		rmon_stats->hist[0] =
2276 			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2277 		rmon_stats->hist[1] =
2278 			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2279 		rmon_stats->hist[2] =
2280 			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2281 		rmon_stats->hist[3] =
2282 			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2283 		rmon_stats->hist[4] =
2284 			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2285 		rmon_stats->hist[5] =
2286 			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2287 		rmon_stats->hist[6] =
2288 			rmon_stats->oversize_pkts;
2289 
2290 		rmon_stats->hist_tx[0] =
2291 			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2292 		rmon_stats->hist_tx[1] =
2293 			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2294 		rmon_stats->hist_tx[2] =
2295 			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2296 		rmon_stats->hist_tx[3] =
2297 			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2298 		rmon_stats->hist_tx[4] =
2299 			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2300 		rmon_stats->hist_tx[5] =
2301 			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2302 		rmon_stats->hist_tx[6] =
2303 			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2304 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2305 
2306 	*ranges = axienet_rmon_ranges;
2307 }
2308 
2309 static const struct ethtool_ops axienet_ethtool_ops = {
2310 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2311 				     ETHTOOL_COALESCE_USECS,
2312 	.get_drvinfo    = axienet_ethtools_get_drvinfo,
2313 	.get_regs_len   = axienet_ethtools_get_regs_len,
2314 	.get_regs       = axienet_ethtools_get_regs,
2315 	.get_link       = ethtool_op_get_link,
2316 	.get_ringparam	= axienet_ethtools_get_ringparam,
2317 	.set_ringparam	= axienet_ethtools_set_ringparam,
2318 	.get_pauseparam = axienet_ethtools_get_pauseparam,
2319 	.set_pauseparam = axienet_ethtools_set_pauseparam,
2320 	.get_coalesce   = axienet_ethtools_get_coalesce,
2321 	.set_coalesce   = axienet_ethtools_set_coalesce,
2322 	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2323 	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2324 	.nway_reset	= axienet_ethtools_nway_reset,
2325 	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2326 	.get_strings    = axienet_ethtools_get_strings,
2327 	.get_sset_count = axienet_ethtools_get_sset_count,
2328 	.get_pause_stats = axienet_ethtools_get_pause_stats,
2329 	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2330 	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2331 	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
2332 };
2333 
2334 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2335 {
2336 	return container_of(pcs, struct axienet_local, pcs);
2337 }
2338 
2339 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2340 				  struct phylink_link_state *state)
2341 {
2342 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2343 
2344 	phylink_mii_c22_pcs_get_state(pcs_phy, state);
2345 }
2346 
2347 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2348 {
2349 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2350 
2351 	phylink_mii_c22_pcs_an_restart(pcs_phy);
2352 }
2353 
2354 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2355 			      phy_interface_t interface,
2356 			      const unsigned long *advertising,
2357 			      bool permit_pause_to_mac)
2358 {
2359 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2360 	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2361 	struct axienet_local *lp = netdev_priv(ndev);
2362 	int ret;
2363 
2364 	if (lp->switch_x_sgmii) {
2365 		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2366 				    interface == PHY_INTERFACE_MODE_SGMII ?
2367 					XLNX_MII_STD_SELECT_SGMII : 0);
2368 		if (ret < 0) {
2369 			netdev_warn(ndev,
2370 				    "Failed to switch PHY interface: %d\n",
2371 				    ret);
2372 			return ret;
2373 		}
2374 	}
2375 
2376 	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2377 					 neg_mode);
2378 	if (ret < 0)
2379 		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2380 
2381 	return ret;
2382 }
2383 
2384 static const struct phylink_pcs_ops axienet_pcs_ops = {
2385 	.pcs_get_state = axienet_pcs_get_state,
2386 	.pcs_config = axienet_pcs_config,
2387 	.pcs_an_restart = axienet_pcs_an_restart,
2388 };
2389 
2390 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2391 						  phy_interface_t interface)
2392 {
2393 	struct net_device *ndev = to_net_dev(config->dev);
2394 	struct axienet_local *lp = netdev_priv(ndev);
2395 
2396 	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2397 	    interface ==  PHY_INTERFACE_MODE_SGMII)
2398 		return &lp->pcs;
2399 
2400 	return NULL;
2401 }
2402 
2403 static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2404 			       const struct phylink_link_state *state)
2405 {
2406 	/* nothing meaningful to do */
2407 }
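
/* mac_config() and mac_link_down() are mandatory phylink callbacks, but all
 * of this driver's MAC programming happens in axienet_mac_link_up() once the
 * link parameters are resolved, so both are intentionally empty.
 */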
2408 
2409 static void axienet_mac_link_down(struct phylink_config *config,
2410 				  unsigned int mode,
2411 				  phy_interface_t interface)
2412 {
2413 	/* nothing meaningful to do */
2414 }
2415 
2416 static void axienet_mac_link_up(struct phylink_config *config,
2417 				struct phy_device *phy,
2418 				unsigned int mode, phy_interface_t interface,
2419 				int speed, int duplex,
2420 				bool tx_pause, bool rx_pause)
2421 {
2422 	struct net_device *ndev = to_net_dev(config->dev);
2423 	struct axienet_local *lp = netdev_priv(ndev);
2424 	u32 emmc_reg, fcc_reg;
2425 
2426 	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2427 	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2428 
2429 	switch (speed) {
2430 	case SPEED_1000:
2431 		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2432 		break;
2433 	case SPEED_100:
2434 		emmc_reg |= XAE_EMMC_LINKSPD_100;
2435 		break;
2436 	case SPEED_10:
2437 		emmc_reg |= XAE_EMMC_LINKSPD_10;
2438 		break;
2439 	default:
2440 		dev_err(&ndev->dev,
2441 			"Speed other than 10, 100 or 1Gbps is not supported\n");
2442 		break;
2443 	}
2444 
2445 	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2446 
2447 	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2448 	if (tx_pause)
2449 		fcc_reg |= XAE_FCC_FCTX_MASK;
2450 	else
2451 		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2452 	if (rx_pause)
2453 		fcc_reg |= XAE_FCC_FCRX_MASK;
2454 	else
2455 		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2456 	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2457 }
2458 
2459 static const struct phylink_mac_ops axienet_phylink_ops = {
2460 	.mac_select_pcs = axienet_mac_select_pcs,
2461 	.mac_config = axienet_mac_config,
2462 	.mac_link_down = axienet_mac_link_down,
2463 	.mac_link_up = axienet_mac_link_up,
2464 };
2465 
2466 /**
2467  * axienet_dma_err_handler - Work queue task for Axi DMA Error
2468  * @work:	pointer to work_struct
2469  *
2470  * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2471  * Tx/Rx BDs.
2472  */
2473 static void axienet_dma_err_handler(struct work_struct *work)
2474 {
2475 	u32 i;
2476 	u32 axienet_status;
2477 	struct axidma_bd *cur_p;
2478 	struct axienet_local *lp = container_of(work, struct axienet_local,
2479 						dma_err_task);
2480 	struct net_device *ndev = lp->ndev;
2481 
2482 	/* Don't bother if we are going to stop anyway */
2483 	if (READ_ONCE(lp->stopping))
2484 		return;
2485 
2486 	napi_disable(&lp->napi_tx);
2487 	napi_disable(&lp->napi_rx);
2488 
2489 	axienet_setoptions(ndev, lp->options &
2490 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2491 
2492 	axienet_dma_stop(lp);
2493 
2494 	for (i = 0; i < lp->tx_bd_num; i++) {
2495 		cur_p = &lp->tx_bd_v[i];
2496 		if (cur_p->cntrl) {
2497 			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2498 
2499 			dma_unmap_single(lp->dev, addr,
2500 					 (cur_p->cntrl &
2501 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2502 					 DMA_TO_DEVICE);
2503 		}
2504 		if (cur_p->skb)
2505 			dev_kfree_skb_irq(cur_p->skb);
2506 		cur_p->phys = 0;
2507 		cur_p->phys_msb = 0;
2508 		cur_p->cntrl = 0;
2509 		cur_p->status = 0;
2510 		cur_p->app0 = 0;
2511 		cur_p->app1 = 0;
2512 		cur_p->app2 = 0;
2513 		cur_p->app3 = 0;
2514 		cur_p->app4 = 0;
2515 		cur_p->skb = NULL;
2516 	}
2517 
2518 	for (i = 0; i < lp->rx_bd_num; i++) {
2519 		cur_p = &lp->rx_bd_v[i];
2520 		cur_p->status = 0;
2521 		cur_p->app0 = 0;
2522 		cur_p->app1 = 0;
2523 		cur_p->app2 = 0;
2524 		cur_p->app3 = 0;
2525 		cur_p->app4 = 0;
2526 	}
2527 
2528 	lp->tx_bd_ci = 0;
2529 	lp->tx_bd_tail = 0;
2530 	lp->rx_bd_ci = 0;
2531 
2532 	axienet_dma_start(lp);
2533 
2534 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2535 	axienet_status &= ~XAE_RCW1_RX_MASK;
2536 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2537 
2538 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2539 	if (axienet_status & XAE_INT_RXRJECT_MASK)
2540 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2541 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2542 		    XAE_INT_RECV_ERROR_MASK : 0);
2543 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2544 
2545 	/* Sync default options with HW but leave receiver and
2546 	 * transmitter disabled.
2547 	 */
2548 	axienet_setoptions(ndev, lp->options &
2549 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2550 	axienet_set_mac_address(ndev, NULL);
2551 	axienet_set_multicast_list(ndev);
2552 	napi_enable(&lp->napi_rx);
2553 	napi_enable(&lp->napi_tx);
2554 	axienet_setoptions(ndev, lp->options);
2555 }
2556 
2557 /**
2558  * axienet_probe - Axi Ethernet probe function.
2559  * @pdev:	Pointer to platform device structure.
2560  *
2561  * Return: 0, on success
2562  *	    Non-zero error value on failure.
2563  *
2564  * This is the probe routine for the Axi Ethernet driver. It is called before
2565  * any other driver routines are invoked. It allocates and sets up the
2566  * Ethernet device, parses the device tree to populate the fields of
2567  * axienet_local, and registers the Ethernet device.
2568  */
2569 static int axienet_probe(struct platform_device *pdev)
2570 {
2571 	int ret;
2572 	struct device_node *np;
2573 	struct axienet_local *lp;
2574 	struct net_device *ndev;
2575 	struct resource *ethres;
2576 	u8 mac_addr[ETH_ALEN];
2577 	int addr_width = 32;
2578 	u32 value;
2579 
2580 	ndev = alloc_etherdev(sizeof(*lp));
2581 	if (!ndev)
2582 		return -ENOMEM;
2583 
2584 	platform_set_drvdata(pdev, ndev);
2585 
2586 	SET_NETDEV_DEV(ndev, &pdev->dev);
2587 	ndev->features = NETIF_F_SG;
2588 	ndev->ethtool_ops = &axienet_ethtool_ops;
2589 
2590 	/* MTU range: 64 - 9000 */
2591 	ndev->min_mtu = 64;
2592 	ndev->max_mtu = XAE_JUMBO_MTU;
2593 
2594 	lp = netdev_priv(ndev);
2595 	lp->ndev = ndev;
2596 	lp->dev = &pdev->dev;
2597 	lp->options = XAE_OPTION_DEFAULTS;
2598 	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2599 	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2600 
2601 	u64_stats_init(&lp->rx_stat_sync);
2602 	u64_stats_init(&lp->tx_stat_sync);
2603 
2604 	mutex_init(&lp->stats_lock);
2605 	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2606 	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2607 
2608 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2609 	if (!lp->axi_clk) {
2610 		/* For backward compatibility, if named AXI clock is not present,
2611 		 * treat the first clock specified as the AXI clock.
2612 		 */
2613 		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2614 	}
2615 	if (IS_ERR(lp->axi_clk)) {
2616 		ret = PTR_ERR(lp->axi_clk);
2617 		goto free_netdev;
2618 	}
2619 	ret = clk_prepare_enable(lp->axi_clk);
2620 	if (ret) {
2621 		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2622 		goto free_netdev;
2623 	}
2624 
2625 	lp->misc_clks[0].id = "axis_clk";
2626 	lp->misc_clks[1].id = "ref_clk";
2627 	lp->misc_clks[2].id = "mgt_clk";
2628 
2629 	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2630 	if (ret)
2631 		goto cleanup_clk;
2632 
2633 	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2634 	if (ret)
2635 		goto cleanup_clk;
2636 
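	/* Illustrative device-tree clock wiring (a sketch only: the phandles
	 * and unit address are made up; the clock-names strings match the
	 * IDs requested above):
	 *
	 *	ethernet@40c00000 {
	 *		clocks = <&clkc 15>, <&clkc 16>, <&clkc 17>, <&clkc 18>;
	 *		clock-names = "s_axi_lite_clk", "axis_clk",
	 *			      "ref_clk", "mgt_clk";
	 *	};
	 */
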
2637 	/* Map device registers */
2638 	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2639 	if (IS_ERR(lp->regs)) {
2640 		ret = PTR_ERR(lp->regs);
2641 		goto cleanup_clk;
2642 	}
2643 	lp->regs_start = ethres->start;
2644 
2645 	/* Setup checksum offload, but default to off if not specified */
2646 	lp->features = 0;
2647 
2648 	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2649 		lp->features |= XAE_FEATURE_STATS;
2650 
2651 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2652 	if (!ret) {
2653 		switch (value) {
2654 		case 1:
2655 			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2656 			/* Can checksum any contiguous range */
2657 			ndev->features |= NETIF_F_HW_CSUM;
2658 			break;
2659 		case 2:
2660 			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2661 			/* Can checksum TCP/UDP over IPv4. */
2662 			ndev->features |= NETIF_F_IP_CSUM;
2663 			break;
2664 		}
2665 	}
2666 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2667 	if (!ret) {
2668 		switch (value) {
2669 		case 1:
2670 			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2671 			ndev->features |= NETIF_F_RXCSUM;
2672 			break;
2673 		case 2:
2674 			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2675 			ndev->features |= NETIF_F_RXCSUM;
2676 			break;
2677 		}
2678 	}
2679 	/* To support jumbo frames, the Axi Ethernet hardware must be
2680 	 * configured with enough Rx memory to hold a maximum-size frame.
2681 	 * Read the Rx memory size provided in the device tree ("xlnx,rxmem");
2682 	 * it is used later to validate MTU changes (see
2683 	 * axienet_change_mtu()).
2684 	 */
2685 	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2686 
2687 	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2688 						   "xlnx,switch-x-sgmii");
2689 
2690 	/* Start with the proprietary, and broken phy_type */
2691 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2692 	if (!ret) {
2693 		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2694 		switch (value) {
2695 		case XAE_PHY_TYPE_MII:
2696 			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2697 			break;
2698 		case XAE_PHY_TYPE_GMII:
2699 			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2700 			break;
2701 		case XAE_PHY_TYPE_RGMII_2_0:
2702 			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2703 			break;
2704 		case XAE_PHY_TYPE_SGMII:
2705 			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2706 			break;
2707 		case XAE_PHY_TYPE_1000BASE_X:
2708 			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2709 			break;
2710 		default:
2711 			ret = -EINVAL;
2712 			goto cleanup_clk;
2713 		}
2714 	} else {
2715 		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2716 		if (ret)
2717 			goto cleanup_clk;
2718 	}
2719 	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2720 	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2721 		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2722 		ret = -EINVAL;
2723 		goto cleanup_clk;
2724 	}
2725 
2726 	if (!of_property_present(pdev->dev.of_node, "dmas")) {
2727 		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2728 		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2729 
2730 		if (np) {
2731 			struct resource dmares;
2732 
2733 			ret = of_address_to_resource(np, 0, &dmares);
2734 			if (ret) {
2735 				dev_err(&pdev->dev,
2736 					"unable to get DMA resource\n");
2737 				of_node_put(np);
2738 				goto cleanup_clk;
2739 			}
2740 			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2741 							     &dmares);
2742 			lp->rx_irq = irq_of_parse_and_map(np, 1);
2743 			lp->tx_irq = irq_of_parse_and_map(np, 0);
2744 			of_node_put(np);
2745 			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2746 		} else {
2747 			/* Check for these resources directly on the Ethernet node. */
2748 			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2749 			lp->rx_irq = platform_get_irq(pdev, 1);
2750 			lp->tx_irq = platform_get_irq(pdev, 0);
2751 			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2752 		}
2753 		if (IS_ERR(lp->dma_regs)) {
2754 			dev_err(&pdev->dev, "could not map DMA regs\n");
2755 			ret = PTR_ERR(lp->dma_regs);
2756 			goto cleanup_clk;
2757 		}
2758 		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2759 			dev_err(&pdev->dev, "could not determine irqs\n");
2760 			ret = -ENOMEM;
2761 			goto cleanup_clk;
2762 		}
2763 
2764 		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2765 		ret = __axienet_device_reset(lp);
2766 		if (ret)
2767 			goto cleanup_clk;
2768 
2769 		/* Autodetect the need for 64-bit DMA pointers.
2770 		 * When the IP is configured for a bus width bigger than 32 bits,
2771 		 * writing the MSB registers is mandatory, even if they are all 0.
2772 		 * We can detect this case by writing all 1's to one such register
2773 		 * and see if that sticks: when the IP is configured for 32 bits
2774 		 * only, those registers are RES0.
2775 		 * Those MSB registers were introduced in IP v7.1, which we check first.
2776 		 */
2777 		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2778 			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2779 
2780 			iowrite32(0x0, desc);
2781 			if (ioread32(desc) == 0) {	/* sanity check */
2782 				iowrite32(0xffffffff, desc);
2783 				if (ioread32(desc) > 0) {
2784 					lp->features |= XAE_FEATURE_DMA_64BIT;
2785 					addr_width = 64;
2786 					dev_info(&pdev->dev,
2787 						 "autodetected 64-bit DMA range\n");
2788 				}
2789 				iowrite32(0x0, desc);
2790 			}
2791 		}
2792 		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2793 			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2794 			ret = -EINVAL;
2795 			goto cleanup_clk;
2796 		}
2797 
2798 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2799 		if (ret) {
2800 			dev_err(&pdev->dev, "No suitable DMA available\n");
2801 			goto cleanup_clk;
2802 		}
2803 		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2804 		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2805 	} else {
2806 		struct xilinx_vdma_config cfg;
2807 		struct dma_chan *tx_chan;
2808 
2809 		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2810 		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2811 			ret = lp->eth_irq;
2812 			goto cleanup_clk;
2813 		}
2814 		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2815 		if (IS_ERR(tx_chan)) {
2816 			ret = PTR_ERR(tx_chan);
2817 			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2818 			goto cleanup_clk;
2819 		}
2820 
2821 		cfg.reset = 1;
2822 		/* Despite the VDMA name, this config call also supports resetting plain AXI DMA channels */
2823 		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2824 		if (ret < 0) {
2825 			dev_err(&pdev->dev, "Reset channel failed\n");
2826 			dma_release_channel(tx_chan);
2827 			goto cleanup_clk;
2828 		}
2829 
2830 		dma_release_channel(tx_chan);
2831 		lp->use_dmaengine = 1;
2832 	}
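
	/* The "tx_chan0" request above is only used to reset the DMA and to
	 * verify that a dmaengine channel exists; the Tx/Rx channels actually
	 * used for traffic are requested again when the interface is opened
	 * (see axienet_init_dmaengine()).
	 */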
2833 
2834 	if (lp->use_dmaengine)
2835 		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2836 	else
2837 		ndev->netdev_ops = &axienet_netdev_ops;
2838 	/* Check for Ethernet core IRQ (optional) */
2839 	if (lp->eth_irq <= 0)
2840 		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2841 
2842 	/* Retrieve the MAC address */
2843 	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2844 	if (!ret) {
2845 		axienet_set_mac_address(ndev, mac_addr);
2846 	} else {
2847 		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2848 			 ret);
2849 		axienet_set_mac_address(ndev, NULL);
2850 	}
2851 
2852 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2853 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2854 	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2855 	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2856 
2857 	ret = axienet_mdio_setup(lp);
2858 	if (ret)
2859 		dev_warn(&pdev->dev,
2860 			 "error registering MDIO bus: %d\n", ret);
2861 
2862 	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2863 	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2864 		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2865 		if (!np) {
2866 			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2867 			 * Falling back to "phy-handle" here is only for
2868 			 * backward compatibility with old device trees.
2869 			 */
2870 			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2871 		}
2872 		if (!np) {
2873 			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2874 			ret = -EINVAL;
2875 			goto cleanup_mdio;
2876 		}
2877 		lp->pcs_phy = of_mdio_find_device(np);
2878 		if (!lp->pcs_phy) {
2879 			ret = -EPROBE_DEFER;
2880 			of_node_put(np);
2881 			goto cleanup_mdio;
2882 		}
2883 		of_node_put(np);
2884 		lp->pcs.ops = &axienet_pcs_ops;
2885 		lp->pcs.neg_mode = true;
2886 		lp->pcs.poll = true;
2887 	}
2888 
2889 	lp->phylink_config.dev = &ndev->dev;
2890 	lp->phylink_config.type = PHYLINK_NETDEV;
2891 	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2892 		MAC_10FD | MAC_100FD | MAC_1000FD;
2893 
2894 	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2895 	if (lp->switch_x_sgmii) {
2896 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2897 			  lp->phylink_config.supported_interfaces);
2898 		__set_bit(PHY_INTERFACE_MODE_SGMII,
2899 			  lp->phylink_config.supported_interfaces);
2900 	}
2901 
2902 	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2903 				     lp->phy_mode,
2904 				     &axienet_phylink_ops);
2905 	if (IS_ERR(lp->phylink)) {
2906 		ret = PTR_ERR(lp->phylink);
2907 		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2908 		goto cleanup_mdio;
2909 	}
2910 
2911 	ret = register_netdev(lp->ndev);
2912 	if (ret) {
2913 		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2914 		goto cleanup_phylink;
2915 	}
2916 
2917 	return 0;
2918 
2919 cleanup_phylink:
2920 	phylink_destroy(lp->phylink);
2921 
2922 cleanup_mdio:
2923 	if (lp->pcs_phy)
2924 		put_device(&lp->pcs_phy->dev);
2925 	if (lp->mii_bus)
2926 		axienet_mdio_teardown(lp);
2927 cleanup_clk:
2928 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2929 	clk_disable_unprepare(lp->axi_clk);
2930 
2931 free_netdev:
2932 	free_netdev(ndev);
2933 
2934 	return ret;
2935 }
2936 
2937 static void axienet_remove(struct platform_device *pdev)
2938 {
2939 	struct net_device *ndev = platform_get_drvdata(pdev);
2940 	struct axienet_local *lp = netdev_priv(ndev);
2941 
2942 	unregister_netdev(ndev);
2943 
2944 	if (lp->phylink)
2945 		phylink_destroy(lp->phylink);
2946 
2947 	if (lp->pcs_phy)
2948 		put_device(&lp->pcs_phy->dev);
2949 
2950 	axienet_mdio_teardown(lp);
2951 
2952 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2953 	clk_disable_unprepare(lp->axi_clk);
2954 
2955 	free_netdev(ndev);
2956 }
2957 
2958 static void axienet_shutdown(struct platform_device *pdev)
2959 {
2960 	struct net_device *ndev = platform_get_drvdata(pdev);
2961 
2962 	rtnl_lock();
2963 	netif_device_detach(ndev);
2964 
2965 	if (netif_running(ndev))
2966 		dev_close(ndev);
2967 
2968 	rtnl_unlock();
2969 }
2970 
2971 static int axienet_suspend(struct device *dev)
2972 {
2973 	struct net_device *ndev = dev_get_drvdata(dev);
2974 
2975 	if (!netif_running(ndev))
2976 		return 0;
2977 
2978 	netif_device_detach(ndev);
2979 
2980 	rtnl_lock();
2981 	axienet_stop(ndev);
2982 	rtnl_unlock();
2983 
2984 	return 0;
2985 }
2986 
2987 static int axienet_resume(struct device *dev)
2988 {
2989 	struct net_device *ndev = dev_get_drvdata(dev);
2990 
2991 	if (!netif_running(ndev))
2992 		return 0;
2993 
2994 	rtnl_lock();
2995 	axienet_open(ndev);
2996 	rtnl_unlock();
2997 
2998 	netif_device_attach(ndev);
2999 
3000 	return 0;
3001 }
3002 
3003 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3004 				axienet_suspend, axienet_resume);
3005 
3006 static struct platform_driver axienet_driver = {
3007 	.probe = axienet_probe,
3008 	.remove = axienet_remove,
3009 	.shutdown = axienet_shutdown,
3010 	.driver = {
3011 		 .name = "xilinx_axienet",
3012 		 .pm = &axienet_pm_ops,
3013 		 .of_match_table = axienet_of_match,
3014 	},
3015 };
3016 
3017 module_platform_driver(axienet_driver);
3018 
3019 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3020 MODULE_AUTHOR("Xilinx");
3021 MODULE_LICENSE("GPL");
3022