xref: /linux/drivers/net/ethernet/xilinx/xilinx_axienet_main.c (revision f58817c852e9c9eb8116c24d8271a35159636605)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Xilinx Axi Ethernet device driver
4  *
5  * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
6  * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
7  * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
8  * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
9  * Copyright (c) 2010 - 2011 PetaLogix
10  * Copyright (c) 2019 - 2022 Calian Advanced Technologies
11  * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
12  *
13  * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
14  * and Spartan6.
15  *
16  * TODO:
17  *  - Add Axi Fifo support.
18  *  - Factor out Axi DMA code into separate driver.
19  *  - Test and fix basic multicast filtering.
20  *  - Add support for extended multicast filtering.
21  *  - Test basic VLAN support.
22  *  - Add support for extended VLAN support.
23  */
24 
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/etherdevice.h>
28 #include <linux/module.h>
29 #include <linux/netdevice.h>
30 #include <linux/of.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_address.h>
35 #include <linux/platform_device.h>
36 #include <linux/skbuff.h>
37 #include <linux/math64.h>
38 #include <linux/phy.h>
39 #include <linux/mii.h>
40 #include <linux/ethtool.h>
41 #include <linux/dmaengine.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/dma/xilinx_dma.h>
44 #include <linux/circ_buf.h>
45 #include <net/netdev_queues.h>
46 
47 #include "xilinx_axienet.h"
48 
49 /* Descriptors defines for Tx and Rx DMA */
50 #define TX_BD_NUM_DEFAULT		128
51 #define RX_BD_NUM_DEFAULT		1024
52 #define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
53 #define TX_BD_NUM_MAX			4096
54 #define RX_BD_NUM_MAX			4096
55 #define DMA_NUM_APP_WORDS		5
56 #define LEN_APP				4
57 #define RX_BUF_NUM_DEFAULT		128
58 
59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
60 #define DRIVER_NAME		"xaxienet"
61 #define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
62 #define DRIVER_VERSION		"1.00a"
63 
64 #define AXIENET_REGS_N		40
65 
66 static void axienet_rx_submit_desc(struct net_device *ndev);
67 
68 /* Match table for of_platform binding */
69 static const struct of_device_id axienet_of_match[] = {
70 	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
71 	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
72 	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
73 	{},
74 };
75 
76 MODULE_DEVICE_TABLE(of, axienet_of_match);
77 
78 /* Option table for setting up Axi Ethernet hardware options */
79 static struct axienet_option axienet_options[] = {
80 	/* Turn on jumbo packet support for both Rx and Tx */
81 	{
82 		.opt = XAE_OPTION_JUMBO,
83 		.reg = XAE_TC_OFFSET,
84 		.m_or = XAE_TC_JUM_MASK,
85 	}, {
86 		.opt = XAE_OPTION_JUMBO,
87 		.reg = XAE_RCW1_OFFSET,
88 		.m_or = XAE_RCW1_JUM_MASK,
89 	}, { /* Turn on VLAN packet support for both Rx and Tx */
90 		.opt = XAE_OPTION_VLAN,
91 		.reg = XAE_TC_OFFSET,
92 		.m_or = XAE_TC_VLAN_MASK,
93 	}, {
94 		.opt = XAE_OPTION_VLAN,
95 		.reg = XAE_RCW1_OFFSET,
96 		.m_or = XAE_RCW1_VLAN_MASK,
97 	}, { /* Turn on FCS stripping on receive packets */
98 		.opt = XAE_OPTION_FCS_STRIP,
99 		.reg = XAE_RCW1_OFFSET,
100 		.m_or = XAE_RCW1_FCS_MASK,
101 	}, { /* Turn on FCS insertion on transmit packets */
102 		.opt = XAE_OPTION_FCS_INSERT,
103 		.reg = XAE_TC_OFFSET,
104 		.m_or = XAE_TC_FCS_MASK,
105 	}, { /* Turn off length/type field checking on receive packets */
106 		.opt = XAE_OPTION_LENTYPE_ERR,
107 		.reg = XAE_RCW1_OFFSET,
108 		.m_or = XAE_RCW1_LT_DIS_MASK,
109 	}, { /* Turn on Rx flow control */
110 		.opt = XAE_OPTION_FLOW_CONTROL,
111 		.reg = XAE_FCC_OFFSET,
112 		.m_or = XAE_FCC_FCRX_MASK,
113 	}, { /* Turn on Tx flow control */
114 		.opt = XAE_OPTION_FLOW_CONTROL,
115 		.reg = XAE_FCC_OFFSET,
116 		.m_or = XAE_FCC_FCTX_MASK,
117 	}, { /* Turn on promiscuous frame filtering */
118 		.opt = XAE_OPTION_PROMISC,
119 		.reg = XAE_FMI_OFFSET,
120 		.m_or = XAE_FMI_PM_MASK,
121 	}, { /* Enable transmitter */
122 		.opt = XAE_OPTION_TXEN,
123 		.reg = XAE_TC_OFFSET,
124 		.m_or = XAE_TC_TX_MASK,
125 	}, { /* Enable receiver */
126 		.opt = XAE_OPTION_RXEN,
127 		.reg = XAE_RCW1_OFFSET,
128 		.m_or = XAE_RCW1_RX_MASK,
129 	},
130 	{}
131 };
132 
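/* Map the free-running dmaengine ring indices onto skb ring slots. Both ring
 * sizes are powers of two, so masking with (size - 1) selects the slot.
 */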
133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
134 {
135 	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
136 }
137 
138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
139 {
140 	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
141 }
142 
143 /**
144  * axienet_dma_in32 - Memory mapped Axi DMA register read
145  * @lp:		Pointer to axienet local structure
146  * @reg:	Address offset from the base address of the Axi DMA core
147  *
148  * Return: The contents of the Axi DMA register
149  *
150  * This function returns the contents of the corresponding Axi DMA register.
151  */
152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
153 {
154 	return ioread32(lp->dma_regs + reg);
155 }
156 
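/* Program a DMA address into a buffer descriptor, filling the phys_msb field
 * only when the core was configured with 64-bit DMA addressing.
 */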
157 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
158 			       struct axidma_bd *desc)
159 {
160 	desc->phys = lower_32_bits(addr);
161 	if (lp->features & XAE_FEATURE_DMA_64BIT)
162 		desc->phys_msb = upper_32_bits(addr);
163 }
164 
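/* Reassemble the DMA address from the descriptor's phys/phys_msb fields. The
 * two 16-bit shifts keep the expression well-defined when dma_addr_t is only
 * 32 bits wide.
 */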
165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
166 				     struct axidma_bd *desc)
167 {
168 	dma_addr_t ret = desc->phys;
169 
170 	if (lp->features & XAE_FEATURE_DMA_64BIT)
171 		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
172 
173 	return ret;
174 }
175 
176 /**
177  * axienet_dma_bd_release - Release buffer descriptor rings
178  * @ndev:	Pointer to the net_device structure
179  *
180  * This function is used to release the descriptors allocated in
181  * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
182  * driver stop routine is called.
183  */
184 static void axienet_dma_bd_release(struct net_device *ndev)
185 {
186 	int i;
187 	struct axienet_local *lp = netdev_priv(ndev);
188 
189 	/* If we end up here, tx_bd_v must have been DMA allocated. */
190 	dma_free_coherent(lp->dev,
191 			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
192 			  lp->tx_bd_v,
193 			  lp->tx_bd_p);
194 
195 	if (!lp->rx_bd_v)
196 		return;
197 
198 	for (i = 0; i < lp->rx_bd_num; i++) {
199 		dma_addr_t phys;
200 
201 		/* A NULL skb means this descriptor has not been initialised
202 		 * at all.
203 		 */
204 		if (!lp->rx_bd_v[i].skb)
205 			break;
206 
207 		dev_kfree_skb(lp->rx_bd_v[i].skb);
208 
209 		/* For each descriptor, we programmed cntrl with the (non-zero)
210 		 * descriptor size, after it had been successfully allocated.
211 		 * So a non-zero value in there means we need to unmap it.
212 		 */
213 		if (lp->rx_bd_v[i].cntrl) {
214 			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
215 			dma_unmap_single(lp->dev, phys,
216 					 lp->max_frm_size, DMA_FROM_DEVICE);
217 		}
218 	}
219 
220 	dma_free_coherent(lp->dev,
221 			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
222 			  lp->rx_bd_v,
223 			  lp->rx_bd_p);
224 }
225 
226 /**
227  * axienet_usec_to_timer - Calculate IRQ delay timer value
228  * @lp:		Pointer to the axienet_local structure
229  * @coalesce_usec: Microseconds to convert into timer value
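 *
 * The delay timer counts in units of 125 SG clock periods, so the register
 * value is coalesce_usec * clk_rate / 125000000, rounded to the nearest
 * integer and clamped to 255. For example, assuming a 100 MHz AXI clock,
 * 50 us maps to 50 * 100000000 / 125000000 = 40.
 *
 * Return: Timer value for the DMA delay field, capped at 255.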
230  */
231 static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
232 {
233 	u32 result;
234 	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
235 
236 	if (lp->axi_clk)
237 		clk_rate = clk_get_rate(lp->axi_clk);
238 
239 	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
240 	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
241 					 (u64)125000000);
242 	if (result > 255)
243 		result = 255;
244 
245 	return result;
246 }
247 
248 /**
249  * axienet_dma_start - Set up DMA registers and start DMA operation
250  * @lp:		Pointer to the axienet_local structure
251  */
252 static void axienet_dma_start(struct axienet_local *lp)
253 {
254 	/* Start updating the Rx channel control register */
255 	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
256 			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
257 	/* Only set interrupt delay timer if not generating an interrupt on
258 	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
259 	 */
260 	if (lp->coalesce_count_rx > 1)
261 		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
262 					<< XAXIDMA_DELAY_SHIFT) |
263 				 XAXIDMA_IRQ_DELAY_MASK;
264 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
265 
266 	/* Start updating the Tx channel control register */
267 	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
268 			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
269 	/* Only set interrupt delay timer if not generating an interrupt on
270 	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
271 	 */
272 	if (lp->coalesce_count_tx > 1)
273 		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
274 					<< XAXIDMA_DELAY_SHIFT) |
275 				 XAXIDMA_IRQ_DELAY_MASK;
276 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
277 
278 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
279 	 * halted state. This will make the Rx side ready for reception.
280 	 */
281 	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
282 	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
283 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
284 	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
285 			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
286 
287 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
288 	 * Tx channel is now ready to run. But only after we write to the
289 	 * tail pointer register that the Tx channel will start transmitting.
290 	 */
291 	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
292 	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
293 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
294 }
295 
296 /**
297  * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
298  * @ndev:	Pointer to the net_device structure
299  *
300  * Return: 0 on success; -ENOMEM on failure.
301  *
302  * This function is called to initialize the Rx and Tx DMA descriptor
303  * rings. It initializes the descriptors with the required default values
304  * and is invoked when the Axi Ethernet driver is reset.
305  */
306 static int axienet_dma_bd_init(struct net_device *ndev)
307 {
308 	int i;
309 	struct sk_buff *skb;
310 	struct axienet_local *lp = netdev_priv(ndev);
311 
312 	/* Reset the indexes which are used for accessing the BDs */
313 	lp->tx_bd_ci = 0;
314 	lp->tx_bd_tail = 0;
315 	lp->rx_bd_ci = 0;
316 
317 	/* Allocate the Tx and Rx buffer descriptors. */
318 	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
319 					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
320 					 &lp->tx_bd_p, GFP_KERNEL);
321 	if (!lp->tx_bd_v)
322 		return -ENOMEM;
323 
324 	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
325 					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
326 					 &lp->rx_bd_p, GFP_KERNEL);
327 	if (!lp->rx_bd_v)
328 		goto out;
329 
330 	for (i = 0; i < lp->tx_bd_num; i++) {
331 		dma_addr_t addr = lp->tx_bd_p +
332 				  sizeof(*lp->tx_bd_v) *
333 				  ((i + 1) % lp->tx_bd_num);
334 
335 		lp->tx_bd_v[i].next = lower_32_bits(addr);
336 		if (lp->features & XAE_FEATURE_DMA_64BIT)
337 			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
338 	}
339 
340 	for (i = 0; i < lp->rx_bd_num; i++) {
341 		dma_addr_t addr;
342 
343 		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
344 			((i + 1) % lp->rx_bd_num);
345 		lp->rx_bd_v[i].next = lower_32_bits(addr);
346 		if (lp->features & XAE_FEATURE_DMA_64BIT)
347 			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
348 
349 		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
350 		if (!skb)
351 			goto out;
352 
353 		lp->rx_bd_v[i].skb = skb;
354 		addr = dma_map_single(lp->dev, skb->data,
355 				      lp->max_frm_size, DMA_FROM_DEVICE);
356 		if (dma_mapping_error(lp->dev, addr)) {
357 			netdev_err(ndev, "DMA mapping error\n");
358 			goto out;
359 		}
360 		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
361 
362 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
363 	}
364 
365 	axienet_dma_start(lp);
366 
367 	return 0;
368 out:
369 	axienet_dma_bd_release(ndev);
370 	return -ENOMEM;
371 }
372 
373 /**
374  * axienet_set_mac_address - Write the MAC address
375  * @ndev:	Pointer to the net_device structure
376  * @address:	6 byte Address to be written as MAC address
377  *
378  * This function is called to initialize the MAC address of the Axi Ethernet
379  * core. It writes to the UAW0 and UAW1 registers of the core.
380  */
381 static void axienet_set_mac_address(struct net_device *ndev,
382 				    const void *address)
383 {
384 	struct axienet_local *lp = netdev_priv(ndev);
385 
386 	if (address)
387 		eth_hw_addr_set(ndev, address);
388 	if (!is_valid_ether_addr(ndev->dev_addr))
389 		eth_hw_addr_random(ndev);
390 
391 	/* Set up the unicast MAC address filter and set its MAC address */
392 	axienet_iow(lp, XAE_UAW0_OFFSET,
393 		    (ndev->dev_addr[0]) |
394 		    (ndev->dev_addr[1] << 8) |
395 		    (ndev->dev_addr[2] << 16) |
396 		    (ndev->dev_addr[3] << 24));
397 	axienet_iow(lp, XAE_UAW1_OFFSET,
398 		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
399 		      ~XAE_UAW1_UNICASTADDR_MASK) |
400 		     (ndev->dev_addr[4] |
401 		     (ndev->dev_addr[5] << 8))));
402 }
403 
404 /**
405  * netdev_set_mac_address - Write the MAC address (from outside the driver)
406  * @ndev:	Pointer to the net_device structure
407  * @p:		Pointer to a sockaddr structure holding the MAC address to be written
408  *
409  * Return: 0 for all conditions. Presently, there is no failure case.
410  *
411  * This function is called to initialize the MAC address of the Axi Ethernet
412  * core. It calls the core specific axienet_set_mac_address. This is the
413  * function that goes into net_device_ops structure entry ndo_set_mac_address.
414  */
415 static int netdev_set_mac_address(struct net_device *ndev, void *p)
416 {
417 	struct sockaddr *addr = p;
418 
419 	axienet_set_mac_address(ndev, addr->sa_data);
420 	return 0;
421 }
422 
423 /**
424  * axienet_set_multicast_list - Prepare the multicast table
425  * @ndev:	Pointer to the net_device structure
426  *
427  * This function is called to initialize the multicast table during
428  * initialization. The Axi Ethernet basic multicast support has a four-entry
429  * multicast table which is initialized here. Additionally this function
430  * goes into the net_device_ops structure entry ndo_set_rx_mode. This
431  * means whenever the multicast table entries need to be updated this
432  * function gets called.
433  */
434 static void axienet_set_multicast_list(struct net_device *ndev)
435 {
436 	int i = 0;
437 	u32 reg, af0reg, af1reg;
438 	struct axienet_local *lp = netdev_priv(ndev);
439 
440 	reg = axienet_ior(lp, XAE_FMI_OFFSET);
441 	reg &= ~XAE_FMI_PM_MASK;
442 	if (ndev->flags & IFF_PROMISC)
443 		reg |= XAE_FMI_PM_MASK;
444 	else
445 		reg &= ~XAE_FMI_PM_MASK;
446 	axienet_iow(lp, XAE_FMI_OFFSET, reg);
447 
448 	if (ndev->flags & IFF_ALLMULTI ||
449 	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
450 		reg &= 0xFFFFFF00;
451 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
452 		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
453 		axienet_iow(lp, XAE_AF1_OFFSET, 0);
454 		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
455 		axienet_iow(lp, XAE_AM1_OFFSET, 0);
456 		axienet_iow(lp, XAE_FFE_OFFSET, 1);
457 		i = 1;
458 	} else if (!netdev_mc_empty(ndev)) {
459 		struct netdev_hw_addr *ha;
460 
461 		netdev_for_each_mc_addr(ha, ndev) {
462 			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
463 				break;
464 
465 			af0reg = (ha->addr[0]);
466 			af0reg |= (ha->addr[1] << 8);
467 			af0reg |= (ha->addr[2] << 16);
468 			af0reg |= (ha->addr[3] << 24);
469 
470 			af1reg = (ha->addr[4]);
471 			af1reg |= (ha->addr[5] << 8);
472 
473 			reg &= 0xFFFFFF00;
474 			reg |= i;
475 
476 			axienet_iow(lp, XAE_FMI_OFFSET, reg);
477 			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
478 			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
479 			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
480 			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
481 			axienet_iow(lp, XAE_FFE_OFFSET, 1);
482 			i++;
483 		}
484 	}
485 
486 	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
487 		reg &= 0xFFFFFF00;
488 		reg |= i;
489 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
490 		axienet_iow(lp, XAE_FFE_OFFSET, 0);
491 	}
492 }
493 
494 /**
495  * axienet_setoptions - Set an Axi Ethernet option
496  * @ndev:	Pointer to the net_device structure
497  * @options:	Option to be enabled/disabled
498  *
499  * The Axi Ethernet core has multiple features which can be selectively turned
500  * on or off. The typical options could be jumbo frame option, basic VLAN
501  * option, promiscuous mode option etc. This function is used to set or clear
502  * these options in the Axi Ethernet hardware. This is done through
503  * the axienet_option structure.
504  */
505 static void axienet_setoptions(struct net_device *ndev, u32 options)
506 {
507 	int reg;
508 	struct axienet_local *lp = netdev_priv(ndev);
509 	struct axienet_option *tp = &axienet_options[0];
510 
511 	while (tp->opt) {
512 		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
513 		if (options & tp->opt)
514 			reg |= tp->m_or;
515 		axienet_iow(lp, tp->reg, reg);
516 		tp++;
517 	}
518 
519 	lp->options |= options;
520 }
521 
522 static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
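/* Return a 64-bit software-extended view of a hardware statistics counter:
 * the accumulated base plus the delta since the counters were last folded in.
 * While a reset is in progress, only the accumulated base is reported because
 * the hardware counter contents are not reliable.
 */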
523 {
524 	u32 counter;
525 
526 	if (lp->reset_in_progress)
527 		return lp->hw_stat_base[stat];
528 
529 	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
530 	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
531 }
532 
533 static void axienet_stats_update(struct axienet_local *lp, bool reset)
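/* Fold the current 32-bit hardware counters into the 64-bit accumulators
 * under the hw_stats_seqcount write side. Called periodically, and with
 * @reset set just before a device reset so that counts are not lost.
 */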
534 {
535 	enum temac_stat stat;
536 
537 	write_seqcount_begin(&lp->hw_stats_seqcount);
538 	lp->reset_in_progress = reset;
539 	for (stat = 0; stat < STAT_COUNT; stat++) {
540 		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
541 
542 		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
543 		lp->hw_last_counter[stat] = counter;
544 	}
545 	write_seqcount_end(&lp->hw_stats_seqcount);
546 }
547 
548 static void axienet_refresh_stats(struct work_struct *work)
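/* Delayed work that periodically folds the hardware counters into the 64-bit
 * accumulators, often enough that the 32-bit counters cannot wrap unnoticed.
 */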
549 {
550 	struct axienet_local *lp = container_of(work, struct axienet_local,
551 						stats_work.work);
552 
553 	mutex_lock(&lp->stats_lock);
554 	axienet_stats_update(lp, false);
555 	mutex_unlock(&lp->stats_lock);
556 
557 	/* Just less than 2^32 bytes at 2.5 GBit/s */
558 	schedule_delayed_work(&lp->stats_work, 13 * HZ);
559 }
560 
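/* Reset the Axi DMA core, which also resets the attached Axi Ethernet core.
 * The statistics accumulators are updated before and after the reset so that
 * counts are not lost, and the function waits for the PHY reset to complete.
 */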
561 static int __axienet_device_reset(struct axienet_local *lp)
562 {
563 	u32 value;
564 	int ret;
565 
566 	/* Save statistics counters in case they will be reset */
567 	mutex_lock(&lp->stats_lock);
568 	if (lp->features & XAE_FEATURE_STATS)
569 		axienet_stats_update(lp, true);
570 
571 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
572 	 * process of Axi DMA takes a while to complete as all pending
573 	 * commands/transfers will be flushed or completed during this
574 	 * reset process.
575 	 * Note that even though both TX and RX have their own reset register,
576 	 * they both reset the entire DMA core, so only one needs to be used.
577 	 */
578 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
579 	ret = read_poll_timeout(axienet_dma_in32, value,
580 				!(value & XAXIDMA_CR_RESET_MASK),
581 				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
582 				XAXIDMA_TX_CR_OFFSET);
583 	if (ret) {
584 		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
585 		goto out;
586 	}
587 
588 	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
589 	ret = read_poll_timeout(axienet_ior, value,
590 				value & XAE_INT_PHYRSTCMPLT_MASK,
591 				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
592 				XAE_IS_OFFSET);
593 	if (ret) {
594 		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
595 		goto out;
596 	}
597 
598 	/* Update statistics counters with new values */
599 	if (lp->features & XAE_FEATURE_STATS) {
600 		enum temac_stat stat;
601 
602 		write_seqcount_begin(&lp->hw_stats_seqcount);
603 		lp->reset_in_progress = false;
604 		for (stat = 0; stat < STAT_COUNT; stat++) {
605 			u32 counter =
606 				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
607 
608 			lp->hw_stat_base[stat] +=
609 				lp->hw_last_counter[stat] - counter;
610 			lp->hw_last_counter[stat] = counter;
611 		}
612 		write_seqcount_end(&lp->hw_stats_seqcount);
613 	}
614 
615 out:
616 	mutex_unlock(&lp->stats_lock);
617 	return ret;
618 }
619 
620 /**
621  * axienet_dma_stop - Stop DMA operation
622  * @lp:		Pointer to the axienet_local structure
623  */
624 static void axienet_dma_stop(struct axienet_local *lp)
625 {
626 	int count;
627 	u32 cr, sr;
628 
629 	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
630 	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
631 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
632 	synchronize_irq(lp->rx_irq);
633 
634 	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
635 	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
636 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
637 	synchronize_irq(lp->tx_irq);
638 
639 	/* Give DMAs a chance to halt gracefully */
640 	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
641 	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
642 		msleep(20);
643 		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
644 	}
645 
646 	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
647 	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
648 		msleep(20);
649 		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
650 	}
651 
652 	/* Do a reset to ensure DMA is really stopped */
653 	axienet_lock_mii(lp);
654 	__axienet_device_reset(lp);
655 	axienet_unlock_mii(lp);
656 }
657 
658 /**
659  * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
660  * @ndev:	Pointer to the net_device structure
661  *
662  * This function is called to reset and initialize the Axi Ethernet core. This
663  * is typically called during initialization. It does a reset of the Axi DMA
664  * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
665  * are connected to Axi Ethernet reset lines, this in turn resets the Axi
666  * Ethernet core. No separate hardware reset is done for the Axi Ethernet
667  * core.
668  * Returns 0 on success or a negative error number otherwise.
669  */
670 static int axienet_device_reset(struct net_device *ndev)
671 {
672 	u32 axienet_status;
673 	struct axienet_local *lp = netdev_priv(ndev);
674 	int ret;
675 
676 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
677 	lp->options |= XAE_OPTION_VLAN;
678 	lp->options &= (~XAE_OPTION_JUMBO);
679 
680 	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
681 		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
682 					XAE_TRL_SIZE;
683 
684 		if (lp->max_frm_size <= lp->rxmem)
685 			lp->options |= XAE_OPTION_JUMBO;
686 	}
687 
688 	if (!lp->use_dmaengine) {
689 		ret = __axienet_device_reset(lp);
690 		if (ret)
691 			return ret;
692 
693 		ret = axienet_dma_bd_init(ndev);
694 		if (ret) {
695 			netdev_err(ndev, "%s: descriptor allocation failed\n",
696 				   __func__);
697 			return ret;
698 		}
699 	}
700 
701 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
702 	axienet_status &= ~XAE_RCW1_RX_MASK;
703 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
704 
705 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
706 	if (axienet_status & XAE_INT_RXRJECT_MASK)
707 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
708 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
709 		    XAE_INT_RECV_ERROR_MASK : 0);
710 
711 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
712 
713 	/* Sync default options with HW but leave receiver and
714 	 * transmitter disabled.
715 	 */
716 	axienet_setoptions(ndev, lp->options &
717 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
718 	axienet_set_mac_address(ndev, NULL);
719 	axienet_set_multicast_list(ndev);
720 	axienet_setoptions(ndev, lp->options);
721 
722 	netif_trans_update(ndev);
723 
724 	return 0;
725 }
726 
727 /**
728  * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
729  * @lp:		Pointer to the axienet_local structure
730  * @first_bd:	Index of first descriptor to clean up
731  * @nr_bds:	Max number of descriptors to clean up
732  * @force:	Whether to clean descriptors even if not complete
733  * @sizep:	Pointer to a u32 filled with the total sum of all bytes
734  *		in all cleaned-up descriptors. Ignored if NULL.
735  * @budget:	NAPI budget (use 0 when not called from NAPI poll)
736  *
737  * Would either be called after a successful transmit operation, or after
738  * there was an error when setting up the chain.
739  * Returns the number of descriptors handled.
740  */
741 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
742 				 int nr_bds, bool force, u32 *sizep, int budget)
743 {
744 	struct axidma_bd *cur_p;
745 	unsigned int status;
746 	dma_addr_t phys;
747 	int i;
748 
749 	for (i = 0; i < nr_bds; i++) {
750 		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
751 		status = cur_p->status;
752 
753 		/* If force is not specified, clean up only descriptors
754 		 * that have been completed by the MAC.
755 		 */
756 		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
757 			break;
758 
759 		/* Ensure we see complete descriptor update */
760 		dma_rmb();
761 		phys = desc_get_phys_addr(lp, cur_p);
762 		dma_unmap_single(lp->dev, phys,
763 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
764 				 DMA_TO_DEVICE);
765 
766 		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
767 			napi_consume_skb(cur_p->skb, budget);
768 
769 		cur_p->app0 = 0;
770 		cur_p->app1 = 0;
771 		cur_p->app2 = 0;
772 		cur_p->app4 = 0;
773 		cur_p->skb = NULL;
774 		/* ensure our transmit path and device don't prematurely see status cleared */
775 		wmb();
776 		cur_p->cntrl = 0;
777 		cur_p->status = 0;
778 
779 		if (sizep)
780 			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
781 	}
782 
783 	return i;
784 }
785 
786 /**
787  * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
788  * @lp:		Pointer to the axienet_local structure
789  * @num_frag:	The number of BDs to check for
790  *
791  * Return: 0, on success
792  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
793  *
794  * This function is invoked before BDs are allocated and transmission starts.
795  * This function returns 0 if a BD or group of BDs can be allocated for
796  * transmission. If the BD or any of the BDs are not free the function
797  * returns a busy status.
798  */
799 static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
800 					    int num_frag)
801 {
802 	struct axidma_bd *cur_p;
803 
804 	/* Ensure we see all descriptor updates from device or TX polling */
805 	rmb();
806 	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
807 			     lp->tx_bd_num];
808 	if (cur_p->cntrl)
809 		return NETDEV_TX_BUSY;
810 	return 0;
811 }
812 
813 /**
814  * axienet_dma_tx_cb - DMA engine callback for TX channel.
815  * @data:       Pointer to the axienet_local structure.
816  * @result:     error reporting through dmaengine_result.
817  * This function is called by dmaengine driver for TX channel to notify
818  * that the transmit is done.
819  */
820 static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
821 {
822 	struct skbuf_dma_descriptor *skbuf_dma;
823 	struct axienet_local *lp = data;
824 	struct netdev_queue *txq;
825 	int len;
826 
827 	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
828 	len = skbuf_dma->skb->len;
829 	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
830 	u64_stats_update_begin(&lp->tx_stat_sync);
831 	u64_stats_add(&lp->tx_bytes, len);
832 	u64_stats_add(&lp->tx_packets, 1);
833 	u64_stats_update_end(&lp->tx_stat_sync);
834 	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
835 	dev_consume_skb_any(skbuf_dma->skb);
836 	netif_txq_completed_wake(txq, 1, len,
837 				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
838 				 2 * MAX_SKB_FRAGS);
839 }
840 
841 /**
842  * axienet_start_xmit_dmaengine - Starts the transmission.
843  * @skb:        sk_buff pointer that contains data to be Txed.
844  * @ndev:       Pointer to net_device structure.
845  *
846  * Return: NETDEV_TX_OK on success and for all errors other than lack of space.
847  *         NETDEV_TX_BUSY when no free element is available in the TX skb
848  *         ring buffer.
849  *
850  * This function is invoked to initiate transmission. It maps the skb into
851  * a scatterlist, registers the DMA completion callback and submits the
852  * DMA transaction.
853  * Additionally, if checksum offloading is supported, it populates the
854  * AXI Stream Control fields with appropriate values.
855  */
856 static netdev_tx_t
857 axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
858 {
859 	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
860 	struct axienet_local *lp = netdev_priv(ndev);
861 	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
862 	struct skbuf_dma_descriptor *skbuf_dma;
863 	struct dma_device *dma_dev;
864 	struct netdev_queue *txq;
865 	u32 csum_start_off;
866 	u32 csum_index_off;
867 	int sg_len;
868 	int ret;
869 
870 	dma_dev = lp->tx_chan->device;
871 	sg_len = skb_shinfo(skb)->nr_frags + 1;
872 	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
873 		netif_stop_queue(ndev);
874 		if (net_ratelimit())
875 			netdev_warn(ndev, "TX ring unexpectedly full\n");
876 		return NETDEV_TX_BUSY;
877 	}
878 
879 	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
880 	if (!skbuf_dma)
881 		goto xmit_error_drop_skb;
882 
883 	lp->tx_ring_head++;
884 	sg_init_table(skbuf_dma->sgl, sg_len);
885 	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
886 	if (ret < 0)
887 		goto xmit_error_drop_skb;
888 
889 	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
890 	if (!ret)
891 		goto xmit_error_drop_skb;
892 
893 	/* Fill up app fields for checksum */
894 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
895 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
896 			/* Tx Full Checksum Offload Enabled */
897 			app_metadata[0] |= 2;
898 		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
899 			csum_start_off = skb_transport_offset(skb);
900 			csum_index_off = csum_start_off + skb->csum_offset;
901 			/* Tx Partial Checksum Offload Enabled */
902 			app_metadata[0] |= 1;
903 			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
904 		}
905 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
906 		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
907 	}
908 
909 	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
910 			sg_len, DMA_MEM_TO_DEV,
911 			DMA_PREP_INTERRUPT, (void *)app_metadata);
912 	if (!dma_tx_desc)
913 		goto xmit_error_unmap_sg;
914 
915 	skbuf_dma->skb = skb;
916 	skbuf_dma->sg_len = sg_len;
917 	dma_tx_desc->callback_param = lp;
918 	dma_tx_desc->callback_result = axienet_dma_tx_cb;
919 	dmaengine_submit(dma_tx_desc);
920 	dma_async_issue_pending(lp->tx_chan);
921 	txq = skb_get_tx_queue(lp->ndev, skb);
922 	netdev_tx_sent_queue(txq, skb->len);
923 	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
924 			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
925 
926 	return NETDEV_TX_OK;
927 
928 xmit_error_unmap_sg:
929 	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
930 xmit_error_drop_skb:
931 	dev_kfree_skb_any(skb);
932 	return NETDEV_TX_OK;
933 }
934 
935 /**
936  * axienet_tx_poll - Invoked once a transmit is completed by the
937  * Axi DMA Tx channel.
938  * @napi:	Pointer to NAPI structure.
939  * @budget:	Max number of TX packets to process.
940  *
941  * Return: Number of TX packets processed.
942  *
943  * This function is invoked from the NAPI processing to notify the completion
944  * of transmit operation. It clears fields in the corresponding Tx BDs and
945  * unmaps the corresponding buffer so that CPU can regain ownership of the
946  * buffer. It finally invokes "netif_wake_queue" to restart transmission if
947  * required.
948  */
949 static int axienet_tx_poll(struct napi_struct *napi, int budget)
950 {
951 	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
952 	struct net_device *ndev = lp->ndev;
953 	u32 size = 0;
954 	int packets;
955 
956 	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
957 
958 	if (packets) {
959 		lp->tx_bd_ci += packets;
960 		if (lp->tx_bd_ci >= lp->tx_bd_num)
961 			lp->tx_bd_ci %= lp->tx_bd_num;
962 
963 		u64_stats_update_begin(&lp->tx_stat_sync);
964 		u64_stats_add(&lp->tx_packets, packets);
965 		u64_stats_add(&lp->tx_bytes, size);
966 		u64_stats_update_end(&lp->tx_stat_sync);
967 
968 		/* Matches barrier in axienet_start_xmit */
969 		smp_mb();
970 
971 		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
972 			netif_wake_queue(ndev);
973 	}
974 
975 	if (packets < budget && napi_complete_done(napi, packets)) {
976 		/* Re-enable TX completion interrupts. This should
977 		 * cause an immediate interrupt if any TX packets are
978 		 * already pending.
979 		 */
980 		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
981 	}
982 	return packets;
983 }
984 
985 /**
986  * axienet_start_xmit - Starts the transmission.
987  * @skb:	sk_buff pointer that contains data to be Txed.
988  * @ndev:	Pointer to net_device structure.
989  *
990  * Return: NETDEV_TX_OK, on success
991  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
992  *
993  * This function is invoked from upper layers to initiate transmission. The
994  * function uses the next available free BDs and populates their fields to
995  * start the transmission. Additionally if checksum offloading is supported,
996  * it populates AXI Stream Control fields with appropriate values.
997  */
998 static netdev_tx_t
999 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1000 {
1001 	u32 ii;
1002 	u32 num_frag;
1003 	u32 csum_start_off;
1004 	u32 csum_index_off;
1005 	skb_frag_t *frag;
1006 	dma_addr_t tail_p, phys;
1007 	u32 orig_tail_ptr, new_tail_ptr;
1008 	struct axienet_local *lp = netdev_priv(ndev);
1009 	struct axidma_bd *cur_p;
1010 
1011 	orig_tail_ptr = lp->tx_bd_tail;
1012 	new_tail_ptr = orig_tail_ptr;
1013 
1014 	num_frag = skb_shinfo(skb)->nr_frags;
1015 	cur_p = &lp->tx_bd_v[orig_tail_ptr];
1016 
1017 	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1018 		/* Should not happen as last start_xmit call should have
1019 		 * checked for sufficient space and queue should only be
1020 		 * woken when sufficient space is available.
1021 		 */
1022 		netif_stop_queue(ndev);
1023 		if (net_ratelimit())
1024 			netdev_warn(ndev, "TX ring unexpectedly full\n");
1025 		return NETDEV_TX_BUSY;
1026 	}
1027 
1028 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1029 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1030 			/* Tx Full Checksum Offload Enabled */
1031 			cur_p->app0 |= 2;
1032 		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1033 			csum_start_off = skb_transport_offset(skb);
1034 			csum_index_off = csum_start_off + skb->csum_offset;
1035 			/* Tx Partial Checksum Offload Enabled */
1036 			cur_p->app0 |= 1;
1037 			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1038 		}
1039 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1040 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1041 	}
1042 
1043 	phys = dma_map_single(lp->dev, skb->data,
1044 			      skb_headlen(skb), DMA_TO_DEVICE);
1045 	if (unlikely(dma_mapping_error(lp->dev, phys))) {
1046 		if (net_ratelimit())
1047 			netdev_err(ndev, "TX DMA mapping error\n");
1048 		ndev->stats.tx_dropped++;
1049 		return NETDEV_TX_OK;
1050 	}
1051 	desc_set_phys_addr(lp, phys, cur_p);
1052 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1053 
1054 	for (ii = 0; ii < num_frag; ii++) {
1055 		if (++new_tail_ptr >= lp->tx_bd_num)
1056 			new_tail_ptr = 0;
1057 		cur_p = &lp->tx_bd_v[new_tail_ptr];
1058 		frag = &skb_shinfo(skb)->frags[ii];
1059 		phys = dma_map_single(lp->dev,
1060 				      skb_frag_address(frag),
1061 				      skb_frag_size(frag),
1062 				      DMA_TO_DEVICE);
1063 		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1064 			if (net_ratelimit())
1065 				netdev_err(ndev, "TX DMA mapping error\n");
1066 			ndev->stats.tx_dropped++;
1067 			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1068 					      true, NULL, 0);
1069 			return NETDEV_TX_OK;
1070 		}
1071 		desc_set_phys_addr(lp, phys, cur_p);
1072 		cur_p->cntrl = skb_frag_size(frag);
1073 	}
1074 
1075 	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1076 	cur_p->skb = skb;
1077 
1078 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1079 	if (++new_tail_ptr >= lp->tx_bd_num)
1080 		new_tail_ptr = 0;
1081 	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1082 
1083 	/* Start the transfer */
1084 	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1085 
1086 	/* Stop queue if next transmit may not have space */
1087 	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1088 		netif_stop_queue(ndev);
1089 
1090 		/* Matches barrier in axienet_tx_poll */
1091 		smp_mb();
1092 
1093 		/* Space might have just been freed - check again */
1094 		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1095 			netif_wake_queue(ndev);
1096 	}
1097 
1098 	return NETDEV_TX_OK;
1099 }
1100 
1101 /**
1102  * axienet_dma_rx_cb - DMA engine callback for RX channel.
1103  * @data:       Pointer to the skbuf_dma_descriptor structure.
1104  * @result:     error reporting through dmaengine_result.
1105  * This function is called by dmaengine driver for RX channel to notify
1106  * that the packet is received.
1107  */
1108 static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1109 {
1110 	struct skbuf_dma_descriptor *skbuf_dma;
1111 	size_t meta_len, meta_max_len, rx_len;
1112 	struct axienet_local *lp = data;
1113 	struct sk_buff *skb;
1114 	u32 *app_metadata;
1115 
1116 	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1117 	skb = skbuf_dma->skb;
1118 	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1119 						       &meta_max_len);
1120 	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1121 			 DMA_FROM_DEVICE);
1122 	/* TODO: Derive app word index programmatically */
1123 	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1124 	skb_put(skb, rx_len);
1125 	skb->protocol = eth_type_trans(skb, lp->ndev);
1126 	skb->ip_summed = CHECKSUM_NONE;
1127 
1128 	__netif_rx(skb);
1129 	u64_stats_update_begin(&lp->rx_stat_sync);
1130 	u64_stats_add(&lp->rx_packets, 1);
1131 	u64_stats_add(&lp->rx_bytes, rx_len);
1132 	u64_stats_update_end(&lp->rx_stat_sync);
1133 	axienet_rx_submit_desc(lp->ndev);
1134 	dma_async_issue_pending(lp->rx_chan);
1135 }
1136 
1137 /**
1138  * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1139  * @napi:	Pointer to NAPI structure.
1140  * @budget:	Max number of RX packets to process.
1141  *
1142  * Return: Number of RX packets processed.
1143  */
1144 static int axienet_rx_poll(struct napi_struct *napi, int budget)
1145 {
1146 	u32 length;
1147 	u32 csumstatus;
1148 	u32 size = 0;
1149 	int packets = 0;
1150 	dma_addr_t tail_p = 0;
1151 	struct axidma_bd *cur_p;
1152 	struct sk_buff *skb, *new_skb;
1153 	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1154 
1155 	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1156 
1157 	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1158 		dma_addr_t phys;
1159 
1160 		/* Ensure we see complete descriptor update */
1161 		dma_rmb();
1162 
1163 		skb = cur_p->skb;
1164 		cur_p->skb = NULL;
1165 
1166 		/* skb could be NULL if a previous pass already received the
1167 		 * packet for this slot in the ring, but failed to refill it
1168 		 * with a newly allocated buffer. In this case, don't try to
1169 		 * receive it again.
1170 		 */
1171 		if (likely(skb)) {
1172 			length = cur_p->app4 & 0x0000FFFF;
1173 
1174 			phys = desc_get_phys_addr(lp, cur_p);
1175 			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1176 					 DMA_FROM_DEVICE);
1177 
1178 			skb_put(skb, length);
1179 			skb->protocol = eth_type_trans(skb, lp->ndev);
1180 			/*skb_checksum_none_assert(skb);*/
1181 			skb->ip_summed = CHECKSUM_NONE;
1182 
1183 			/* if we're doing Rx csum offload, set it up */
1184 			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1185 				csumstatus = (cur_p->app2 &
1186 					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1187 				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1188 				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1189 					skb->ip_summed = CHECKSUM_UNNECESSARY;
1190 				}
1191 			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
1192 				   skb->protocol == htons(ETH_P_IP) &&
1193 				   skb->len > 64) {
1194 				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1195 				skb->ip_summed = CHECKSUM_COMPLETE;
1196 			}
1197 
1198 			napi_gro_receive(napi, skb);
1199 
1200 			size += length;
1201 			packets++;
1202 		}
1203 
1204 		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1205 		if (!new_skb)
1206 			break;
1207 
1208 		phys = dma_map_single(lp->dev, new_skb->data,
1209 				      lp->max_frm_size,
1210 				      DMA_FROM_DEVICE);
1211 		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1212 			if (net_ratelimit())
1213 				netdev_err(lp->ndev, "RX DMA mapping error\n");
1214 			dev_kfree_skb(new_skb);
1215 			break;
1216 		}
1217 		desc_set_phys_addr(lp, phys, cur_p);
1218 
1219 		cur_p->cntrl = lp->max_frm_size;
1220 		cur_p->status = 0;
1221 		cur_p->skb = new_skb;
1222 
1223 		/* Only update tail_p to mark this slot as usable after it has
1224 		 * been successfully refilled.
1225 		 */
1226 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1227 
1228 		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1229 			lp->rx_bd_ci = 0;
1230 		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1231 	}
1232 
1233 	u64_stats_update_begin(&lp->rx_stat_sync);
1234 	u64_stats_add(&lp->rx_packets, packets);
1235 	u64_stats_add(&lp->rx_bytes, size);
1236 	u64_stats_update_end(&lp->rx_stat_sync);
1237 
1238 	if (tail_p)
1239 		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1240 
1241 	if (packets < budget && napi_complete_done(napi, packets)) {
1242 		/* Re-enable RX completion interrupts. This should
1243 		 * cause an immediate interrupt if any RX packets are
1244 		 * already pending.
1245 		 */
1246 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1247 	}
1248 	return packets;
1249 }
1250 
1251 /**
1252  * axienet_tx_irq - Tx Done Isr.
1253  * @irq:	irq number
1254  * @_ndev:	net_device pointer
1255  *
1256  * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1257  *
1258  * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1259  * TX BD processing.
1260  */
1261 static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1262 {
1263 	unsigned int status;
1264 	struct net_device *ndev = _ndev;
1265 	struct axienet_local *lp = netdev_priv(ndev);
1266 
1267 	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1268 
1269 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1270 		return IRQ_NONE;
1271 
1272 	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1273 
1274 	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1275 		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1276 		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1277 			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1278 			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1279 		schedule_work(&lp->dma_err_task);
1280 	} else {
1281 		/* Disable further TX completion interrupts and schedule
1282 		 * NAPI to handle the completions.
1283 		 */
1284 		u32 cr = lp->tx_dma_cr;
1285 
1286 		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1287 		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1288 
1289 		napi_schedule(&lp->napi_tx);
1290 	}
1291 
1292 	return IRQ_HANDLED;
1293 }
1294 
1295 /**
1296  * axienet_rx_irq - Rx Isr.
1297  * @irq:	irq number
1298  * @_ndev:	net_device pointer
1299  *
1300  * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1301  *
1302  * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1303  * processing.
1304  */
1305 static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1306 {
1307 	unsigned int status;
1308 	struct net_device *ndev = _ndev;
1309 	struct axienet_local *lp = netdev_priv(ndev);
1310 
1311 	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1312 
1313 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1314 		return IRQ_NONE;
1315 
1316 	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1317 
1318 	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1319 		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1320 		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1321 			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1322 			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1323 		schedule_work(&lp->dma_err_task);
1324 	} else {
1325 		/* Disable further RX completion interrupts and schedule
1326 		 * NAPI receive.
1327 		 */
1328 		u32 cr = lp->rx_dma_cr;
1329 
1330 		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1331 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1332 
1333 		napi_schedule(&lp->napi_rx);
1334 	}
1335 
1336 	return IRQ_HANDLED;
1337 }
1338 
1339 /**
1340  * axienet_eth_irq - Ethernet core Isr.
1341  * @irq:	irq number
1342  * @_ndev:	net_device pointer
1343  *
1344  * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1345  *
1346  * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1347  */
1348 static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1349 {
1350 	struct net_device *ndev = _ndev;
1351 	struct axienet_local *lp = netdev_priv(ndev);
1352 	unsigned int pending;
1353 
1354 	pending = axienet_ior(lp, XAE_IP_OFFSET);
1355 	if (!pending)
1356 		return IRQ_NONE;
1357 
1358 	if (pending & XAE_INT_RXFIFOOVR_MASK)
1359 		ndev->stats.rx_missed_errors++;
1360 
1361 	if (pending & XAE_INT_RXRJECT_MASK)
1362 		ndev->stats.rx_dropped++;
1363 
1364 	axienet_iow(lp, XAE_IS_OFFSET, pending);
1365 	return IRQ_HANDLED;
1366 }
1367 
1368 static void axienet_dma_err_handler(struct work_struct *work);
1369 
1370 /**
1371  * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
1372  * Allocate an skbuff, map it into the scatterlist, obtain a descriptor
1373  * and then add the callback information and submit the descriptor.
1374  *
1375  * @ndev:	net_device pointer
1376  *
1377  */
1378 static void axienet_rx_submit_desc(struct net_device *ndev)
1379 {
1380 	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1381 	struct axienet_local *lp = netdev_priv(ndev);
1382 	struct skbuf_dma_descriptor *skbuf_dma;
1383 	struct sk_buff *skb;
1384 	dma_addr_t addr;
1385 
1386 	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1387 	if (!skbuf_dma)
1388 		return;
1389 
1390 	lp->rx_ring_head++;
1391 	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1392 	if (!skb)
1393 		return;
1394 
1395 	sg_init_table(skbuf_dma->sgl, 1);
1396 	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1397 	if (unlikely(dma_mapping_error(lp->dev, addr))) {
1398 		if (net_ratelimit())
1399 			netdev_err(ndev, "DMA mapping error\n");
1400 		goto rx_submit_err_free_skb;
1401 	}
1402 	sg_dma_address(skbuf_dma->sgl) = addr;
1403 	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1404 	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1405 					      1, DMA_DEV_TO_MEM,
1406 					      DMA_PREP_INTERRUPT);
1407 	if (!dma_rx_desc)
1408 		goto rx_submit_err_unmap_skb;
1409 
1410 	skbuf_dma->skb = skb;
1411 	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1412 	skbuf_dma->desc = dma_rx_desc;
1413 	dma_rx_desc->callback_param = lp;
1414 	dma_rx_desc->callback_result = axienet_dma_rx_cb;
1415 	dmaengine_submit(dma_rx_desc);
1416 
1417 	return;
1418 
1419 rx_submit_err_unmap_skb:
1420 	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1421 rx_submit_err_free_skb:
1422 	dev_kfree_skb(skb);
1423 }
1424 
1425 /**
1426  * axienet_init_dmaengine - init the dmaengine code.
1427  * @ndev:       Pointer to net_device structure
1428  *
1429  * Return: 0, on success.
1430  *          non-zero error value on failure
1431  *
1432  * This is the dmaengine initialization code.
1433  */
1434 static int axienet_init_dmaengine(struct net_device *ndev)
1435 {
1436 	struct axienet_local *lp = netdev_priv(ndev);
1437 	struct skbuf_dma_descriptor *skbuf_dma;
1438 	int i, ret;
1439 
1440 	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1441 	if (IS_ERR(lp->tx_chan)) {
1442 		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1443 		return PTR_ERR(lp->tx_chan);
1444 	}
1445 
1446 	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1447 	if (IS_ERR(lp->rx_chan)) {
1448 		ret = PTR_ERR(lp->rx_chan);
1449 		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1450 		goto err_dma_release_tx;
1451 	}
1452 
1453 	lp->tx_ring_tail = 0;
1454 	lp->tx_ring_head = 0;
1455 	lp->rx_ring_tail = 0;
1456 	lp->rx_ring_head = 0;
1457 	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1458 				  GFP_KERNEL);
1459 	if (!lp->tx_skb_ring) {
1460 		ret = -ENOMEM;
1461 		goto err_dma_release_rx;
1462 	}
1463 	for (i = 0; i < TX_BD_NUM_MAX; i++) {
1464 		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1465 		if (!skbuf_dma) {
1466 			ret = -ENOMEM;
1467 			goto err_free_tx_skb_ring;
1468 		}
1469 		lp->tx_skb_ring[i] = skbuf_dma;
1470 	}
1471 
1472 	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1473 				  GFP_KERNEL);
1474 	if (!lp->rx_skb_ring) {
1475 		ret = -ENOMEM;
1476 		goto err_free_tx_skb_ring;
1477 	}
1478 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1479 		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1480 		if (!skbuf_dma) {
1481 			ret = -ENOMEM;
1482 			goto err_free_rx_skb_ring;
1483 		}
1484 		lp->rx_skb_ring[i] = skbuf_dma;
1485 	}
1486 	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1487 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1488 		axienet_rx_submit_desc(ndev);
1489 	dma_async_issue_pending(lp->rx_chan);
1490 
1491 	return 0;
1492 
1493 err_free_rx_skb_ring:
1494 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1495 		kfree(lp->rx_skb_ring[i]);
1496 	kfree(lp->rx_skb_ring);
1497 err_free_tx_skb_ring:
1498 	for (i = 0; i < TX_BD_NUM_MAX; i++)
1499 		kfree(lp->tx_skb_ring[i]);
1500 	kfree(lp->tx_skb_ring);
1501 err_dma_release_rx:
1502 	dma_release_channel(lp->rx_chan);
1503 err_dma_release_tx:
1504 	dma_release_channel(lp->tx_chan);
1505 	return ret;
1506 }
1507 
1508 /**
1509  * axienet_init_legacy_dma - init the dma legacy code.
1510  * @ndev:       Pointer to net_device structure
1511  *
1512  * Return: 0, on success.
1513  *          non-zero error value on failure
1514  *
1515  * This is the legacy DMA initialization code. It also requests the
1516  * interrupt lines and installs the interrupt service routines.
1517  *
1518  */
1519 static int axienet_init_legacy_dma(struct net_device *ndev)
1520 {
1521 	int ret;
1522 	struct axienet_local *lp = netdev_priv(ndev);
1523 
1524 	/* Enable worker thread for Axi DMA error handling */
1525 	lp->stopping = false;
1526 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1527 
1528 	napi_enable(&lp->napi_rx);
1529 	napi_enable(&lp->napi_tx);
1530 
1531 	/* Enable interrupts for Axi DMA Tx */
1532 	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1533 			  ndev->name, ndev);
1534 	if (ret)
1535 		goto err_tx_irq;
1536 	/* Enable interrupts for Axi DMA Rx */
1537 	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1538 			  ndev->name, ndev);
1539 	if (ret)
1540 		goto err_rx_irq;
1541 	/* Enable interrupts for Axi Ethernet core (if defined) */
1542 	if (lp->eth_irq > 0) {
1543 		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1544 				  ndev->name, ndev);
1545 		if (ret)
1546 			goto err_eth_irq;
1547 	}
1548 
1549 	return 0;
1550 
1551 err_eth_irq:
1552 	free_irq(lp->rx_irq, ndev);
1553 err_rx_irq:
1554 	free_irq(lp->tx_irq, ndev);
1555 err_tx_irq:
1556 	napi_disable(&lp->napi_tx);
1557 	napi_disable(&lp->napi_rx);
1558 	cancel_work_sync(&lp->dma_err_task);
1559 	dev_err(lp->dev, "request_irq() failed\n");
1560 	return ret;
1561 }
1562 
1563 /**
1564  * axienet_open - Driver open routine.
1565  * @ndev:	Pointer to net_device structure
1566  *
1567  * Return: 0, on success.
1568  *	    non-zero error value on failure
1569  *
1570  * This is the driver open routine. It calls phylink_start to start the
1571  * PHY device.
1572  * It also allocates interrupt service routines, enables the interrupt lines
1573  * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1574  * descriptors are initialized.
1575  */
1576 static int axienet_open(struct net_device *ndev)
1577 {
1578 	int ret;
1579 	struct axienet_local *lp = netdev_priv(ndev);
1580 
1581 	/* When we do an Axi Ethernet reset, it resets the complete core
1582 	 * including the MDIO. MDIO must be disabled before resetting.
1583 	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1584 	 */
1585 	axienet_lock_mii(lp);
1586 	ret = axienet_device_reset(ndev);
1587 	axienet_unlock_mii(lp);
1588 
1589 	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1590 	if (ret) {
1591 		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1592 		return ret;
1593 	}
1594 
1595 	phylink_start(lp->phylink);
1596 
1597 	/* Start the statistics refresh work */
1598 	schedule_delayed_work(&lp->stats_work, 0);
1599 
1600 	if (lp->use_dmaengine) {
1601 		/* Enable interrupts for Axi Ethernet core (if defined) */
1602 		if (lp->eth_irq > 0) {
1603 			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1604 					  ndev->name, ndev);
1605 			if (ret)
1606 				goto err_phy;
1607 		}
1608 
1609 		ret = axienet_init_dmaengine(ndev);
1610 		if (ret < 0)
1611 			goto err_free_eth_irq;
1612 	} else {
1613 		ret = axienet_init_legacy_dma(ndev);
1614 		if (ret)
1615 			goto err_phy;
1616 	}
1617 
1618 	return 0;
1619 
1620 err_free_eth_irq:
1621 	if (lp->eth_irq > 0)
1622 		free_irq(lp->eth_irq, ndev);
1623 err_phy:
1624 	cancel_delayed_work_sync(&lp->stats_work);
1625 	phylink_stop(lp->phylink);
1626 	phylink_disconnect_phy(lp->phylink);
1627 	return ret;
1628 }
1629 
1630 /**
1631  * axienet_stop - Driver stop routine.
1632  * @ndev:	Pointer to net_device structure
1633  *
1634  * Return: 0, on success.
1635  *
1636  * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1637  * device. It also removes the interrupt handlers and disables the interrupts.
1638  * The Axi DMA Tx/Rx BDs are released.
1639  */
1640 static int axienet_stop(struct net_device *ndev)
1641 {
1642 	struct axienet_local *lp = netdev_priv(ndev);
1643 	int i;
1644 
1645 	if (!lp->use_dmaengine) {
1646 		WRITE_ONCE(lp->stopping, true);
1647 		flush_work(&lp->dma_err_task);
1648 
1649 		napi_disable(&lp->napi_tx);
1650 		napi_disable(&lp->napi_rx);
1651 	}
1652 
1653 	cancel_delayed_work_sync(&lp->stats_work);
1654 
1655 	phylink_stop(lp->phylink);
1656 	phylink_disconnect_phy(lp->phylink);
1657 
1658 	axienet_setoptions(ndev, lp->options &
1659 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1660 
1661 	if (!lp->use_dmaengine) {
1662 		axienet_dma_stop(lp);
1663 		cancel_work_sync(&lp->dma_err_task);
1664 		free_irq(lp->tx_irq, ndev);
1665 		free_irq(lp->rx_irq, ndev);
1666 		axienet_dma_bd_release(ndev);
1667 	} else {
1668 		dmaengine_terminate_sync(lp->tx_chan);
1669 		dmaengine_synchronize(lp->tx_chan);
1670 		dmaengine_terminate_sync(lp->rx_chan);
1671 		dmaengine_synchronize(lp->rx_chan);
1672 
1673 		for (i = 0; i < TX_BD_NUM_MAX; i++)
1674 			kfree(lp->tx_skb_ring[i]);
1675 		kfree(lp->tx_skb_ring);
1676 		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1677 			kfree(lp->rx_skb_ring[i]);
1678 		kfree(lp->rx_skb_ring);
1679 
1680 		dma_release_channel(lp->rx_chan);
1681 		dma_release_channel(lp->tx_chan);
1682 	}
1683 
1684 	axienet_iow(lp, XAE_IE_OFFSET, 0);
1685 
1686 	if (lp->eth_irq > 0)
1687 		free_irq(lp->eth_irq, ndev);
1688 	return 0;
1689 }
1690 
1691 /**
1692  * axienet_change_mtu - Driver change mtu routine.
1693  * @ndev:	Pointer to net_device structure
1694  * @new_mtu:	New mtu value to be applied
1695  *
1696  * Return: 0 on success, or a negative error value on failure.
1697  *
1698  * This is the change mtu driver routine. It checks if the Axi Ethernet
1699  * hardware supports jumbo frames before changing the mtu. This can be
1700  * called only when the device is not up.
1701  */
1702 static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1703 {
1704 	struct axienet_local *lp = netdev_priv(ndev);
1705 
1706 	if (netif_running(ndev))
1707 		return -EBUSY;
1708 
1709 	if ((new_mtu + VLAN_ETH_HLEN +
1710 		XAE_TRL_SIZE) > lp->rxmem)
1711 		return -EINVAL;
1712 
1713 	WRITE_ONCE(ndev->mtu, new_mtu);
1714 
1715 	return 0;
1716 }
1717 
1718 #ifdef CONFIG_NET_POLL_CONTROLLER
1719 /**
1720  * axienet_poll_controller - Axi Ethernet poll mechanism.
1721  * @ndev:	Pointer to net_device structure
1722  *
1723  * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1724  * to polling the ISRs and are enabled back after the polling is done.
1725  */
1726 static void axienet_poll_controller(struct net_device *ndev)
1727 {
1728 	struct axienet_local *lp = netdev_priv(ndev);
1729 
1730 	disable_irq(lp->tx_irq);
1731 	disable_irq(lp->rx_irq);
1732 	axienet_rx_irq(lp->rx_irq, ndev);
1733 	axienet_tx_irq(lp->tx_irq, ndev);
1734 	enable_irq(lp->tx_irq);
1735 	enable_irq(lp->rx_irq);
1736 }
1737 #endif
1738 
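/**
 * axienet_ioctl - Ioctl handler, used mainly for PHY MII ioctls.
 * @dev:	Pointer to net_device structure
 * @rq:		Pointer to the interface request structure
 * @cmd:	Ioctl command
 *
 * Forwards MII ioctls to phylink.
 *
 * Return: 0 on success, -EINVAL if the interface is not running, or a
 * negative error value returned by phylink.
 */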
1739 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1740 {
1741 	struct axienet_local *lp = netdev_priv(dev);
1742 
1743 	if (!netif_running(dev))
1744 		return -EINVAL;
1745 
1746 	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1747 }
1748 
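/**
 * axienet_get_stats64 - Fill in the 64-bit device statistics.
 * @dev:	Pointer to net_device structure
 * @stats:	Pointer to rtnl_link_stats64 structure to be filled in
 *
 * Combines the software Rx/Tx packet and byte counters with, when the
 * hardware statistics block is present, the error and multicast counters
 * read from the MAC statistics registers.
 */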
1749 static void
1750 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1751 {
1752 	struct axienet_local *lp = netdev_priv(dev);
1753 	unsigned int start;
1754 
1755 	netdev_stats_to_stats64(stats, &dev->stats);
1756 
1757 	do {
1758 		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1759 		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1760 		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1761 	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1762 
1763 	do {
1764 		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1765 		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1766 		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1767 	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1768 
1769 	if (!(lp->features & XAE_FEATURE_STATS))
1770 		return;
1771 
1772 	do {
1773 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
1774 		stats->rx_length_errors =
1775 			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1776 		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1777 		stats->rx_frame_errors =
1778 			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1779 		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1780 				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1781 				   stats->rx_length_errors +
1782 				   stats->rx_crc_errors +
1783 				   stats->rx_frame_errors;
1784 		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1785 
1786 		stats->tx_aborted_errors =
1787 			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1788 		stats->tx_fifo_errors =
1789 			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1790 		stats->tx_window_errors =
1791 			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1792 		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1793 				   stats->tx_aborted_errors +
1794 				   stats->tx_fifo_errors +
1795 				   stats->tx_window_errors;
1796 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1797 }
1798 
1799 static const struct net_device_ops axienet_netdev_ops = {
1800 	.ndo_open = axienet_open,
1801 	.ndo_stop = axienet_stop,
1802 	.ndo_start_xmit = axienet_start_xmit,
1803 	.ndo_get_stats64 = axienet_get_stats64,
1804 	.ndo_change_mtu	= axienet_change_mtu,
1805 	.ndo_set_mac_address = netdev_set_mac_address,
1806 	.ndo_validate_addr = eth_validate_addr,
1807 	.ndo_eth_ioctl = axienet_ioctl,
1808 	.ndo_set_rx_mode = axienet_set_multicast_list,
1809 #ifdef CONFIG_NET_POLL_CONTROLLER
1810 	.ndo_poll_controller = axienet_poll_controller,
1811 #endif
1812 };
1813 
1814 static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1815 	.ndo_open = axienet_open,
1816 	.ndo_stop = axienet_stop,
1817 	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1818 	.ndo_get_stats64 = axienet_get_stats64,
1819 	.ndo_change_mtu	= axienet_change_mtu,
1820 	.ndo_set_mac_address = netdev_set_mac_address,
1821 	.ndo_validate_addr = eth_validate_addr,
1822 	.ndo_eth_ioctl = axienet_ioctl,
1823 	.ndo_set_rx_mode = axienet_set_multicast_list,
1824 };
1825 
1826 /**
1827  * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1828  * @ndev:	Pointer to net_device structure
1829  * @ed:		Pointer to ethtool_drvinfo structure
1830  *
1831  * This implements ethtool command for getting the driver information.
1832  * Issue "ethtool -i ethX" from the Linux command line to execute this function.
1833  */
1834 static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1835 					 struct ethtool_drvinfo *ed)
1836 {
1837 	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1838 	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1839 }
1840 
1841 /**
1842  * axienet_ethtools_get_regs_len - Get the total regs length present in the
1843  *				   AxiEthernet core.
1844  * @ndev:	Pointer to net_device structure
1845  *
1846  * This implements ethtool command for getting the total register length
1847  * information.
1848  *
1849  * Return: the total regs length
1850  */
1851 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1852 {
1853 	return sizeof(u32) * AXIENET_REGS_N;
1854 }
1855 
1856 /**
1857  * axienet_ethtools_get_regs - Dump the contents of all registers present
1858  *			       in AxiEthernet core.
1859  * @ndev:	Pointer to net_device structure
1860  * @regs:	Pointer to ethtool_regs structure
1861  * @ret:	Void pointer used to return the contents of the registers.
1862  *
1863  * This implements ethtool command for getting the Axi Ethernet register dump.
1864  * Issue "ethtool -d ethX" to execute this function.
1865  */
1866 static void axienet_ethtools_get_regs(struct net_device *ndev,
1867 				      struct ethtool_regs *regs, void *ret)
1868 {
1869 	u32 *data = (u32 *)ret;
1870 	size_t len = sizeof(u32) * AXIENET_REGS_N;
1871 	struct axienet_local *lp = netdev_priv(ndev);
1872 
1873 	regs->version = 0;
1874 	regs->len = len;
1875 
1876 	memset(data, 0, len);
1877 	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1878 	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1879 	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1880 	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1881 	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1882 	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1883 	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1884 	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1885 	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1886 	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1887 	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1888 	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1889 	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1890 	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1891 	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1892 	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1893 	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1894 	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1895 	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1896 	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1897 	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1898 	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1899 	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1900 	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1901 	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1902 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1903 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1904 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1905 	if (!lp->use_dmaengine) {
1906 		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1907 		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1908 		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1909 		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1910 		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1911 		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1912 		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1913 		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1914 	}
1915 }
1916 
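/**
 * axienet_ethtools_get_ringparam - Get the DMA ring parameters.
 * @ndev:	Pointer to net_device structure
 * @ering:	Pointer to ethtool_ringparam structure
 * @kernel_ering:	Kernel-only ring parameters (unused)
 * @extack:	extack for reporting error messages
 *
 * Reports the configured and maximum number of Tx/Rx buffer descriptors.
 * Issue "ethtool -g ethX" to execute this function.
 */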
1917 static void
1918 axienet_ethtools_get_ringparam(struct net_device *ndev,
1919 			       struct ethtool_ringparam *ering,
1920 			       struct kernel_ethtool_ringparam *kernel_ering,
1921 			       struct netlink_ext_ack *extack)
1922 {
1923 	struct axienet_local *lp = netdev_priv(ndev);
1924 
1925 	ering->rx_max_pending = RX_BD_NUM_MAX;
1926 	ering->rx_mini_max_pending = 0;
1927 	ering->rx_jumbo_max_pending = 0;
1928 	ering->tx_max_pending = TX_BD_NUM_MAX;
1929 	ering->rx_pending = lp->rx_bd_num;
1930 	ering->rx_mini_pending = 0;
1931 	ering->rx_jumbo_pending = 0;
1932 	ering->tx_pending = lp->tx_bd_num;
1933 }
1934 
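/**
 * axienet_ethtools_set_ringparam - Set the DMA ring parameters.
 * @ndev:	Pointer to net_device structure
 * @ering:	Pointer to ethtool_ringparam structure
 * @kernel_ering:	Kernel-only ring parameters (unused)
 * @extack:	extack for reporting error messages
 *
 * Updates the number of Tx/Rx buffer descriptors; the new sizes take effect
 * the next time the interface is brought up. Issue "ethtool -G ethX rx N tx M"
 * to execute this function.
 *
 * Return: 0 on success, -EINVAL for out-of-range values, -EBUSY if the
 * interface is running.
 */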
1935 static int
1936 axienet_ethtools_set_ringparam(struct net_device *ndev,
1937 			       struct ethtool_ringparam *ering,
1938 			       struct kernel_ethtool_ringparam *kernel_ering,
1939 			       struct netlink_ext_ack *extack)
1940 {
1941 	struct axienet_local *lp = netdev_priv(ndev);
1942 
1943 	if (ering->rx_pending > RX_BD_NUM_MAX ||
1944 	    ering->rx_mini_pending ||
1945 	    ering->rx_jumbo_pending ||
1946 	    ering->tx_pending < TX_BD_NUM_MIN ||
1947 	    ering->tx_pending > TX_BD_NUM_MAX)
1948 		return -EINVAL;
1949 
1950 	if (netif_running(ndev))
1951 		return -EBUSY;
1952 
1953 	lp->rx_bd_num = ering->rx_pending;
1954 	lp->tx_bd_num = ering->tx_pending;
1955 	return 0;
1956 }
1957 
1958 /**
1959  * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1960  *				     Tx and Rx paths.
1961  * @ndev:	Pointer to net_device structure
1962  * @epauseparm:	Pointer to ethtool_pauseparam structure.
1963  *
1964  * This implements ethtool command for getting the Axi Ethernet pause frame
1965  * setting. Issue "ethtool -a ethX" to execute this function.
1966  */
1967 static void
1968 axienet_ethtools_get_pauseparam(struct net_device *ndev,
1969 				struct ethtool_pauseparam *epauseparm)
1970 {
1971 	struct axienet_local *lp = netdev_priv(ndev);
1972 
1973 	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1974 }
1975 
1976 /**
1977  * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1978  *				     settings.
1979  * @ndev:	Pointer to net_device structure
1980  * @epauseparm:	Pointer to ethtool_pauseparam structure
1981  *
1982  * This implements ethtool command for enabling flow control on Rx and Tx
1983  * paths. Issue "ethtool -A ethX tx on|off" from the Linux command line to
1984  * execute this function.
1985  *
1986  * Return: 0 on success, or a negative error code returned by phylink.
1987  */
1988 static int
1989 axienet_ethtools_set_pauseparam(struct net_device *ndev,
1990 				struct ethtool_pauseparam *epauseparm)
1991 {
1992 	struct axienet_local *lp = netdev_priv(ndev);
1993 
1994 	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
1995 }
1996 
1997 /**
1998  * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
1999  * @ndev:	Pointer to net_device structure
2000  * @ecoalesce:	Pointer to ethtool_coalesce structure
2001  * @kernel_coal: ethtool CQE mode setting structure
2002  * @extack:	extack for reporting error messages
2003  *
2004  * This implements ethtool command for getting the DMA interrupt coalescing
2005  * count on Tx and Rx paths. Issue "ethtool -c ethX" from the Linux command
2006  * line to execute this function.
2007  *
2008  * Return: 0 always
2009  */
2010 static int
2011 axienet_ethtools_get_coalesce(struct net_device *ndev,
2012 			      struct ethtool_coalesce *ecoalesce,
2013 			      struct kernel_ethtool_coalesce *kernel_coal,
2014 			      struct netlink_ext_ack *extack)
2015 {
2016 	struct axienet_local *lp = netdev_priv(ndev);
2017 
2018 	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2019 	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2020 	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2021 	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
2022 	return 0;
2023 }
2024 
2025 /**
2026  * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2027  * @ndev:	Pointer to net_device structure
2028  * @ecoalesce:	Pointer to ethtool_coalesce structure
2029  * @kernel_coal: ethtool CQE mode setting structure
2030  * @extack:	extack for reporting error messages
2031  *
2032  * This implements ethtool command for setting the DMA interrupt coalescing
2033  * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" from the
2034  * Linux command line to execute this function.
2035  *
2036  * Return: 0 on success, or a negative error value on failure.
2037  */
2038 static int
2039 axienet_ethtools_set_coalesce(struct net_device *ndev,
2040 			      struct ethtool_coalesce *ecoalesce,
2041 			      struct kernel_ethtool_coalesce *kernel_coal,
2042 			      struct netlink_ext_ack *extack)
2043 {
2044 	struct axienet_local *lp = netdev_priv(ndev);
2045 
2046 	if (netif_running(ndev)) {
2047 		NL_SET_ERR_MSG(extack,
2048 			       "Please stop netif before applying configuration");
2049 		return -EBUSY;
2050 	}
2051 
2052 	if (ecoalesce->rx_max_coalesced_frames)
2053 		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2054 	if (ecoalesce->rx_coalesce_usecs)
2055 		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2056 	if (ecoalesce->tx_max_coalesced_frames)
2057 		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2058 	if (ecoalesce->tx_coalesce_usecs)
2059 		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2060 
2061 	return 0;
2062 }
2063 
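/**
 * axienet_ethtools_get_link_ksettings - Get link settings via phylink.
 * @ndev:	Pointer to net_device structure
 * @cmd:	Pointer to ethtool_link_ksettings structure to be filled in
 *
 * Return: 0 on success, or a negative error value on failure.
 */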
2064 static int
2065 axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2066 				    struct ethtool_link_ksettings *cmd)
2067 {
2068 	struct axienet_local *lp = netdev_priv(ndev);
2069 
2070 	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2071 }
2072 
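/**
 * axienet_ethtools_set_link_ksettings - Set link settings via phylink.
 * @ndev:	Pointer to net_device structure
 * @cmd:	Pointer to the requested ethtool_link_ksettings
 *
 * Return: 0 on success, or a negative error value on failure.
 */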
2073 static int
2074 axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2075 				    const struct ethtool_link_ksettings *cmd)
2076 {
2077 	struct axienet_local *lp = netdev_priv(ndev);
2078 
2079 	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2080 }
2081 
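/**
 * axienet_ethtools_nway_reset - Restart link autonegotiation via phylink.
 * @dev:	Pointer to net_device structure
 *
 * Return: 0 on success, or a negative error value on failure.
 */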
2082 static int axienet_ethtools_nway_reset(struct net_device *dev)
2083 {
2084 	struct axienet_local *lp = netdev_priv(dev);
2085 
2086 	return phylink_ethtool_nway_reset(lp->phylink);
2087 }
2088 
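/**
 * axienet_ethtools_get_ethtool_stats - Read the extended hardware statistics.
 * @dev:	Pointer to net_device structure
 * @stats:	Pointer to ethtool_stats structure
 * @data:	Output buffer, one u64 per entry in
 *		axienet_ethtool_stats_strings
 *
 * The counters are sampled under hw_stats_seqcount so that a consistent
 * snapshot is returned even while the statistics refresh work is running.
 */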
2089 static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2090 					       struct ethtool_stats *stats,
2091 					       u64 *data)
2092 {
2093 	struct axienet_local *lp = netdev_priv(dev);
2094 	unsigned int start;
2095 
2096 	do {
2097 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2098 		data[0] = axienet_stat(lp, STAT_RX_BYTES);
2099 		data[1] = axienet_stat(lp, STAT_TX_BYTES);
2100 		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2101 		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2102 		data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2103 		data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2104 		data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
2105 		data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
2106 		data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
2107 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2108 }
2109 
2110 static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2111 	"Received bytes",
2112 	"Transmitted bytes",
2113 	"RX Good VLAN Tagged Frames",
2114 	"TX Good VLAN Tagged Frames",
2115 	"TX Good PFC Frames",
2116 	"RX Good PFC Frames",
2117 	"User Defined Counter 0",
2118 	"User Defined Counter 1",
2119 	"User Defined Counter 2",
2120 };
2121 
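/**
 * axienet_ethtools_get_strings - Return the names of the extended statistics.
 * @dev:	Pointer to net_device structure
 * @stringset:	Requested string set (only ETH_SS_STATS is supported)
 * @data:	Output buffer for the strings
 */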
2122 static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2123 {
2124 	switch (stringset) {
2125 	case ETH_SS_STATS:
2126 		memcpy(data, axienet_ethtool_stats_strings,
2127 		       sizeof(axienet_ethtool_stats_strings));
2128 		break;
2129 	}
2130 }
2131 
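/**
 * axienet_ethtools_get_sset_count - Get the number of extended statistics.
 * @dev:	Pointer to net_device structure
 * @sset:	Requested string set
 *
 * Return: the number of statistics strings for ETH_SS_STATS when the
 * hardware statistics block is present, -EOPNOTSUPP otherwise.
 */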
2132 static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2133 {
2134 	struct axienet_local *lp = netdev_priv(dev);
2135 
2136 	switch (sset) {
2137 	case ETH_SS_STATS:
2138 		if (lp->features & XAE_FEATURE_STATS)
2139 			return ARRAY_SIZE(axienet_ethtool_stats_strings);
2140 		fallthrough;
2141 	default:
2142 		return -EOPNOTSUPP;
2143 	}
2144 }
2145 
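/**
 * axienet_ethtools_get_pause_stats - Get the pause frame counters.
 * @dev:	Pointer to net_device structure
 * @pause_stats:	Pointer to ethtool_pause_stats structure to be filled in
 *
 * Only reported when the hardware statistics block is present.
 */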
2146 static void
2147 axienet_ethtools_get_pause_stats(struct net_device *dev,
2148 				 struct ethtool_pause_stats *pause_stats)
2149 {
2150 	struct axienet_local *lp = netdev_priv(dev);
2151 	unsigned int start;
2152 
2153 	if (!(lp->features & XAE_FEATURE_STATS))
2154 		return;
2155 
2156 	do {
2157 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2158 		pause_stats->tx_pause_frames =
2159 			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2160 		pause_stats->rx_pause_frames =
2161 			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2162 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2163 }
2164 
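/**
 * axienet_ethtool_get_eth_mac_stats - Get the standard MAC statistics.
 * @dev:	Pointer to net_device structure
 * @mac_stats:	Pointer to ethtool_eth_mac_stats structure to be filled in
 *
 * Only reported when the hardware statistics block is present.
 */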
2165 static void
2166 axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2167 				  struct ethtool_eth_mac_stats *mac_stats)
2168 {
2169 	struct axienet_local *lp = netdev_priv(dev);
2170 	unsigned int start;
2171 
2172 	if (!(lp->features & XAE_FEATURE_STATS))
2173 		return;
2174 
2175 	do {
2176 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2177 		mac_stats->FramesTransmittedOK =
2178 			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2179 		mac_stats->SingleCollisionFrames =
2180 			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2181 		mac_stats->MultipleCollisionFrames =
2182 			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2183 		mac_stats->FramesReceivedOK =
2184 			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2185 		mac_stats->FrameCheckSequenceErrors =
2186 			axienet_stat(lp, STAT_RX_FCS_ERRORS);
2187 		mac_stats->AlignmentErrors =
2188 			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2189 		mac_stats->FramesWithDeferredXmissions =
2190 			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2191 		mac_stats->LateCollisions =
2192 			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2193 		mac_stats->FramesAbortedDueToXSColls =
2194 			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2195 		mac_stats->MulticastFramesXmittedOK =
2196 			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2197 		mac_stats->BroadcastFramesXmittedOK =
2198 			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2199 		mac_stats->FramesWithExcessiveDeferral =
2200 			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2201 		mac_stats->MulticastFramesReceivedOK =
2202 			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2203 		mac_stats->BroadcastFramesReceivedOK =
2204 			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2205 		mac_stats->InRangeLengthErrors =
2206 			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2207 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2208 }
2209 
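/**
 * axienet_ethtool_get_eth_ctrl_stats - Get the MAC control frame statistics.
 * @dev:	Pointer to net_device structure
 * @ctrl_stats:	Pointer to ethtool_eth_ctrl_stats structure to be filled in
 *
 * Only reported when the hardware statistics block is present.
 */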
2210 static void
2211 axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2212 				   struct ethtool_eth_ctrl_stats *ctrl_stats)
2213 {
2214 	struct axienet_local *lp = netdev_priv(dev);
2215 	unsigned int start;
2216 
2217 	if (!(lp->features & XAE_FEATURE_STATS))
2218 		return;
2219 
2220 	do {
2221 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2222 		ctrl_stats->MACControlFramesTransmitted =
2223 			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2224 		ctrl_stats->MACControlFramesReceived =
2225 			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2226 		ctrl_stats->UnsupportedOpcodesReceived =
2227 			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2228 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2229 }
2230 
2231 static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2232 	{   64,    64 },
2233 	{   65,   127 },
2234 	{  128,   255 },
2235 	{  256,   511 },
2236 	{  512,  1023 },
2237 	{ 1024,  1518 },
2238 	{ 1519, 16384 },
2239 	{ },
2240 };
2241 
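/**
 * axienet_ethtool_get_rmon_stats - Get the RMON frame-size histograms.
 * @dev:	Pointer to net_device structure
 * @rmon_stats:	Pointer to ethtool_rmon_stats structure to be filled in
 * @ranges:	Returns the frame-size ranges used by the histogram counters
 *
 * Only reported when the hardware statistics block is present.
 */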
2242 static void
2243 axienet_ethtool_get_rmon_stats(struct net_device *dev,
2244 			       struct ethtool_rmon_stats *rmon_stats,
2245 			       const struct ethtool_rmon_hist_range **ranges)
2246 {
2247 	struct axienet_local *lp = netdev_priv(dev);
2248 	unsigned int start;
2249 
2250 	if (!(lp->features & XAE_FEATURE_STATS))
2251 		return;
2252 
2253 	do {
2254 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2255 		rmon_stats->undersize_pkts =
2256 			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2257 		rmon_stats->oversize_pkts =
2258 			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2259 		rmon_stats->fragments =
2260 			axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2261 
2262 		rmon_stats->hist[0] =
2263 			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2264 		rmon_stats->hist[1] =
2265 			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2266 		rmon_stats->hist[2] =
2267 			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2268 		rmon_stats->hist[3] =
2269 			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2270 		rmon_stats->hist[4] =
2271 			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2272 		rmon_stats->hist[5] =
2273 			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2274 		rmon_stats->hist[6] =
2275 			rmon_stats->oversize_pkts;
2276 
2277 		rmon_stats->hist_tx[0] =
2278 			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2279 		rmon_stats->hist_tx[1] =
2280 			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2281 		rmon_stats->hist_tx[2] =
2282 			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2283 		rmon_stats->hist_tx[3] =
2284 			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2285 		rmon_stats->hist_tx[4] =
2286 			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2287 		rmon_stats->hist_tx[5] =
2288 			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2289 		rmon_stats->hist_tx[6] =
2290 			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2291 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2292 
2293 	*ranges = axienet_rmon_ranges;
2294 }
2295 
2296 static const struct ethtool_ops axienet_ethtool_ops = {
2297 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2298 				     ETHTOOL_COALESCE_USECS,
2299 	.get_drvinfo    = axienet_ethtools_get_drvinfo,
2300 	.get_regs_len   = axienet_ethtools_get_regs_len,
2301 	.get_regs       = axienet_ethtools_get_regs,
2302 	.get_link       = ethtool_op_get_link,
2303 	.get_ringparam	= axienet_ethtools_get_ringparam,
2304 	.set_ringparam	= axienet_ethtools_set_ringparam,
2305 	.get_pauseparam = axienet_ethtools_get_pauseparam,
2306 	.set_pauseparam = axienet_ethtools_set_pauseparam,
2307 	.get_coalesce   = axienet_ethtools_get_coalesce,
2308 	.set_coalesce   = axienet_ethtools_set_coalesce,
2309 	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2310 	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2311 	.nway_reset	= axienet_ethtools_nway_reset,
2312 	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2313 	.get_strings    = axienet_ethtools_get_strings,
2314 	.get_sset_count = axienet_ethtools_get_sset_count,
2315 	.get_pause_stats = axienet_ethtools_get_pause_stats,
2316 	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2317 	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2318 	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
2319 };
2320 
2321 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2322 {
2323 	return container_of(pcs, struct axienet_local, pcs);
2324 }
2325 
2326 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2327 				  struct phylink_link_state *state)
2328 {
2329 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2330 
2331 	phylink_mii_c22_pcs_get_state(pcs_phy, state);
2332 }
2333 
2334 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2335 {
2336 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2337 
2338 	phylink_mii_c22_pcs_an_restart(pcs_phy);
2339 }
2340 
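/**
 * axienet_pcs_config - Configure the internal PCS/PMA PHY.
 * @pcs:	Pointer to the phylink_pcs instance
 * @neg_mode:	phylink negotiation mode
 * @interface:	PHY interface mode (SGMII or 1000BASE-X)
 * @advertising:	Advertised link modes
 * @permit_pause_to_mac:	Whether pause resolution may be passed to the MAC
 *
 * When the core supports dynamic SGMII/1000BASE-X switching, select the
 * requested standard first, then program the PCS through the MDIO-attached
 * PCS/PMA PHY.
 *
 * Return: 0 on success, or a negative error value on failure.
 */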
2341 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2342 			      phy_interface_t interface,
2343 			      const unsigned long *advertising,
2344 			      bool permit_pause_to_mac)
2345 {
2346 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2347 	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2348 	struct axienet_local *lp = netdev_priv(ndev);
2349 	int ret;
2350 
2351 	if (lp->switch_x_sgmii) {
2352 		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2353 				    interface == PHY_INTERFACE_MODE_SGMII ?
2354 					XLNX_MII_STD_SELECT_SGMII : 0);
2355 		if (ret < 0) {
2356 			netdev_warn(ndev,
2357 				    "Failed to switch PHY interface: %d\n",
2358 				    ret);
2359 			return ret;
2360 		}
2361 	}
2362 
2363 	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2364 					 neg_mode);
2365 	if (ret < 0)
2366 		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2367 
2368 	return ret;
2369 }
2370 
2371 static const struct phylink_pcs_ops axienet_pcs_ops = {
2372 	.pcs_get_state = axienet_pcs_get_state,
2373 	.pcs_config = axienet_pcs_config,
2374 	.pcs_an_restart = axienet_pcs_an_restart,
2375 };
2376 
2377 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2378 						  phy_interface_t interface)
2379 {
2380 	struct net_device *ndev = to_net_dev(config->dev);
2381 	struct axienet_local *lp = netdev_priv(ndev);
2382 
2383 	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2384 	    interface ==  PHY_INTERFACE_MODE_SGMII)
2385 		return &lp->pcs;
2386 
2387 	return NULL;
2388 }
2389 
2390 static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2391 			       const struct phylink_link_state *state)
2392 {
2393 	/* nothing meaningful to do */
2394 }
2395 
2396 static void axienet_mac_link_down(struct phylink_config *config,
2397 				  unsigned int mode,
2398 				  phy_interface_t interface)
2399 {
2400 	/* nothing meaningful to do */
2401 }
2402 
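/**
 * axienet_mac_link_up - Program the MAC for the resolved link state.
 * @config:	Pointer to the phylink_config structure
 * @phy:	Pointer to the phy_device, if any
 * @mode:	Link configuration mode
 * @interface:	PHY interface mode
 * @speed:	Resolved link speed
 * @duplex:	Resolved duplex mode
 * @tx_pause:	Whether Tx pause frames are enabled
 * @rx_pause:	Whether Rx pause frames are enabled
 *
 * Set the MAC speed in the EMMC register and enable or disable flow
 * control in the FCC register to match the negotiated link parameters.
 */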
2403 static void axienet_mac_link_up(struct phylink_config *config,
2404 				struct phy_device *phy,
2405 				unsigned int mode, phy_interface_t interface,
2406 				int speed, int duplex,
2407 				bool tx_pause, bool rx_pause)
2408 {
2409 	struct net_device *ndev = to_net_dev(config->dev);
2410 	struct axienet_local *lp = netdev_priv(ndev);
2411 	u32 emmc_reg, fcc_reg;
2412 
2413 	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2414 	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2415 
2416 	switch (speed) {
2417 	case SPEED_1000:
2418 		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2419 		break;
2420 	case SPEED_100:
2421 		emmc_reg |= XAE_EMMC_LINKSPD_100;
2422 		break;
2423 	case SPEED_10:
2424 		emmc_reg |= XAE_EMMC_LINKSPD_10;
2425 		break;
2426 	default:
2427 		dev_err(&ndev->dev,
2428 			"Speed other than 10, 100 or 1Gbps is not supported\n");
2429 		break;
2430 	}
2431 
2432 	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2433 
2434 	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2435 	if (tx_pause)
2436 		fcc_reg |= XAE_FCC_FCTX_MASK;
2437 	else
2438 		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2439 	if (rx_pause)
2440 		fcc_reg |= XAE_FCC_FCRX_MASK;
2441 	else
2442 		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2443 	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2444 }
2445 
2446 static const struct phylink_mac_ops axienet_phylink_ops = {
2447 	.mac_select_pcs = axienet_mac_select_pcs,
2448 	.mac_config = axienet_mac_config,
2449 	.mac_link_down = axienet_mac_link_down,
2450 	.mac_link_up = axienet_mac_link_up,
2451 };
2452 
2453 /**
2454  * axienet_dma_err_handler - Work queue task for Axi DMA Error
2455  * @work:	pointer to work_struct
2456  *
2457  * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2458  * Tx/Rx BDs.
2459  */
2460 static void axienet_dma_err_handler(struct work_struct *work)
2461 {
2462 	u32 i;
2463 	u32 axienet_status;
2464 	struct axidma_bd *cur_p;
2465 	struct axienet_local *lp = container_of(work, struct axienet_local,
2466 						dma_err_task);
2467 	struct net_device *ndev = lp->ndev;
2468 
2469 	/* Don't bother if we are going to stop anyway */
2470 	if (READ_ONCE(lp->stopping))
2471 		return;
2472 
2473 	napi_disable(&lp->napi_tx);
2474 	napi_disable(&lp->napi_rx);
2475 
2476 	axienet_setoptions(ndev, lp->options &
2477 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2478 
2479 	axienet_dma_stop(lp);
2480 
2481 	for (i = 0; i < lp->tx_bd_num; i++) {
2482 		cur_p = &lp->tx_bd_v[i];
2483 		if (cur_p->cntrl) {
2484 			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2485 
2486 			dma_unmap_single(lp->dev, addr,
2487 					 (cur_p->cntrl &
2488 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2489 					 DMA_TO_DEVICE);
2490 		}
2491 		if (cur_p->skb)
2492 			dev_kfree_skb_irq(cur_p->skb);
2493 		cur_p->phys = 0;
2494 		cur_p->phys_msb = 0;
2495 		cur_p->cntrl = 0;
2496 		cur_p->status = 0;
2497 		cur_p->app0 = 0;
2498 		cur_p->app1 = 0;
2499 		cur_p->app2 = 0;
2500 		cur_p->app3 = 0;
2501 		cur_p->app4 = 0;
2502 		cur_p->skb = NULL;
2503 	}
2504 
2505 	for (i = 0; i < lp->rx_bd_num; i++) {
2506 		cur_p = &lp->rx_bd_v[i];
2507 		cur_p->status = 0;
2508 		cur_p->app0 = 0;
2509 		cur_p->app1 = 0;
2510 		cur_p->app2 = 0;
2511 		cur_p->app3 = 0;
2512 		cur_p->app4 = 0;
2513 	}
2514 
2515 	lp->tx_bd_ci = 0;
2516 	lp->tx_bd_tail = 0;
2517 	lp->rx_bd_ci = 0;
2518 
2519 	axienet_dma_start(lp);
2520 
2521 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2522 	axienet_status &= ~XAE_RCW1_RX_MASK;
2523 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2524 
2525 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2526 	if (axienet_status & XAE_INT_RXRJECT_MASK)
2527 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2528 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2529 		    XAE_INT_RECV_ERROR_MASK : 0);
2530 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2531 
2532 	/* Sync default options with HW but leave receiver and
2533 	 * transmitter disabled.
2534 	 */
2535 	axienet_setoptions(ndev, lp->options &
2536 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2537 	axienet_set_mac_address(ndev, NULL);
2538 	axienet_set_multicast_list(ndev);
2539 	napi_enable(&lp->napi_rx);
2540 	napi_enable(&lp->napi_tx);
2541 	axienet_setoptions(ndev, lp->options);
2542 }
2543 
2544 /**
2545  * axienet_probe - Axi Ethernet probe function.
2546  * @pdev:	Pointer to platform device structure.
2547  *
2548  * Return: 0, on success
2549  *	    Non-zero error value on failure.
2550  *
2551  * This is the probe routine for Axi Ethernet driver. This is called before
2552  * any other driver routines are invoked. It allocates and sets up the Ethernet
2553  * device, parses the device tree to populate the fields of axienet_local,
2554  * and registers the Ethernet device.
2555  */
2556 static int axienet_probe(struct platform_device *pdev)
2557 {
2558 	int ret;
2559 	struct device_node *np;
2560 	struct axienet_local *lp;
2561 	struct net_device *ndev;
2562 	struct resource *ethres;
2563 	u8 mac_addr[ETH_ALEN];
2564 	int addr_width = 32;
2565 	u32 value;
2566 
2567 	ndev = alloc_etherdev(sizeof(*lp));
2568 	if (!ndev)
2569 		return -ENOMEM;
2570 
2571 	platform_set_drvdata(pdev, ndev);
2572 
2573 	SET_NETDEV_DEV(ndev, &pdev->dev);
2574 	ndev->features = NETIF_F_SG;
2575 	ndev->ethtool_ops = &axienet_ethtool_ops;
2576 
2577 	/* MTU range: 64 - 9000 */
2578 	ndev->min_mtu = 64;
2579 	ndev->max_mtu = XAE_JUMBO_MTU;
2580 
2581 	lp = netdev_priv(ndev);
2582 	lp->ndev = ndev;
2583 	lp->dev = &pdev->dev;
2584 	lp->options = XAE_OPTION_DEFAULTS;
2585 	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2586 	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2587 
2588 	u64_stats_init(&lp->rx_stat_sync);
2589 	u64_stats_init(&lp->tx_stat_sync);
2590 
2591 	mutex_init(&lp->stats_lock);
2592 	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2593 	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2594 
2595 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2596 	if (!lp->axi_clk) {
2597 		/* For backward compatibility, if named AXI clock is not present,
2598 		 * treat the first clock specified as the AXI clock.
2599 		 */
2600 		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2601 	}
2602 	if (IS_ERR(lp->axi_clk)) {
2603 		ret = PTR_ERR(lp->axi_clk);
2604 		goto free_netdev;
2605 	}
2606 	ret = clk_prepare_enable(lp->axi_clk);
2607 	if (ret) {
2608 		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2609 		goto free_netdev;
2610 	}
2611 
2612 	lp->misc_clks[0].id = "axis_clk";
2613 	lp->misc_clks[1].id = "ref_clk";
2614 	lp->misc_clks[2].id = "mgt_clk";
2615 
2616 	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2617 	if (ret)
2618 		goto cleanup_clk;
2619 
2620 	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2621 	if (ret)
2622 		goto cleanup_clk;
2623 
2624 	/* Map device registers */
2625 	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2626 	if (IS_ERR(lp->regs)) {
2627 		ret = PTR_ERR(lp->regs);
2628 		goto cleanup_clk;
2629 	}
2630 	lp->regs_start = ethres->start;
2631 
2632 	/* Setup checksum offload, but default to off if not specified */
2633 	lp->features = 0;
2634 
2635 	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2636 		lp->features |= XAE_FEATURE_STATS;
2637 
2638 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2639 	if (!ret) {
2640 		switch (value) {
2641 		case 1:
2642 			lp->csum_offload_on_tx_path =
2643 				XAE_FEATURE_PARTIAL_TX_CSUM;
2644 			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2645 			/* Can checksum TCP/UDP over IPv4. */
2646 			ndev->features |= NETIF_F_IP_CSUM;
2647 			break;
2648 		case 2:
2649 			lp->csum_offload_on_tx_path =
2650 				XAE_FEATURE_FULL_TX_CSUM;
2651 			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2652 			/* Can checksum TCP/UDP over IPv4. */
2653 			ndev->features |= NETIF_F_IP_CSUM;
2654 			break;
2655 		default:
2656 			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
2657 		}
2658 	}
2659 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2660 	if (!ret) {
2661 		switch (value) {
2662 		case 1:
2663 			lp->csum_offload_on_rx_path =
2664 				XAE_FEATURE_PARTIAL_RX_CSUM;
2665 			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2666 			break;
2667 		case 2:
2668 			lp->csum_offload_on_rx_path =
2669 				XAE_FEATURE_FULL_RX_CSUM;
2670 			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2671 			break;
2672 		default:
2673 			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
2674 		}
2675 	}
2676 	/* For supporting jumbo frames, the Axi Ethernet hardware must have
2677 	 * a large enough Rx/Tx memory, able to hold a maximum-sized jumbo
2678 	 * frame, before the jumbo option can be enabled.
2679 	 * Here we read the Rx memory size configured in the hardware from the
2680 	 * device tree; it is later used to validate the requested MTU.
2681 	 */
2682 	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2683 
2684 	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2685 						   "xlnx,switch-x-sgmii");
2686 
2687 	/* Start with the proprietary, and broken phy_type */
2688 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2689 	if (!ret) {
2690 		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2691 		switch (value) {
2692 		case XAE_PHY_TYPE_MII:
2693 			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2694 			break;
2695 		case XAE_PHY_TYPE_GMII:
2696 			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2697 			break;
2698 		case XAE_PHY_TYPE_RGMII_2_0:
2699 			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2700 			break;
2701 		case XAE_PHY_TYPE_SGMII:
2702 			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2703 			break;
2704 		case XAE_PHY_TYPE_1000BASE_X:
2705 			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2706 			break;
2707 		default:
2708 			ret = -EINVAL;
2709 			goto cleanup_clk;
2710 		}
2711 	} else {
2712 		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2713 		if (ret)
2714 			goto cleanup_clk;
2715 	}
2716 	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2717 	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2718 		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2719 		ret = -EINVAL;
2720 		goto cleanup_clk;
2721 	}
2722 
2723 	if (!of_property_present(pdev->dev.of_node, "dmas")) {
2724 		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2725 		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2726 
2727 		if (np) {
2728 			struct resource dmares;
2729 
2730 			ret = of_address_to_resource(np, 0, &dmares);
2731 			if (ret) {
2732 				dev_err(&pdev->dev,
2733 					"unable to get DMA resource\n");
2734 				of_node_put(np);
2735 				goto cleanup_clk;
2736 			}
2737 			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2738 							     &dmares);
2739 			lp->rx_irq = irq_of_parse_and_map(np, 1);
2740 			lp->tx_irq = irq_of_parse_and_map(np, 0);
2741 			of_node_put(np);
2742 			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2743 		} else {
2744 			/* Check for these resources directly on the Ethernet node. */
2745 			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2746 			lp->rx_irq = platform_get_irq(pdev, 1);
2747 			lp->tx_irq = platform_get_irq(pdev, 0);
2748 			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2749 		}
2750 		if (IS_ERR(lp->dma_regs)) {
2751 			dev_err(&pdev->dev, "could not map DMA regs\n");
2752 			ret = PTR_ERR(lp->dma_regs);
2753 			goto cleanup_clk;
2754 		}
2755 		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2756 			dev_err(&pdev->dev, "could not determine irqs\n");
2757 			ret = -ENOMEM;
2758 			goto cleanup_clk;
2759 		}
2760 
2761 		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2762 		ret = __axienet_device_reset(lp);
2763 		if (ret)
2764 			goto cleanup_clk;
2765 
2766 		/* Autodetect the need for 64-bit DMA pointers.
2767 		 * When the IP is configured for a bus width bigger than 32 bits,
2768 		 * writing the MSB registers is mandatory, even if they are all 0.
2769 		 * We can detect this case by writing all 1's to one such register
2770 		 * and see if that sticks: when the IP is configured for 32 bits
2771 		 * only, those registers are RES0.
2772 		 * Those MSB registers were introduced in IP v7.1, which we check first.
2773 		 */
2774 		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2775 			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2776 
2777 			iowrite32(0x0, desc);
2778 			if (ioread32(desc) == 0) {	/* sanity check */
2779 				iowrite32(0xffffffff, desc);
2780 				if (ioread32(desc) > 0) {
2781 					lp->features |= XAE_FEATURE_DMA_64BIT;
2782 					addr_width = 64;
2783 					dev_info(&pdev->dev,
2784 						 "autodetected 64-bit DMA range\n");
2785 				}
2786 				iowrite32(0x0, desc);
2787 			}
2788 		}
2789 		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2790 			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
2791 			ret = -EINVAL;
2792 			goto cleanup_clk;
2793 		}
2794 
2795 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2796 		if (ret) {
2797 			dev_err(&pdev->dev, "No suitable DMA available\n");
2798 			goto cleanup_clk;
2799 		}
2800 		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2801 		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2802 	} else {
2803 		struct xilinx_vdma_config cfg;
2804 		struct dma_chan *tx_chan;
2805 
2806 		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2807 		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2808 			ret = lp->eth_irq;
2809 			goto cleanup_clk;
2810 		}
2811 		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2812 		if (IS_ERR(tx_chan)) {
2813 			ret = PTR_ERR(tx_chan);
2814 			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2815 			goto cleanup_clk;
2816 		}
2817 
2818 		cfg.reset = 1;
2819 		/* Despite the VDMA name, this config also supports resetting DMA channels */
2820 		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2821 		if (ret < 0) {
2822 			dev_err(&pdev->dev, "Reset channel failed\n");
2823 			dma_release_channel(tx_chan);
2824 			goto cleanup_clk;
2825 		}
2826 
2827 		dma_release_channel(tx_chan);
2828 		lp->use_dmaengine = 1;
2829 	}
2830 
2831 	if (lp->use_dmaengine)
2832 		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2833 	else
2834 		ndev->netdev_ops = &axienet_netdev_ops;
2835 	/* Check for Ethernet core IRQ (optional) */
2836 	if (lp->eth_irq <= 0)
2837 		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2838 
2839 	/* Retrieve the MAC address */
2840 	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2841 	if (!ret) {
2842 		axienet_set_mac_address(ndev, mac_addr);
2843 	} else {
2844 		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2845 			 ret);
2846 		axienet_set_mac_address(ndev, NULL);
2847 	}
2848 
2849 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2850 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2851 	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2852 	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2853 
2854 	ret = axienet_mdio_setup(lp);
2855 	if (ret)
2856 		dev_warn(&pdev->dev,
2857 			 "error registering MDIO bus: %d\n", ret);
2858 
2859 	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2860 	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2861 		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2862 		if (!np) {
2863 			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2864 			 * Falling back to "phy-handle" here is only for
2865 			 * backward compatibility with old device trees.
2866 			 */
2867 			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2868 		}
2869 		if (!np) {
2870 			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2871 			ret = -EINVAL;
2872 			goto cleanup_mdio;
2873 		}
2874 		lp->pcs_phy = of_mdio_find_device(np);
2875 		if (!lp->pcs_phy) {
2876 			ret = -EPROBE_DEFER;
2877 			of_node_put(np);
2878 			goto cleanup_mdio;
2879 		}
2880 		of_node_put(np);
2881 		lp->pcs.ops = &axienet_pcs_ops;
2882 		lp->pcs.neg_mode = true;
2883 		lp->pcs.poll = true;
2884 	}
2885 
2886 	lp->phylink_config.dev = &ndev->dev;
2887 	lp->phylink_config.type = PHYLINK_NETDEV;
2888 	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2889 		MAC_10FD | MAC_100FD | MAC_1000FD;
2890 
2891 	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2892 	if (lp->switch_x_sgmii) {
2893 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2894 			  lp->phylink_config.supported_interfaces);
2895 		__set_bit(PHY_INTERFACE_MODE_SGMII,
2896 			  lp->phylink_config.supported_interfaces);
2897 	}
2898 
2899 	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2900 				     lp->phy_mode,
2901 				     &axienet_phylink_ops);
2902 	if (IS_ERR(lp->phylink)) {
2903 		ret = PTR_ERR(lp->phylink);
2904 		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2905 		goto cleanup_mdio;
2906 	}
2907 
2908 	ret = register_netdev(lp->ndev);
2909 	if (ret) {
2910 		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2911 		goto cleanup_phylink;
2912 	}
2913 
2914 	return 0;
2915 
2916 cleanup_phylink:
2917 	phylink_destroy(lp->phylink);
2918 
2919 cleanup_mdio:
2920 	if (lp->pcs_phy)
2921 		put_device(&lp->pcs_phy->dev);
2922 	if (lp->mii_bus)
2923 		axienet_mdio_teardown(lp);
2924 cleanup_clk:
2925 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2926 	clk_disable_unprepare(lp->axi_clk);
2927 
2928 free_netdev:
2929 	free_netdev(ndev);
2930 
2931 	return ret;
2932 }
2933 
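/**
 * axienet_remove - Axi Ethernet remove function.
 * @pdev:	Pointer to platform device structure.
 *
 * Unregisters the network device, tears down phylink, the PCS PHY reference
 * and the MDIO bus, disables the clocks and frees the net_device.
 */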
2934 static void axienet_remove(struct platform_device *pdev)
2935 {
2936 	struct net_device *ndev = platform_get_drvdata(pdev);
2937 	struct axienet_local *lp = netdev_priv(ndev);
2938 
2939 	unregister_netdev(ndev);
2940 
2941 	if (lp->phylink)
2942 		phylink_destroy(lp->phylink);
2943 
2944 	if (lp->pcs_phy)
2945 		put_device(&lp->pcs_phy->dev);
2946 
2947 	axienet_mdio_teardown(lp);
2948 
2949 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2950 	clk_disable_unprepare(lp->axi_clk);
2951 
2952 	free_netdev(ndev);
2953 }
2954 
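/**
 * axienet_shutdown - Axi Ethernet shutdown function.
 * @pdev:	Pointer to platform device structure.
 *
 * Detaches the network device and closes it if it was running.
 */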
2955 static void axienet_shutdown(struct platform_device *pdev)
2956 {
2957 	struct net_device *ndev = platform_get_drvdata(pdev);
2958 
2959 	rtnl_lock();
2960 	netif_device_detach(ndev);
2961 
2962 	if (netif_running(ndev))
2963 		dev_close(ndev);
2964 
2965 	rtnl_unlock();
2966 }
2967 
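/**
 * axienet_suspend - Suspend hook.
 * @dev:	Pointer to the device structure
 *
 * Detach the network device and stop it if it was running.
 *
 * Return: Always 0.
 */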
2968 static int axienet_suspend(struct device *dev)
2969 {
2970 	struct net_device *ndev = dev_get_drvdata(dev);
2971 
2972 	if (!netif_running(ndev))
2973 		return 0;
2974 
2975 	netif_device_detach(ndev);
2976 
2977 	rtnl_lock();
2978 	axienet_stop(ndev);
2979 	rtnl_unlock();
2980 
2981 	return 0;
2982 }
2983 
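/**
 * axienet_resume - Resume hook.
 * @dev:	Pointer to the device structure
 *
 * Reopen the network device if it was running before suspend and
 * reattach it.
 *
 * Return: Always 0.
 */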
2984 static int axienet_resume(struct device *dev)
2985 {
2986 	struct net_device *ndev = dev_get_drvdata(dev);
2987 
2988 	if (!netif_running(ndev))
2989 		return 0;
2990 
2991 	rtnl_lock();
2992 	axienet_open(ndev);
2993 	rtnl_unlock();
2994 
2995 	netif_device_attach(ndev);
2996 
2997 	return 0;
2998 }
2999 
3000 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3001 				axienet_suspend, axienet_resume);
3002 
3003 static struct platform_driver axienet_driver = {
3004 	.probe = axienet_probe,
3005 	.remove_new = axienet_remove,
3006 	.shutdown = axienet_shutdown,
3007 	.driver = {
3008 		 .name = "xilinx_axienet",
3009 		 .pm = &axienet_pm_ops,
3010 		 .of_match_table = axienet_of_match,
3011 	},
3012 };
3013 
3014 module_platform_driver(axienet_driver);
3015 
3016 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3017 MODULE_AUTHOR("Xilinx");
3018 MODULE_LICENSE("GPL");
3019