xref: /linux/drivers/net/ethernet/xilinx/xilinx_axienet_main.c (revision 4b66d18918f8e4d85e51974a9e3ce9abad5c7c3d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Xilinx Axi Ethernet device driver
4  *
5  * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
6  * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
7  * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
8  * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
9  * Copyright (c) 2010 - 2011 PetaLogix
10  * Copyright (c) 2019 - 2022 Calian Advanced Technologies
11  * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
12  *
13  * This is a driver for the Xilinx Axi Ethernet, which is used in the Virtex-6
14  * and Spartan-6 device families.
15  *
16  * TODO:
17  *  - Add Axi Fifo support.
18  *  - Factor out Axi DMA code into separate driver.
19  *  - Test and fix basic multicast filtering.
20  *  - Add support for extended multicast filtering.
21  *  - Test basic VLAN support.
22  *  - Add extended VLAN support.
23  */
24 
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/etherdevice.h>
28 #include <linux/module.h>
29 #include <linux/netdevice.h>
30 #include <linux/of.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_address.h>
35 #include <linux/platform_device.h>
36 #include <linux/skbuff.h>
37 #include <linux/math64.h>
38 #include <linux/phy.h>
39 #include <linux/mii.h>
40 #include <linux/ethtool.h>
41 #include <linux/dmaengine.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/dma/xilinx_dma.h>
44 #include <linux/circ_buf.h>
45 #include <net/netdev_queues.h>
46 
47 #include "xilinx_axienet.h"
48 
49 /* Descriptor defines for Tx and Rx DMA */
50 #define TX_BD_NUM_DEFAULT		128
51 #define RX_BD_NUM_DEFAULT		1024
52 #define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
53 #define TX_BD_NUM_MAX			4096
54 #define RX_BD_NUM_MAX			4096
55 #define DMA_NUM_APP_WORDS		5
56 #define LEN_APP				4
57 #define RX_BUF_NUM_DEFAULT		128
58 
59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
60 #define DRIVER_NAME		"xaxienet"
61 #define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
62 #define DRIVER_VERSION		"1.00a"
63 
64 #define AXIENET_REGS_N		40
65 
66 static void axienet_rx_submit_desc(struct net_device *ndev);
67 
68 /* Match table for of_platform binding */
69 static const struct of_device_id axienet_of_match[] = {
70 	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
71 	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
72 	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
73 	{},
74 };
75 
76 MODULE_DEVICE_TABLE(of, axienet_of_match);
77 
78 /* Option table for setting up Axi Ethernet hardware options */
79 static struct axienet_option axienet_options[] = {
80 	/* Turn on jumbo packet support for both Rx and Tx */
81 	{
82 		.opt = XAE_OPTION_JUMBO,
83 		.reg = XAE_TC_OFFSET,
84 		.m_or = XAE_TC_JUM_MASK,
85 	}, {
86 		.opt = XAE_OPTION_JUMBO,
87 		.reg = XAE_RCW1_OFFSET,
88 		.m_or = XAE_RCW1_JUM_MASK,
89 	}, { /* Turn on VLAN packet support for both Rx and Tx */
90 		.opt = XAE_OPTION_VLAN,
91 		.reg = XAE_TC_OFFSET,
92 		.m_or = XAE_TC_VLAN_MASK,
93 	}, {
94 		.opt = XAE_OPTION_VLAN,
95 		.reg = XAE_RCW1_OFFSET,
96 		.m_or = XAE_RCW1_VLAN_MASK,
97 	}, { /* Turn on FCS stripping on receive packets */
98 		.opt = XAE_OPTION_FCS_STRIP,
99 		.reg = XAE_RCW1_OFFSET,
100 		.m_or = XAE_RCW1_FCS_MASK,
101 	}, { /* Turn on FCS insertion on transmit packets */
102 		.opt = XAE_OPTION_FCS_INSERT,
103 		.reg = XAE_TC_OFFSET,
104 		.m_or = XAE_TC_FCS_MASK,
105 	}, { /* Turn off length/type field checking on receive packets */
106 		.opt = XAE_OPTION_LENTYPE_ERR,
107 		.reg = XAE_RCW1_OFFSET,
108 		.m_or = XAE_RCW1_LT_DIS_MASK,
109 	}, { /* Turn on Rx flow control */
110 		.opt = XAE_OPTION_FLOW_CONTROL,
111 		.reg = XAE_FCC_OFFSET,
112 		.m_or = XAE_FCC_FCRX_MASK,
113 	}, { /* Turn on Tx flow control */
114 		.opt = XAE_OPTION_FLOW_CONTROL,
115 		.reg = XAE_FCC_OFFSET,
116 		.m_or = XAE_FCC_FCTX_MASK,
117 	}, { /* Turn on promiscuous frame filtering */
118 		.opt = XAE_OPTION_PROMISC,
119 		.reg = XAE_FMI_OFFSET,
120 		.m_or = XAE_FMI_PM_MASK,
121 	}, { /* Enable transmitter */
122 		.opt = XAE_OPTION_TXEN,
123 		.reg = XAE_TC_OFFSET,
124 		.m_or = XAE_TC_TX_MASK,
125 	}, { /* Enable receiver */
126 		.opt = XAE_OPTION_RXEN,
127 		.reg = XAE_RCW1_OFFSET,
128 		.m_or = XAE_RCW1_RX_MASK,
129 	},
130 	{}
131 };
132 
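/* Note: the skb ring sizes used below (RX_BUF_NUM_DEFAULT and TX_BD_NUM_MAX)
 * are powers of two, so a free-running index can be wrapped into the ring
 * with a simple "index & (size - 1)" mask instead of a modulo.
 */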
133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
134 {
135 	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
136 }
137 
138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
139 {
140 	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
141 }
142 
143 /**
144  * axienet_dma_in32 - Memory mapped Axi DMA register read
145  * @lp:		Pointer to axienet local structure
146  * @reg:	Address offset from the base address of the Axi DMA core
147  *
148  * Return: The contents of the Axi DMA register
149  *
150  * This function returns the contents of the corresponding Axi DMA register.
151  */
152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
153 {
154 	return ioread32(lp->dma_regs + reg);
155 }
156 
157 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
158 			       struct axidma_bd *desc)
159 {
160 	desc->phys = lower_32_bits(addr);
161 	if (lp->features & XAE_FEATURE_DMA_64BIT)
162 		desc->phys_msb = upper_32_bits(addr);
163 }
164 
165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
166 				     struct axidma_bd *desc)
167 {
168 	dma_addr_t ret = desc->phys;
169 
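	/* The MSB half is shifted in two 16-bit steps so the expression is
	 * still valid when dma_addr_t is only 32 bits wide (a single shift by
	 * 32 would exceed the type width on such configurations).
	 */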
170 	if (lp->features & XAE_FEATURE_DMA_64BIT)
171 		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
172 
173 	return ret;
174 }
175 
176 /**
177  * axienet_dma_bd_release - Release buffer descriptor rings
178  * @ndev:	Pointer to the net_device structure
179  *
180  * This function is used to release the descriptors allocated in
181  * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
182  * driver stop API is called.
183  */
184 static void axienet_dma_bd_release(struct net_device *ndev)
185 {
186 	int i;
187 	struct axienet_local *lp = netdev_priv(ndev);
188 
189 	/* If we end up here, tx_bd_v must have been DMA allocated. */
190 	dma_free_coherent(lp->dev,
191 			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
192 			  lp->tx_bd_v,
193 			  lp->tx_bd_p);
194 
195 	if (!lp->rx_bd_v)
196 		return;
197 
198 	for (i = 0; i < lp->rx_bd_num; i++) {
199 		dma_addr_t phys;
200 
201 		/* A NULL skb means this descriptor has not been initialised
202 		 * at all.
203 		 */
204 		if (!lp->rx_bd_v[i].skb)
205 			break;
206 
207 		dev_kfree_skb(lp->rx_bd_v[i].skb);
208 
209 		/* For each descriptor, we programmed cntrl with the (non-zero)
210 		 * descriptor size, after it had been successfully allocated.
211 		 * So a non-zero value in there means we need to unmap it.
212 		 */
213 		if (lp->rx_bd_v[i].cntrl) {
214 			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
215 			dma_unmap_single(lp->dev, phys,
216 					 lp->max_frm_size, DMA_FROM_DEVICE);
217 		}
218 	}
219 
220 	dma_free_coherent(lp->dev,
221 			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
222 			  lp->rx_bd_v,
223 			  lp->rx_bd_p);
224 }
225 
226 static u64 axienet_dma_rate(struct axienet_local *lp)
227 {
228 	if (lp->axi_clk)
229 		return clk_get_rate(lp->axi_clk);
230 	return 125000000; /* arbitrary guess if no clock rate set */
231 }
232 
233 /**
234  * axienet_calc_cr() - Calculate control register value
235  * @lp: Device private data
236  * @count: Number of completions before an interrupt
237  * @usec: Microseconds after the last completion before an interrupt
238  *
239  * Calculate a control register value based on the coalescing settings. The
240  * run/stop bit is not set.
241  */
242 static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
243 {
244 	u32 cr;
245 
246 	cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
247 	     XAXIDMA_IRQ_ERROR_MASK;
248 	/* Only set interrupt delay timer if not generating an interrupt on
249 	 * the first packet. Otherwise leave at 0 to disable delay interrupt.
250 	 */
251 	if (count > 1) {
252 		u64 clk_rate = axienet_dma_rate(lp);
253 		u32 timer;
254 
255 		/* 1 Timeout Interval = 125 * (clock period of SG clock) */
256 		timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
257 						XAXIDMA_DELAY_SCALE);
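		/* Worked example (assuming XAXIDMA_DELAY_SCALE is
		 * 125 * USEC_PER_SEC, consistent with the interval definition
		 * above): with a 100 MHz SG clock and usec = 50, this gives
		 * 50 * 100000000 / 125000000 = 40 timeout intervals.
		 */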
258 
259 		timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
260 		cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
261 		      XAXIDMA_IRQ_DELAY_MASK;
262 	}
263 
264 	return cr;
265 }
266 
267 /**
268  * axienet_coalesce_params() - Extract coalesce parameters from the CR
269  * @lp: Device private data
270  * @cr: The control register to parse
271  * @count: Number of packets before an interrupt
272  * @usec: Idle time (in usec) before an interrupt
273  */
274 static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
275 				    u32 *count, u32 *usec)
276 {
277 	u64 clk_rate = axienet_dma_rate(lp);
278 	u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
279 
280 	*count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
281 	*usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
282 }
283 
284 /**
285  * axienet_dma_start - Set up DMA registers and start DMA operation
286  * @lp:		Pointer to the axienet_local structure
287  */
288 static void axienet_dma_start(struct axienet_local *lp)
289 {
290 	spin_lock_irq(&lp->rx_cr_lock);
291 
292 	/* Start updating the Rx channel control register */
293 	lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
294 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
295 
296 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
297 	 * halted state. This will make the Rx side ready for reception.
298 	 */
299 	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
300 	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
301 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
302 	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
303 			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
304 	lp->rx_dma_started = true;
305 
306 	spin_unlock_irq(&lp->rx_cr_lock);
307 	spin_lock_irq(&lp->tx_cr_lock);
308 
309 	/* Start updating the Tx channel control register */
310 	lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
311 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
312 
313 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
314 	 * Tx channel is now ready to run. But only after we write to the
315 	 * tail pointer register that the Tx channel will start transmitting.
316 	 */
317 	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
318 	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
319 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
320 	lp->tx_dma_started = true;
321 
322 	spin_unlock_irq(&lp->tx_cr_lock);
323 }
324 
325 /**
326  * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
327  * @ndev:	Pointer to the net_device structure
328  *
329  * Return: 0 on success, -ENOMEM on failure.
330  *
331  * This function is called to initialize the Rx and Tx DMA descriptor
332  * rings. This initializes the descriptors with required default values
333  * and is called when Axi Ethernet driver reset is called.
334  */
335 static int axienet_dma_bd_init(struct net_device *ndev)
336 {
337 	int i;
338 	struct sk_buff *skb;
339 	struct axienet_local *lp = netdev_priv(ndev);
340 
341 	/* Reset the indexes which are used for accessing the BDs */
342 	lp->tx_bd_ci = 0;
343 	lp->tx_bd_tail = 0;
344 	lp->rx_bd_ci = 0;
345 
346 	/* Allocate the Tx and Rx buffer descriptors. */
347 	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
348 					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
349 					 &lp->tx_bd_p, GFP_KERNEL);
350 	if (!lp->tx_bd_v)
351 		return -ENOMEM;
352 
353 	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
354 					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
355 					 &lp->rx_bd_p, GFP_KERNEL);
356 	if (!lp->rx_bd_v)
357 		goto out;
358 
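	/* Link each Tx BD to the next one; the modulo wraps the last BD back
	 * to the first so the descriptors form a circular ring.
	 */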
359 	for (i = 0; i < lp->tx_bd_num; i++) {
360 		dma_addr_t addr = lp->tx_bd_p +
361 				  sizeof(*lp->tx_bd_v) *
362 				  ((i + 1) % lp->tx_bd_num);
363 
364 		lp->tx_bd_v[i].next = lower_32_bits(addr);
365 		if (lp->features & XAE_FEATURE_DMA_64BIT)
366 			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
367 	}
368 
369 	for (i = 0; i < lp->rx_bd_num; i++) {
370 		dma_addr_t addr;
371 
372 		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
373 			((i + 1) % lp->rx_bd_num);
374 		lp->rx_bd_v[i].next = lower_32_bits(addr);
375 		if (lp->features & XAE_FEATURE_DMA_64BIT)
376 			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
377 
378 		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
379 		if (!skb)
380 			goto out;
381 
382 		lp->rx_bd_v[i].skb = skb;
383 		addr = dma_map_single(lp->dev, skb->data,
384 				      lp->max_frm_size, DMA_FROM_DEVICE);
385 		if (dma_mapping_error(lp->dev, addr)) {
386 			netdev_err(ndev, "DMA mapping error\n");
387 			goto out;
388 		}
389 		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
390 
391 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
392 	}
393 
394 	axienet_dma_start(lp);
395 
396 	return 0;
397 out:
398 	axienet_dma_bd_release(ndev);
399 	return -ENOMEM;
400 }
401 
402 /**
403  * axienet_set_mac_address - Write the MAC address
404  * @ndev:	Pointer to the net_device structure
405  * @address:	6 byte Address to be written as MAC address
406  *
407  * This function is called to initialize the MAC address of the Axi Ethernet
408  * core. It writes to the UAW0 and UAW1 registers of the core.
409  */
410 static void axienet_set_mac_address(struct net_device *ndev,
411 				    const void *address)
412 {
413 	struct axienet_local *lp = netdev_priv(ndev);
414 
415 	if (address)
416 		eth_hw_addr_set(ndev, address);
417 	if (!is_valid_ether_addr(ndev->dev_addr))
418 		eth_hw_addr_random(ndev);
419 
420 	/* Set up the unicast MAC address filter with the given MAC address */
421 	axienet_iow(lp, XAE_UAW0_OFFSET,
422 		    (ndev->dev_addr[0]) |
423 		    (ndev->dev_addr[1] << 8) |
424 		    (ndev->dev_addr[2] << 16) |
425 		    (ndev->dev_addr[3] << 24));
426 	axienet_iow(lp, XAE_UAW1_OFFSET,
427 		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
428 		      ~XAE_UAW1_UNICASTADDR_MASK) |
429 		     (ndev->dev_addr[4] |
430 		     (ndev->dev_addr[5] << 8))));
431 }
432 
433 /**
434  * netdev_set_mac_address - Write the MAC address (from outside the driver)
435  * @ndev:	Pointer to the net_device structure
436  * @p:		6 byte Address to be written as MAC address
437  *
438  * Return: 0 for all conditions. Presently, there is no failure case.
439  *
440  * This function is called to initialize the MAC address of the Axi Ethernet
441  * core. It calls the core specific axienet_set_mac_address. This is the
442  * function that goes into net_device_ops structure entry ndo_set_mac_address.
443  */
444 static int netdev_set_mac_address(struct net_device *ndev, void *p)
445 {
446 	struct sockaddr *addr = p;
447 
448 	axienet_set_mac_address(ndev, addr->sa_data);
449 	return 0;
450 }
451 
452 /**
453  * axienet_set_multicast_list - Prepare the multicast table
454  * @ndev:	Pointer to the net_device structure
455  *
456  * This function sets up the multicast table. The Axi Ethernet basic
457  * multicast support has a four-entry multicast table which is programmed
458  * here. Additionally, this function is registered as the net_device_ops
459  * structure entry ndo_set_rx_mode, which means it gets called whenever the
460  * multicast table entries need to be updated.
462  */
463 static void axienet_set_multicast_list(struct net_device *ndev)
464 {
465 	int i = 0;
466 	u32 reg, af0reg, af1reg;
467 	struct axienet_local *lp = netdev_priv(ndev);
468 
469 	reg = axienet_ior(lp, XAE_FMI_OFFSET);
470 	reg &= ~XAE_FMI_PM_MASK;
471 	if (ndev->flags & IFF_PROMISC)
472 		reg |= XAE_FMI_PM_MASK;
473 	else
474 		reg &= ~XAE_FMI_PM_MASK;
475 	axienet_iow(lp, XAE_FMI_OFFSET, reg);
476 
477 	if (ndev->flags & IFF_ALLMULTI ||
478 	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
479 		reg &= 0xFFFFFF00;
480 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
481 		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
482 		axienet_iow(lp, XAE_AF1_OFFSET, 0);
483 		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
484 		axienet_iow(lp, XAE_AM1_OFFSET, 0);
485 		axienet_iow(lp, XAE_FFE_OFFSET, 1);
486 		i = 1;
487 	} else if (!netdev_mc_empty(ndev)) {
488 		struct netdev_hw_addr *ha;
489 
490 		netdev_for_each_mc_addr(ha, ndev) {
491 			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
492 				break;
493 
494 			af0reg = (ha->addr[0]);
495 			af0reg |= (ha->addr[1] << 8);
496 			af0reg |= (ha->addr[2] << 16);
497 			af0reg |= (ha->addr[3] << 24);
498 
499 			af1reg = (ha->addr[4]);
500 			af1reg |= (ha->addr[5] << 8);
501 
502 			reg &= 0xFFFFFF00;
503 			reg |= i;
504 
505 			axienet_iow(lp, XAE_FMI_OFFSET, reg);
506 			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
507 			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
508 			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
509 			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
510 			axienet_iow(lp, XAE_FFE_OFFSET, 1);
511 			i++;
512 		}
513 	}
514 
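	/* Disable the remaining, unused frame filter entries so stale
	 * addresses from a previous configuration are no longer matched.
	 */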
515 	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
516 		reg &= 0xFFFFFF00;
517 		reg |= i;
518 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
519 		axienet_iow(lp, XAE_FFE_OFFSET, 0);
520 	}
521 }
522 
523 /**
524  * axienet_setoptions - Set an Axi Ethernet option
525  * @ndev:	Pointer to the net_device structure
526  * @options:	Option to be enabled/disabled
527  *
528  * The Axi Ethernet core has multiple features which can be selectively turned
529  * on or off. Typical options are the jumbo frame option, basic VLAN option,
530  * promiscuous mode option, etc. This function is used to set or clear these
531  * options in the Axi Ethernet hardware through the axienet_option structure.
533  */
534 static void axienet_setoptions(struct net_device *ndev, u32 options)
535 {
536 	int reg;
537 	struct axienet_local *lp = netdev_priv(ndev);
538 	struct axienet_option *tp = &axienet_options[0];
539 
540 	while (tp->opt) {
541 		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
542 		if (options & tp->opt)
543 			reg |= tp->m_or;
544 		axienet_iow(lp, tp->reg, reg);
545 		tp++;
546 	}
547 
548 	lp->options |= options;
549 }
550 
551 static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
552 {
553 	u32 counter;
554 
555 	if (lp->reset_in_progress)
556 		return lp->hw_stat_base[stat];
557 
558 	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
559 	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
560 }
561 
562 static void axienet_stats_update(struct axienet_local *lp, bool reset)
563 {
564 	enum temac_stat stat;
565 
566 	write_seqcount_begin(&lp->hw_stats_seqcount);
567 	lp->reset_in_progress = reset;
568 	for (stat = 0; stat < STAT_COUNT; stat++) {
569 		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
570 
571 		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
572 		lp->hw_last_counter[stat] = counter;
573 	}
574 	write_seqcount_end(&lp->hw_stats_seqcount);
575 }
576 
577 static void axienet_refresh_stats(struct work_struct *work)
578 {
579 	struct axienet_local *lp = container_of(work, struct axienet_local,
580 						stats_work.work);
581 
582 	mutex_lock(&lp->stats_lock);
583 	axienet_stats_update(lp, false);
584 	mutex_unlock(&lp->stats_lock);
585 
586 	/* Just less than 2^32 bytes at 2.5 GBit/s */
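	/* That is, 2^32 bytes / (2.5 Gbit/s / 8 bits per byte) is about 13.7 s,
	 * so refreshing every 13 seconds ensures each 32-bit byte counter can
	 * wrap at most once between samples.
	 */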
587 	schedule_delayed_work(&lp->stats_work, 13 * HZ);
588 }
589 
590 static int __axienet_device_reset(struct axienet_local *lp)
591 {
592 	u32 value;
593 	int ret;
594 
595 	/* Save statistics counters in case they will be reset */
596 	mutex_lock(&lp->stats_lock);
597 	if (lp->features & XAE_FEATURE_STATS)
598 		axienet_stats_update(lp, true);
599 
600 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
601 	 * process of Axi DMA takes a while to complete as all pending
602 	 * commands/transfers will be flushed or completed during this
603 	 * reset process.
604 	 * Note that even though both TX and RX have their own reset register,
605 	 * they both reset the entire DMA core, so only one needs to be used.
606 	 */
607 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
608 	ret = read_poll_timeout(axienet_dma_in32, value,
609 				!(value & XAXIDMA_CR_RESET_MASK),
610 				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
611 				XAXIDMA_TX_CR_OFFSET);
612 	if (ret) {
613 		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
614 		goto out;
615 	}
616 
617 	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
618 	ret = read_poll_timeout(axienet_ior, value,
619 				value & XAE_INT_PHYRSTCMPLT_MASK,
620 				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
621 				XAE_IS_OFFSET);
622 	if (ret) {
623 		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
624 		goto out;
625 	}
626 
627 	/* Update statistics counters with new values */
628 	if (lp->features & XAE_FEATURE_STATS) {
629 		enum temac_stat stat;
630 
631 		write_seqcount_begin(&lp->hw_stats_seqcount);
632 		lp->reset_in_progress = false;
633 		for (stat = 0; stat < STAT_COUNT; stat++) {
634 			u32 counter =
635 				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
636 
637 			lp->hw_stat_base[stat] +=
638 				lp->hw_last_counter[stat] - counter;
639 			lp->hw_last_counter[stat] = counter;
640 		}
641 		write_seqcount_end(&lp->hw_stats_seqcount);
642 	}
643 
644 out:
645 	mutex_unlock(&lp->stats_lock);
646 	return ret;
647 }
648 
649 /**
650  * axienet_dma_stop - Stop DMA operation
651  * @lp:		Pointer to the axienet_local structure
652  */
653 static void axienet_dma_stop(struct axienet_local *lp)
654 {
655 	int count;
656 	u32 cr, sr;
657 
658 	spin_lock_irq(&lp->rx_cr_lock);
659 
660 	cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
661 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
662 	lp->rx_dma_started = false;
663 
664 	spin_unlock_irq(&lp->rx_cr_lock);
665 	synchronize_irq(lp->rx_irq);
666 
667 	spin_lock_irq(&lp->tx_cr_lock);
668 
669 	cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
670 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
671 	lp->tx_dma_started = false;
672 
673 	spin_unlock_irq(&lp->tx_cr_lock);
674 	synchronize_irq(lp->tx_irq);
675 
676 	/* Give DMAs a chance to halt gracefully */
677 	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
678 	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
679 		msleep(20);
680 		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
681 	}
682 
683 	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
684 	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
685 		msleep(20);
686 		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
687 	}
688 
689 	/* Do a reset to ensure DMA is really stopped */
690 	axienet_lock_mii(lp);
691 	__axienet_device_reset(lp);
692 	axienet_unlock_mii(lp);
693 }
694 
695 /**
696  * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
697  * @ndev:	Pointer to the net_device structure
698  *
699  * This function is called to reset and initialize the Axi Ethernet core. This
700  * is typically called during initialization. It does a reset of the Axi DMA
701  * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
702  * are connected to Axi Ethernet reset lines, this in turn resets the Axi
703  * Ethernet core. No separate hardware reset is done for the Axi Ethernet
704  * core.
705  * Return: 0 on success or a negative error number otherwise.
706  */
707 static int axienet_device_reset(struct net_device *ndev)
708 {
709 	u32 axienet_status;
710 	struct axienet_local *lp = netdev_priv(ndev);
711 	int ret;
712 
713 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
714 	lp->options |= XAE_OPTION_VLAN;
715 	lp->options &= (~XAE_OPTION_JUMBO);
716 
717 	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
718 		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
719 					XAE_TRL_SIZE;
720 
721 		if (lp->max_frm_size <= lp->rxmem)
722 			lp->options |= XAE_OPTION_JUMBO;
723 	}
724 
725 	if (!lp->use_dmaengine) {
726 		ret = __axienet_device_reset(lp);
727 		if (ret)
728 			return ret;
729 
730 		ret = axienet_dma_bd_init(ndev);
731 		if (ret) {
732 			netdev_err(ndev, "%s: descriptor allocation failed\n",
733 				   __func__);
734 			return ret;
735 		}
736 	}
737 
738 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
739 	axienet_status &= ~XAE_RCW1_RX_MASK;
740 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
741 
742 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
743 	if (axienet_status & XAE_INT_RXRJECT_MASK)
744 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
745 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
746 		    XAE_INT_RECV_ERROR_MASK : 0);
747 
748 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
749 
750 	/* Sync default options with HW but leave receiver and
751 	 * transmitter disabled.
752 	 */
753 	axienet_setoptions(ndev, lp->options &
754 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
755 	axienet_set_mac_address(ndev, NULL);
756 	axienet_set_multicast_list(ndev);
757 	axienet_setoptions(ndev, lp->options);
758 
759 	netif_trans_update(ndev);
760 
761 	return 0;
762 }
763 
764 /**
765  * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
766  * @lp:		Pointer to the axienet_local structure
767  * @first_bd:	Index of first descriptor to clean up
768  * @nr_bds:	Max number of descriptors to clean up
769  * @force:	Whether to clean descriptors even if not complete
770  * @sizep:	Pointer to a u32 filled with the total sum of all bytes
771  *		in all cleaned-up descriptors. Ignored if NULL.
772  * @budget:	NAPI budget (use 0 when not called from NAPI poll)
773  *
774  * Would either be called after a successful transmit operation, or after
775  * there was an error when setting up the chain.
776  * Returns the number of packets handled.
777  */
778 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
779 				 int nr_bds, bool force, u32 *sizep, int budget)
780 {
781 	struct axidma_bd *cur_p;
782 	unsigned int status;
783 	int i, packets = 0;
784 	dma_addr_t phys;
785 
786 	for (i = 0; i < nr_bds; i++) {
787 		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
788 		status = cur_p->status;
789 
790 		/* If force is not specified, clean up only descriptors
791 		 * that have been completed by the MAC.
792 		 */
793 		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
794 			break;
795 
796 		/* Ensure we see complete descriptor update */
797 		dma_rmb();
798 		phys = desc_get_phys_addr(lp, cur_p);
799 		dma_unmap_single(lp->dev, phys,
800 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
801 				 DMA_TO_DEVICE);
802 
803 		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
804 			napi_consume_skb(cur_p->skb, budget);
805 			packets++;
806 		}
807 
808 		cur_p->app0 = 0;
809 		cur_p->app1 = 0;
810 		cur_p->app2 = 0;
811 		cur_p->app4 = 0;
812 		cur_p->skb = NULL;
813 		/* ensure our transmit path and device don't prematurely see status cleared */
814 		wmb();
815 		cur_p->cntrl = 0;
816 		cur_p->status = 0;
817 
818 		if (sizep)
819 			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
820 	}
821 
822 	if (!force) {
823 		lp->tx_bd_ci += i;
824 		if (lp->tx_bd_ci >= lp->tx_bd_num)
825 			lp->tx_bd_ci %= lp->tx_bd_num;
826 	}
827 
828 	return packets;
829 }
830 
831 /**
832  * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
833  * @lp:		Pointer to the axienet_local structure
834  * @num_frag:	The number of BDs to check for
835  *
836  * Return: 0, on success
837  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
838  *
839  * This function is invoked before BDs are allocated and transmission starts.
840  * This function returns 0 if a BD or group of BDs can be allocated for
841  * transmission. If the BD or any of the BDs are not free the function
842  * returns a busy status.
843  */
844 static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
845 					    int num_frag)
846 {
847 	struct axidma_bd *cur_p;
848 
849 	/* Ensure we see all descriptor updates from device or TX polling */
850 	rmb();
851 	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
852 			     lp->tx_bd_num];
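	/* A non-zero cntrl means this descriptor is still in flight: it is
	 * only cleared in axienet_free_tx_chain() once the DMA has completed
	 * (or the chain is torn down), so the slot cannot be reused yet.
	 */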
853 	if (cur_p->cntrl)
854 		return NETDEV_TX_BUSY;
855 	return 0;
856 }
857 
858 /**
859  * axienet_dma_tx_cb - DMA engine callback for TX channel.
860  * @data:       Pointer to the axienet_local structure.
861  * @result:     error reporting through dmaengine_result.
862  * This function is called by the dmaengine driver for the TX channel to
863  * notify that the transmit is done.
864  */
865 static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
866 {
867 	struct skbuf_dma_descriptor *skbuf_dma;
868 	struct axienet_local *lp = data;
869 	struct netdev_queue *txq;
870 	int len;
871 
872 	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
873 	len = skbuf_dma->skb->len;
874 	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
875 	u64_stats_update_begin(&lp->tx_stat_sync);
876 	u64_stats_add(&lp->tx_bytes, len);
877 	u64_stats_add(&lp->tx_packets, 1);
878 	u64_stats_update_end(&lp->tx_stat_sync);
879 	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
880 	dev_consume_skb_any(skbuf_dma->skb);
881 	netif_txq_completed_wake(txq, 1, len,
882 				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
883 				 2);
884 }
885 
886 /**
887  * axienet_start_xmit_dmaengine - Starts the transmission.
888  * @skb:        sk_buff pointer that contains data to be Txed.
889  * @ndev:       Pointer to net_device structure.
890  *
891  * Return: NETDEV_TX_OK on success, or for any error other than lack of
892  *         space (in which case the skb is dropped).
893  *         NETDEV_TX_BUSY when no free element is available in the TX skb
894  *         ring buffer.
895  *
896  * This function is invoked to initiate transmission. It maps the skb into a
897  * scatterlist, registers the DMA callback and submits the DMA transaction.
898  * Additionally if checksum offloading is supported,
899  * it populates AXI Stream Control fields with appropriate values.
900  */
901 static netdev_tx_t
902 axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
903 {
904 	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
905 	struct axienet_local *lp = netdev_priv(ndev);
906 	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
907 	struct skbuf_dma_descriptor *skbuf_dma;
908 	struct dma_device *dma_dev;
909 	struct netdev_queue *txq;
910 	u32 csum_start_off;
911 	u32 csum_index_off;
912 	int sg_len;
913 	int ret;
914 
915 	dma_dev = lp->tx_chan->device;
916 	sg_len = skb_shinfo(skb)->nr_frags + 1;
917 	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
918 		netif_stop_queue(ndev);
919 		if (net_ratelimit())
920 			netdev_warn(ndev, "TX ring unexpectedly full\n");
921 		return NETDEV_TX_BUSY;
922 	}
923 
924 	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
925 	if (!skbuf_dma)
926 		goto xmit_error_drop_skb;
927 
928 	lp->tx_ring_head++;
929 	sg_init_table(skbuf_dma->sgl, sg_len);
930 	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
931 	if (ret < 0)
932 		goto xmit_error_drop_skb;
933 
934 	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
935 	if (!ret)
936 		goto xmit_error_drop_skb;
937 
938 	/* Fill up app fields for checksum */
939 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
940 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
941 			/* Tx Full Checksum Offload Enabled */
942 			app_metadata[0] |= 2;
943 		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
944 			csum_start_off = skb_transport_offset(skb);
945 			csum_index_off = csum_start_off + skb->csum_offset;
946 			/* Tx Partial Checksum Offload Enabled */
947 			app_metadata[0] |= 1;
948 			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
949 		}
950 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
951 		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
952 	}
953 
954 	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
955 			sg_len, DMA_MEM_TO_DEV,
956 			DMA_PREP_INTERRUPT, (void *)app_metadata);
957 	if (!dma_tx_desc)
958 		goto xmit_error_unmap_sg;
959 
960 	skbuf_dma->skb = skb;
961 	skbuf_dma->sg_len = sg_len;
962 	dma_tx_desc->callback_param = lp;
963 	dma_tx_desc->callback_result = axienet_dma_tx_cb;
964 	txq = skb_get_tx_queue(lp->ndev, skb);
965 	netdev_tx_sent_queue(txq, skb->len);
966 	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
967 			     1, 2);
968 
969 	dmaengine_submit(dma_tx_desc);
970 	dma_async_issue_pending(lp->tx_chan);
971 	return NETDEV_TX_OK;
972 
973 xmit_error_unmap_sg:
974 	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
975 xmit_error_drop_skb:
976 	dev_kfree_skb_any(skb);
977 	return NETDEV_TX_OK;
978 }
979 
980 /**
981  * axienet_tx_poll - Invoked once a transmit is completed by the
982  * Axi DMA Tx channel.
983  * @napi:	Pointer to NAPI structure.
984  * @budget:	Max number of TX packets to process.
985  *
986  * Return: Number of TX packets processed.
987  *
988  * This function is invoked from the NAPI processing to notify the completion
989  * of transmit operation. It clears fields in the corresponding Tx BDs and
990  * unmaps the corresponding buffer so that CPU can regain ownership of the
991  * buffer. It finally invokes "netif_wake_queue" to restart transmission if
992  * required.
993  */
994 static int axienet_tx_poll(struct napi_struct *napi, int budget)
995 {
996 	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
997 	struct net_device *ndev = lp->ndev;
998 	u32 size = 0;
999 	int packets;
1000 
1001 	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
1002 					&size, budget);
1003 
1004 	if (packets) {
1005 		netdev_completed_queue(ndev, packets, size);
1006 		u64_stats_update_begin(&lp->tx_stat_sync);
1007 		u64_stats_add(&lp->tx_packets, packets);
1008 		u64_stats_add(&lp->tx_bytes, size);
1009 		u64_stats_update_end(&lp->tx_stat_sync);
1010 
1011 		/* Matches barrier in axienet_start_xmit */
1012 		smp_mb();
1013 
1014 		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1015 			netif_wake_queue(ndev);
1016 	}
1017 
1018 	if (packets < budget && napi_complete_done(napi, packets)) {
1019 		/* Re-enable TX completion interrupts. This should
1020 		 * cause an immediate interrupt if any TX packets are
1021 		 * already pending.
1022 		 */
1023 		spin_lock_irq(&lp->tx_cr_lock);
1024 		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
1025 		spin_unlock_irq(&lp->tx_cr_lock);
1026 	}
1027 	return packets;
1028 }
1029 
1030 /**
1031  * axienet_start_xmit - Starts the transmission.
1032  * @skb:	sk_buff pointer that contains data to be Txed.
1033  * @ndev:	Pointer to net_device structure.
1034  *
1035  * Return: NETDEV_TX_OK, on success
1036  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
1037  *
1038  * This function is invoked from upper layers to initiate transmission. The
1039  * function uses the next available free BDs and populates their fields to
1040  * start the transmission. Additionally if checksum offloading is supported,
1041  * it populates AXI Stream Control fields with appropriate values.
1042  */
1043 static netdev_tx_t
1044 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1045 {
1046 	u32 ii;
1047 	u32 num_frag;
1048 	u32 csum_start_off;
1049 	u32 csum_index_off;
1050 	skb_frag_t *frag;
1051 	dma_addr_t tail_p, phys;
1052 	u32 orig_tail_ptr, new_tail_ptr;
1053 	struct axienet_local *lp = netdev_priv(ndev);
1054 	struct axidma_bd *cur_p;
1055 
1056 	orig_tail_ptr = lp->tx_bd_tail;
1057 	new_tail_ptr = orig_tail_ptr;
1058 
1059 	num_frag = skb_shinfo(skb)->nr_frags;
1060 	cur_p = &lp->tx_bd_v[orig_tail_ptr];
1061 
1062 	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1063 		/* Should not happen as last start_xmit call should have
1064 		 * checked for sufficient space and queue should only be
1065 		 * woken when sufficient space is available.
1066 		 */
1067 		netif_stop_queue(ndev);
1068 		if (net_ratelimit())
1069 			netdev_warn(ndev, "TX ring unexpectedly full\n");
1070 		return NETDEV_TX_BUSY;
1071 	}
1072 
1073 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1074 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1075 			/* Tx Full Checksum Offload Enabled */
1076 			cur_p->app0 |= 2;
1077 		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1078 			csum_start_off = skb_transport_offset(skb);
1079 			csum_index_off = csum_start_off + skb->csum_offset;
1080 			/* Tx Partial Checksum Offload Enabled */
1081 			cur_p->app0 |= 1;
1082 			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1083 		}
1084 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1085 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1086 	}
1087 
1088 	phys = dma_map_single(lp->dev, skb->data,
1089 			      skb_headlen(skb), DMA_TO_DEVICE);
1090 	if (unlikely(dma_mapping_error(lp->dev, phys))) {
1091 		if (net_ratelimit())
1092 			netdev_err(ndev, "TX DMA mapping error\n");
1093 		ndev->stats.tx_dropped++;
1094 		dev_kfree_skb_any(skb);
1095 		return NETDEV_TX_OK;
1096 	}
1097 	desc_set_phys_addr(lp, phys, cur_p);
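	/* The first BD of the frame carries the start-of-frame flag; the
	 * end-of-frame flag is set on the last fragment's BD further down so
	 * the DMA engine knows the frame boundaries.
	 */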
1098 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1099 
1100 	for (ii = 0; ii < num_frag; ii++) {
1101 		if (++new_tail_ptr >= lp->tx_bd_num)
1102 			new_tail_ptr = 0;
1103 		cur_p = &lp->tx_bd_v[new_tail_ptr];
1104 		frag = &skb_shinfo(skb)->frags[ii];
1105 		phys = dma_map_single(lp->dev,
1106 				      skb_frag_address(frag),
1107 				      skb_frag_size(frag),
1108 				      DMA_TO_DEVICE);
1109 		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1110 			if (net_ratelimit())
1111 				netdev_err(ndev, "TX DMA mapping error\n");
1112 			ndev->stats.tx_dropped++;
1113 			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1114 					      true, NULL, 0);
1115 			dev_kfree_skb_any(skb);
1116 			return NETDEV_TX_OK;
1117 		}
1118 		desc_set_phys_addr(lp, phys, cur_p);
1119 		cur_p->cntrl = skb_frag_size(frag);
1120 	}
1121 
1122 	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1123 	cur_p->skb = skb;
1124 
1125 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1126 	if (++new_tail_ptr >= lp->tx_bd_num)
1127 		new_tail_ptr = 0;
1128 	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1129 	netdev_sent_queue(ndev, skb->len);
1130 
1131 	/* Start the transfer */
1132 	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1133 
1134 	/* Stop queue if next transmit may not have space */
1135 	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1136 		netif_stop_queue(ndev);
1137 
1138 		/* Matches barrier in axienet_tx_poll */
1139 		smp_mb();
1140 
1141 		/* Space might have just been freed - check again */
1142 		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1143 			netif_wake_queue(ndev);
1144 	}
1145 
1146 	return NETDEV_TX_OK;
1147 }
1148 
1149 /**
1150  * axienet_dma_rx_cb - DMA engine callback for RX channel.
1151  * @data:       Pointer to the axienet_local structure.
1152  * @result:     error reporting through dmaengine_result.
1153  * This function is called by the dmaengine driver for the RX channel to
1154  * notify that a packet has been received.
1155  */
1156 static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1157 {
1158 	struct skbuf_dma_descriptor *skbuf_dma;
1159 	size_t meta_len, meta_max_len, rx_len;
1160 	struct axienet_local *lp = data;
1161 	struct sk_buff *skb;
1162 	u32 *app_metadata;
1163 	int i;
1164 
1165 	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1166 	skb = skbuf_dma->skb;
1167 	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1168 						       &meta_max_len);
1169 	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1170 			 DMA_FROM_DEVICE);
1171 
1172 	if (IS_ERR(app_metadata)) {
1173 		if (net_ratelimit())
1174 			netdev_err(lp->ndev, "Failed to get RX metadata pointer\n");
1175 		dev_kfree_skb_any(skb);
1176 		lp->ndev->stats.rx_dropped++;
1177 		goto rx_submit;
1178 	}
1179 
1180 	/* TODO: Derive app word index programmatically */
1181 	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1182 	skb_put(skb, rx_len);
1183 	skb->protocol = eth_type_trans(skb, lp->ndev);
1184 	skb->ip_summed = CHECKSUM_NONE;
1185 
1186 	__netif_rx(skb);
1187 	u64_stats_update_begin(&lp->rx_stat_sync);
1188 	u64_stats_add(&lp->rx_packets, 1);
1189 	u64_stats_add(&lp->rx_bytes, rx_len);
1190 	u64_stats_update_end(&lp->rx_stat_sync);
1191 
1192 rx_submit:
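	/* Refill the Rx ring: submit one new descriptor for each free slot,
	 * then kick the channel so reception can continue.
	 */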
1193 	for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
1194 				   RX_BUF_NUM_DEFAULT); i++)
1195 		axienet_rx_submit_desc(lp->ndev);
1196 	dma_async_issue_pending(lp->rx_chan);
1197 }
1198 
1199 /**
1200  * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1201  * @napi:	Pointer to NAPI structure.
1202  * @budget:	Max number of RX packets to process.
1203  *
1204  * Return: Number of RX packets processed.
1205  */
1206 static int axienet_rx_poll(struct napi_struct *napi, int budget)
1207 {
1208 	u32 length;
1209 	u32 csumstatus;
1210 	u32 size = 0;
1211 	int packets = 0;
1212 	dma_addr_t tail_p = 0;
1213 	struct axidma_bd *cur_p;
1214 	struct sk_buff *skb, *new_skb;
1215 	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1216 
1217 	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1218 
1219 	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1220 		dma_addr_t phys;
1221 
1222 		/* Ensure we see complete descriptor update */
1223 		dma_rmb();
1224 
1225 		skb = cur_p->skb;
1226 		cur_p->skb = NULL;
1227 
1228 		/* skb could be NULL if a previous pass already received the
1229 		 * packet for this slot in the ring, but failed to refill it
1230 		 * with a newly allocated buffer. In this case, don't try to
1231 		 * receive it again.
1232 		 */
1233 		if (likely(skb)) {
1234 			length = cur_p->app4 & 0x0000FFFF;
1235 
1236 			phys = desc_get_phys_addr(lp, cur_p);
1237 			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1238 					 DMA_FROM_DEVICE);
1239 
1240 			skb_put(skb, length);
1241 			skb->protocol = eth_type_trans(skb, lp->ndev);
1242 			/*skb_checksum_none_assert(skb);*/
1243 			skb->ip_summed = CHECKSUM_NONE;
1244 
1245 			/* if we're doing Rx csum offload, set it up */
1246 			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1247 				csumstatus = (cur_p->app2 &
1248 					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1249 				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1250 				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1251 					skb->ip_summed = CHECKSUM_UNNECESSARY;
1252 				}
1253 			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1254 				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1255 				skb->ip_summed = CHECKSUM_COMPLETE;
1256 			}
1257 
1258 			napi_gro_receive(napi, skb);
1259 
1260 			size += length;
1261 			packets++;
1262 		}
1263 
1264 		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1265 		if (!new_skb)
1266 			break;
1267 
1268 		phys = dma_map_single(lp->dev, new_skb->data,
1269 				      lp->max_frm_size,
1270 				      DMA_FROM_DEVICE);
1271 		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1272 			if (net_ratelimit())
1273 				netdev_err(lp->ndev, "RX DMA mapping error\n");
1274 			dev_kfree_skb(new_skb);
1275 			break;
1276 		}
1277 		desc_set_phys_addr(lp, phys, cur_p);
1278 
1279 		cur_p->cntrl = lp->max_frm_size;
1280 		cur_p->status = 0;
1281 		cur_p->skb = new_skb;
1282 
1283 		/* Only update tail_p to mark this slot as usable after it has
1284 		 * been successfully refilled.
1285 		 */
1286 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1287 
1288 		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1289 			lp->rx_bd_ci = 0;
1290 		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1291 	}
1292 
1293 	u64_stats_update_begin(&lp->rx_stat_sync);
1294 	u64_stats_add(&lp->rx_packets, packets);
1295 	u64_stats_add(&lp->rx_bytes, size);
1296 	u64_stats_update_end(&lp->rx_stat_sync);
1297 
1298 	if (tail_p)
1299 		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1300 
1301 	if (packets < budget && napi_complete_done(napi, packets)) {
1302 		if (READ_ONCE(lp->rx_dim_enabled)) {
1303 			struct dim_sample sample = {
1304 				.time = ktime_get(),
1305 				/* Safe because we are the only writer */
1306 				.pkt_ctr = u64_stats_read(&lp->rx_packets),
1307 				.byte_ctr = u64_stats_read(&lp->rx_bytes),
1308 				.event_ctr = READ_ONCE(lp->rx_irqs),
1309 			};
1310 
1311 			net_dim(&lp->rx_dim, &sample);
1312 		}
1313 
1314 		/* Re-enable RX completion interrupts. This should
1315 		 * cause an immediate interrupt if any RX packets are
1316 		 * already pending.
1317 		 */
1318 		spin_lock_irq(&lp->rx_cr_lock);
1319 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1320 		spin_unlock_irq(&lp->rx_cr_lock);
1321 	}
1322 	return packets;
1323 }
1324 
1325 /**
1326  * axienet_tx_irq - Tx Done Isr.
1327  * @irq:	irq number
1328  * @_ndev:	net_device pointer
1329  *
1330  * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1331  *
1332  * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1333  * TX BD processing.
1334  */
1335 static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1336 {
1337 	unsigned int status;
1338 	struct net_device *ndev = _ndev;
1339 	struct axienet_local *lp = netdev_priv(ndev);
1340 
1341 	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1342 
1343 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1344 		return IRQ_NONE;
1345 
1346 	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1347 
1348 	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1349 		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1350 		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1351 			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1352 			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1353 		schedule_work(&lp->dma_err_task);
1354 	} else {
1355 		/* Disable further TX completion interrupts and schedule
1356 		 * NAPI to handle the completions.
1357 		 */
1358 		if (napi_schedule_prep(&lp->napi_tx)) {
1359 			u32 cr;
1360 
1361 			spin_lock(&lp->tx_cr_lock);
1362 			cr = lp->tx_dma_cr;
1363 			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1364 			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1365 			spin_unlock(&lp->tx_cr_lock);
1366 			__napi_schedule(&lp->napi_tx);
1367 		}
1368 	}
1369 
1370 	return IRQ_HANDLED;
1371 }
1372 
1373 /**
1374  * axienet_rx_irq - Rx Isr.
1375  * @irq:	irq number
1376  * @_ndev:	net_device pointer
1377  *
1378  * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1379  *
1380  * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1381  * processing.
1382  */
1383 static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1384 {
1385 	unsigned int status;
1386 	struct net_device *ndev = _ndev;
1387 	struct axienet_local *lp = netdev_priv(ndev);
1388 
1389 	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1390 
1391 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1392 		return IRQ_NONE;
1393 
1394 	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1395 
1396 	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1397 		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1398 		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1399 			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1400 			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1401 		schedule_work(&lp->dma_err_task);
1402 	} else {
1403 		/* Disable further RX completion interrupts and schedule
1404 		 * NAPI receive.
1405 		 */
1406 		WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
1407 		if (napi_schedule_prep(&lp->napi_rx)) {
1408 			u32 cr;
1409 
1410 			spin_lock(&lp->rx_cr_lock);
1411 			cr = lp->rx_dma_cr;
1412 			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1413 			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1414 			spin_unlock(&lp->rx_cr_lock);
1415 
1416 			__napi_schedule(&lp->napi_rx);
1417 		}
1418 	}
1419 
1420 	return IRQ_HANDLED;
1421 }
1422 
1423 /**
1424  * axienet_eth_irq - Ethernet core Isr.
1425  * @irq:	irq number
1426  * @_ndev:	net_device pointer
1427  *
1428  * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1429  *
1430  * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1431  */
1432 static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1433 {
1434 	struct net_device *ndev = _ndev;
1435 	struct axienet_local *lp = netdev_priv(ndev);
1436 	unsigned int pending;
1437 
1438 	pending = axienet_ior(lp, XAE_IP_OFFSET);
1439 	if (!pending)
1440 		return IRQ_NONE;
1441 
1442 	if (pending & XAE_INT_RXFIFOOVR_MASK)
1443 		ndev->stats.rx_missed_errors++;
1444 
1445 	if (pending & XAE_INT_RXRJECT_MASK)
1446 		ndev->stats.rx_dropped++;
1447 
1448 	axienet_iow(lp, XAE_IS_OFFSET, pending);
1449 	return IRQ_HANDLED;
1450 }
1451 
1452 static void axienet_dma_err_handler(struct work_struct *work);
1453 
1454 /**
1455  * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
1456  * @ndev:	net_device pointer
1457  *
1458  * Allocate an skbuff, map it for DMA, obtain a dma descriptor, then add the
1459  * callback information and submit the descriptor.
1460  *
1461  */
1462 static void axienet_rx_submit_desc(struct net_device *ndev)
1463 {
1464 	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1465 	struct axienet_local *lp = netdev_priv(ndev);
1466 	struct skbuf_dma_descriptor *skbuf_dma;
1467 	struct sk_buff *skb;
1468 	dma_addr_t addr;
1469 
1470 	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1471 	if (!skbuf_dma)
1472 		return;
1473 
1474 	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1475 	if (!skb)
1476 		return;
1477 
1478 	sg_init_table(skbuf_dma->sgl, 1);
1479 	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1480 	if (unlikely(dma_mapping_error(lp->dev, addr))) {
1481 		if (net_ratelimit())
1482 			netdev_err(ndev, "DMA mapping error\n");
1483 		goto rx_submit_err_free_skb;
1484 	}
1485 	sg_dma_address(skbuf_dma->sgl) = addr;
1486 	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1487 	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1488 					      1, DMA_DEV_TO_MEM,
1489 					      DMA_PREP_INTERRUPT);
1490 	if (!dma_rx_desc)
1491 		goto rx_submit_err_unmap_skb;
1492 
1493 	skbuf_dma->skb = skb;
1494 	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1495 	skbuf_dma->desc = dma_rx_desc;
1496 	dma_rx_desc->callback_param = lp;
1497 	dma_rx_desc->callback_result = axienet_dma_rx_cb;
1498 	lp->rx_ring_head++;
1499 	dmaengine_submit(dma_rx_desc);
1500 
1501 	return;
1502 
1503 rx_submit_err_unmap_skb:
1504 	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1505 rx_submit_err_free_skb:
1506 	dev_kfree_skb(skb);
1507 }
1508 
1509 /**
1510  * axienet_init_dmaengine - init the dmaengine code.
1511  * @ndev:       Pointer to net_device structure
1512  *
1513  * Return: 0, on success.
1514  *          non-zero error value on failure
1515  *
1516  * This is the dmaengine initialization code.
1517  */
1518 static int axienet_init_dmaengine(struct net_device *ndev)
1519 {
1520 	struct axienet_local *lp = netdev_priv(ndev);
1521 	struct skbuf_dma_descriptor *skbuf_dma;
1522 	int i, ret;
1523 
1524 	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1525 	if (IS_ERR(lp->tx_chan)) {
1526 		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1527 		return PTR_ERR(lp->tx_chan);
1528 	}
1529 
1530 	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1531 	if (IS_ERR(lp->rx_chan)) {
1532 		ret = PTR_ERR(lp->rx_chan);
1533 		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1534 		goto err_dma_release_tx;
1535 	}
1536 
1537 	lp->tx_ring_tail = 0;
1538 	lp->tx_ring_head = 0;
1539 	lp->rx_ring_tail = 0;
1540 	lp->rx_ring_head = 0;
1541 	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1542 				  GFP_KERNEL);
1543 	if (!lp->tx_skb_ring) {
1544 		ret = -ENOMEM;
1545 		goto err_dma_release_rx;
1546 	}
1547 	for (i = 0; i < TX_BD_NUM_MAX; i++) {
1548 		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1549 		if (!skbuf_dma) {
1550 			ret = -ENOMEM;
1551 			goto err_free_tx_skb_ring;
1552 		}
1553 		lp->tx_skb_ring[i] = skbuf_dma;
1554 	}
1555 
1556 	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1557 				  GFP_KERNEL);
1558 	if (!lp->rx_skb_ring) {
1559 		ret = -ENOMEM;
1560 		goto err_free_tx_skb_ring;
1561 	}
1562 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1563 		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1564 		if (!skbuf_dma) {
1565 			ret = -ENOMEM;
1566 			goto err_free_rx_skb_ring;
1567 		}
1568 		lp->rx_skb_ring[i] = skbuf_dma;
1569 	}
1570 	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1571 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1572 		axienet_rx_submit_desc(ndev);
1573 	dma_async_issue_pending(lp->rx_chan);
1574 
1575 	return 0;
1576 
1577 err_free_rx_skb_ring:
1578 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1579 		kfree(lp->rx_skb_ring[i]);
1580 	kfree(lp->rx_skb_ring);
1581 err_free_tx_skb_ring:
1582 	for (i = 0; i < TX_BD_NUM_MAX; i++)
1583 		kfree(lp->tx_skb_ring[i]);
1584 	kfree(lp->tx_skb_ring);
1585 err_dma_release_rx:
1586 	dma_release_channel(lp->rx_chan);
1587 err_dma_release_tx:
1588 	dma_release_channel(lp->tx_chan);
1589 	return ret;
1590 }
1591 
1592 /**
1593  * axienet_init_legacy_dma - init the dma legacy code.
1594  * @ndev:       Pointer to net_device structure
1595  *
1596  * Return: 0, on success.
1597  *          non-zero error value on failure
1598  *
1599  * This is the dma  initialization code. It also allocates interrupt
1600  * service routines, enables the interrupt lines and ISR handling.
1601  *
1602  */
1603 static int axienet_init_legacy_dma(struct net_device *ndev)
1604 {
1605 	int ret;
1606 	struct axienet_local *lp = netdev_priv(ndev);
1607 
1608 	/* Enable worker thread for Axi DMA error handling */
1609 	lp->stopping = false;
1610 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1611 
1612 	napi_enable(&lp->napi_rx);
1613 	napi_enable(&lp->napi_tx);
1614 
1615 	/* Enable interrupts for Axi DMA Tx */
1616 	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1617 			  ndev->name, ndev);
1618 	if (ret)
1619 		goto err_tx_irq;
1620 	/* Enable interrupts for Axi DMA Rx */
1621 	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1622 			  ndev->name, ndev);
1623 	if (ret)
1624 		goto err_rx_irq;
1625 	/* Enable interrupts for Axi Ethernet core (if defined) */
1626 	if (lp->eth_irq > 0) {
1627 		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1628 				  ndev->name, ndev);
1629 		if (ret)
1630 			goto err_eth_irq;
1631 	}
1632 
1633 	return 0;
1634 
1635 err_eth_irq:
1636 	free_irq(lp->rx_irq, ndev);
1637 err_rx_irq:
1638 	free_irq(lp->tx_irq, ndev);
1639 err_tx_irq:
1640 	napi_disable(&lp->napi_tx);
1641 	napi_disable(&lp->napi_rx);
1642 	cancel_work_sync(&lp->dma_err_task);
1643 	dev_err(lp->dev, "request_irq() failed\n");
1644 	return ret;
1645 }
1646 
1647 /**
1648  * axienet_open - Driver open routine.
1649  * @ndev:	Pointer to net_device structure
1650  *
1651  * Return: 0, on success.
1652  *	    non-zero error value on failure
1653  *
1654  * This is the driver open routine. It calls phylink_start to start the
1655  * PHY device.
1656  * It also allocates interrupt service routines, enables the interrupt lines
1657  * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1658  * descriptors are initialized.
1659  */
1660 static int axienet_open(struct net_device *ndev)
1661 {
1662 	int ret;
1663 	struct axienet_local *lp = netdev_priv(ndev);
1664 
1665 	/* When we do an Axi Ethernet reset, it resets the complete core
1666 	 * including the MDIO. MDIO must be disabled before resetting.
1667 	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1668 	 */
1669 	axienet_lock_mii(lp);
1670 	ret = axienet_device_reset(ndev);
1671 	axienet_unlock_mii(lp);
1672 
1673 	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1674 	if (ret) {
1675 		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1676 		return ret;
1677 	}
1678 
1679 	phylink_start(lp->phylink);
1680 
1681 	/* Start the statistics refresh work */
1682 	schedule_delayed_work(&lp->stats_work, 0);
1683 
1684 	if (lp->use_dmaengine) {
1685 		/* Enable interrupts for Axi Ethernet core (if defined) */
1686 		if (lp->eth_irq > 0) {
1687 			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1688 					  ndev->name, ndev);
1689 			if (ret)
1690 				goto err_phy;
1691 		}
1692 
1693 		ret = axienet_init_dmaengine(ndev);
1694 		if (ret < 0)
1695 			goto err_free_eth_irq;
1696 	} else {
1697 		ret = axienet_init_legacy_dma(ndev);
1698 		if (ret)
1699 			goto err_phy;
1700 	}
1701 
1702 	return 0;
1703 
1704 err_free_eth_irq:
1705 	if (lp->eth_irq > 0)
1706 		free_irq(lp->eth_irq, ndev);
1707 err_phy:
1708 	cancel_work_sync(&lp->rx_dim.work);
1709 	cancel_delayed_work_sync(&lp->stats_work);
1710 	phylink_stop(lp->phylink);
1711 	phylink_disconnect_phy(lp->phylink);
1712 	return ret;
1713 }
1714 
1715 /**
1716  * axienet_stop - Driver stop routine.
1717  * @ndev:	Pointer to net_device structure
1718  *
1719  * Return: 0, on success.
1720  *
 * This is the driver stop routine. It stops and disconnects the PHY through
 * phylink. It also removes the interrupt handlers and disables the interrupts.
1723  * The Axi DMA Tx/Rx BDs are released.
1724  */
1725 static int axienet_stop(struct net_device *ndev)
1726 {
1727 	struct axienet_local *lp = netdev_priv(ndev);
1728 	int i;
1729 
1730 	if (!lp->use_dmaengine) {
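		/* Tell the DMA error-handling worker to bail out and wait for
		 * any running instance, so it cannot re-enable NAPI after we
		 * disable it below.
		 */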
1731 		WRITE_ONCE(lp->stopping, true);
1732 		flush_work(&lp->dma_err_task);
1733 
1734 		napi_disable(&lp->napi_tx);
1735 		napi_disable(&lp->napi_rx);
1736 	}
1737 
1738 	cancel_work_sync(&lp->rx_dim.work);
1739 	cancel_delayed_work_sync(&lp->stats_work);
1740 
1741 	phylink_stop(lp->phylink);
1742 	phylink_disconnect_phy(lp->phylink);
1743 
1744 	axienet_setoptions(ndev, lp->options &
1745 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1746 
1747 	if (!lp->use_dmaengine) {
1748 		axienet_dma_stop(lp);
1749 		cancel_work_sync(&lp->dma_err_task);
1750 		free_irq(lp->tx_irq, ndev);
1751 		free_irq(lp->rx_irq, ndev);
1752 		axienet_dma_bd_release(ndev);
1753 	} else {
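		/* Quiesce the dmaengine channels and wait for any in-flight
		 * completion callbacks before freeing the skb rings and
		 * releasing the channels.
		 */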
1754 		dmaengine_terminate_sync(lp->tx_chan);
1755 		dmaengine_synchronize(lp->tx_chan);
1756 		dmaengine_terminate_sync(lp->rx_chan);
1757 		dmaengine_synchronize(lp->rx_chan);
1758 
1759 		for (i = 0; i < TX_BD_NUM_MAX; i++)
1760 			kfree(lp->tx_skb_ring[i]);
1761 		kfree(lp->tx_skb_ring);
1762 		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1763 			kfree(lp->rx_skb_ring[i]);
1764 		kfree(lp->rx_skb_ring);
1765 
1766 		dma_release_channel(lp->rx_chan);
1767 		dma_release_channel(lp->tx_chan);
1768 	}
1769 
1770 	netdev_reset_queue(ndev);
1771 	axienet_iow(lp, XAE_IE_OFFSET, 0);
1772 
1773 	if (lp->eth_irq > 0)
1774 		free_irq(lp->eth_irq, ndev);
1775 	return 0;
1776 }
1777 
1778 /**
1779  * axienet_change_mtu - Driver change mtu routine.
1780  * @ndev:	Pointer to net_device structure
1781  * @new_mtu:	New mtu value to be applied
1782  *
 * Return: 0 on success; -EBUSY if the device is running; -EINVAL if the new
 *	   MTU does not fit in the configured Rx memory.
 *
 * This is the change mtu driver routine. It checks that the requested MTU
 * fits in the Rx memory configured in hardware before applying it. The MTU
 * can only be changed while the device is down.
1788  */
1789 static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1790 {
1791 	struct axienet_local *lp = netdev_priv(ndev);
1792 
1793 	if (netif_running(ndev))
1794 		return -EBUSY;
1795 
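	/* The largest frame (MTU plus VLAN Ethernet header and trailer) must
	 * fit in the Rx memory configured in hardware.
	 */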
	if (new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE > lp->rxmem)
		return -EINVAL;
1799 
1800 	WRITE_ONCE(ndev->mtu, new_mtu);
1801 
1802 	return 0;
1803 }
1804 
1805 #ifdef CONFIG_NET_POLL_CONTROLLER
1806 /**
1807  * axienet_poll_controller - Axi Ethernet poll mechanism.
1808  * @ndev:	Pointer to net_device structure
1809  *
 * This implements the Rx/Tx ISR poll mechanism. The interrupts are disabled
 * before the ISRs are invoked and are re-enabled once polling is done.
1812  */
1813 static void axienet_poll_controller(struct net_device *ndev)
1814 {
1815 	struct axienet_local *lp = netdev_priv(ndev);
1816 
1817 	disable_irq(lp->tx_irq);
1818 	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
1821 	enable_irq(lp->tx_irq);
1822 	enable_irq(lp->rx_irq);
1823 }
1824 #endif
1825 
1826 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1827 {
1828 	struct axienet_local *lp = netdev_priv(dev);
1829 
1830 	if (!netif_running(dev))
1831 		return -EINVAL;
1832 
1833 	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1834 }
1835 
1836 static void
1837 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1838 {
1839 	struct axienet_local *lp = netdev_priv(dev);
1840 	unsigned int start;
1841 
1842 	netdev_stats_to_stats64(stats, &dev->stats);
1843 
1844 	do {
1845 		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1846 		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1847 		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1848 	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1849 
1850 	do {
1851 		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1852 		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1853 		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1854 	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1855 
1856 	if (!(lp->features & XAE_FEATURE_STATS))
1857 		return;
1858 
1859 	do {
1860 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
1861 		stats->rx_length_errors =
1862 			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1863 		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1864 		stats->rx_frame_errors =
1865 			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1866 		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1867 				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1868 				   stats->rx_length_errors +
1869 				   stats->rx_crc_errors +
1870 				   stats->rx_frame_errors;
1871 		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1872 
1873 		stats->tx_aborted_errors =
1874 			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1875 		stats->tx_fifo_errors =
1876 			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1877 		stats->tx_window_errors =
1878 			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1879 		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1880 				   stats->tx_aborted_errors +
1881 				   stats->tx_fifo_errors +
1882 				   stats->tx_window_errors;
1883 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1884 }
1885 
1886 static const struct net_device_ops axienet_netdev_ops = {
1887 	.ndo_open = axienet_open,
1888 	.ndo_stop = axienet_stop,
1889 	.ndo_start_xmit = axienet_start_xmit,
1890 	.ndo_get_stats64 = axienet_get_stats64,
1891 	.ndo_change_mtu	= axienet_change_mtu,
1892 	.ndo_set_mac_address = netdev_set_mac_address,
1893 	.ndo_validate_addr = eth_validate_addr,
1894 	.ndo_eth_ioctl = axienet_ioctl,
1895 	.ndo_set_rx_mode = axienet_set_multicast_list,
1896 #ifdef CONFIG_NET_POLL_CONTROLLER
1897 	.ndo_poll_controller = axienet_poll_controller,
1898 #endif
1899 };
1900 
1901 static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1902 	.ndo_open = axienet_open,
1903 	.ndo_stop = axienet_stop,
1904 	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1905 	.ndo_get_stats64 = axienet_get_stats64,
1906 	.ndo_change_mtu	= axienet_change_mtu,
1907 	.ndo_set_mac_address = netdev_set_mac_address,
1908 	.ndo_validate_addr = eth_validate_addr,
1909 	.ndo_eth_ioctl = axienet_ioctl,
1910 	.ndo_set_rx_mode = axienet_set_multicast_list,
1911 };
1912 
1913 /**
1914  * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1915  * @ndev:	Pointer to net_device structure
1916  * @ed:		Pointer to ethtool_drvinfo structure
1917  *
1918  * This implements ethtool command for getting the driver information.
1919  * Issue "ethtool -i ethX" under linux prompt to execute this function.
1920  */
1921 static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1922 					 struct ethtool_drvinfo *ed)
1923 {
1924 	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1925 	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1926 }
1927 
1928 /**
1929  * axienet_ethtools_get_regs_len - Get the total regs length present in the
1930  *				   AxiEthernet core.
1931  * @ndev:	Pointer to net_device structure
1932  *
1933  * This implements ethtool command for getting the total register length
1934  * information.
1935  *
1936  * Return: the total regs length
1937  */
1938 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1939 {
1940 	return sizeof(u32) * AXIENET_REGS_N;
1941 }
1942 
1943 /**
1944  * axienet_ethtools_get_regs - Dump the contents of all registers present
1945  *			       in AxiEthernet core.
1946  * @ndev:	Pointer to net_device structure
1947  * @regs:	Pointer to ethtool_regs structure
1948  * @ret:	Void pointer used to return the contents of the registers.
1949  *
1950  * This implements ethtool command for getting the Axi Ethernet register dump.
1951  * Issue "ethtool -d ethX" to execute this function.
1952  */
1953 static void axienet_ethtools_get_regs(struct net_device *ndev,
1954 				      struct ethtool_regs *regs, void *ret)
1955 {
1956 	u32 *data = (u32 *)ret;
1957 	size_t len = sizeof(u32) * AXIENET_REGS_N;
1958 	struct axienet_local *lp = netdev_priv(ndev);
1959 
1960 	regs->version = 0;
1961 	regs->len = len;
1962 
1963 	memset(data, 0, len);
1964 	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1965 	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1966 	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1967 	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1968 	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1969 	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1970 	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1971 	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1972 	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1973 	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1974 	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1975 	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1976 	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1977 	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1978 	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1979 	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1980 	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1981 	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1982 	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1983 	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1984 	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1985 	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1986 	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
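	/* data[23]..data[26] are not populated and remain zero from the
	 * memset above.
	 */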
1987 	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1988 	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1989 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1990 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1991 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1992 	if (!lp->use_dmaengine) {
1993 		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1994 		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1995 		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1996 		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1997 		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1998 		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1999 		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
2000 		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
2001 	}
2002 }
2003 
2004 static void
2005 axienet_ethtools_get_ringparam(struct net_device *ndev,
2006 			       struct ethtool_ringparam *ering,
2007 			       struct kernel_ethtool_ringparam *kernel_ering,
2008 			       struct netlink_ext_ack *extack)
2009 {
2010 	struct axienet_local *lp = netdev_priv(ndev);
2011 
2012 	ering->rx_max_pending = RX_BD_NUM_MAX;
2013 	ering->rx_mini_max_pending = 0;
2014 	ering->rx_jumbo_max_pending = 0;
2015 	ering->tx_max_pending = TX_BD_NUM_MAX;
2016 	ering->rx_pending = lp->rx_bd_num;
2017 	ering->rx_mini_pending = 0;
2018 	ering->rx_jumbo_pending = 0;
2019 	ering->tx_pending = lp->tx_bd_num;
2020 }
2021 
2022 static int
2023 axienet_ethtools_set_ringparam(struct net_device *ndev,
2024 			       struct ethtool_ringparam *ering,
2025 			       struct kernel_ethtool_ringparam *kernel_ering,
2026 			       struct netlink_ext_ack *extack)
2027 {
2028 	struct axienet_local *lp = netdev_priv(ndev);
2029 
2030 	if (ering->rx_pending > RX_BD_NUM_MAX ||
2031 	    ering->rx_mini_pending ||
2032 	    ering->rx_jumbo_pending ||
2033 	    ering->tx_pending < TX_BD_NUM_MIN ||
2034 	    ering->tx_pending > TX_BD_NUM_MAX)
2035 		return -EINVAL;
2036 
2037 	if (netif_running(ndev))
2038 		return -EBUSY;
2039 
2040 	lp->rx_bd_num = ering->rx_pending;
2041 	lp->tx_bd_num = ering->tx_pending;
2042 	return 0;
2043 }
2044 
2045 /**
2046  * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
2047  *				     Tx and Rx paths.
2048  * @ndev:	Pointer to net_device structure
2049  * @epauseparm:	Pointer to ethtool_pauseparam structure.
2050  *
 * This implements the ethtool command for getting the Axi Ethernet pause
 * frame setting. Issue "ethtool -a ethX" to execute this function.
2053  */
2054 static void
2055 axienet_ethtools_get_pauseparam(struct net_device *ndev,
2056 				struct ethtool_pauseparam *epauseparm)
2057 {
2058 	struct axienet_local *lp = netdev_priv(ndev);
2059 
2060 	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
2061 }
2062 
2063 /**
2064  * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
2065  *				     settings.
2066  * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
2068  *
2069  * This implements ethtool command for enabling flow control on Rx and Tx
2070  * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
2071  * function.
2072  *
 * Return: 0 on success, or a negative error value from phylink on failure
2074  */
2075 static int
2076 axienet_ethtools_set_pauseparam(struct net_device *ndev,
2077 				struct ethtool_pauseparam *epauseparm)
2078 {
2079 	struct axienet_local *lp = netdev_priv(ndev);
2080 
2081 	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2082 }
2083 
2084 /**
2085  * axienet_update_coalesce_rx() - Set RX CR
2086  * @lp: Device private data
2087  * @cr: Value to write to the RX CR
2088  * @mask: Bits to set from @cr
 * @mask: Bits of the control register to update from @cr
2090 static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
2091 				       u32 mask)
2092 {
2093 	spin_lock_irq(&lp->rx_cr_lock);
2094 	lp->rx_dma_cr &= ~mask;
2095 	lp->rx_dma_cr |= cr;
2096 	/* If DMA isn't started, then the settings will be applied the next
2097 	 * time dma_start() is called.
2098 	 */
2099 	if (lp->rx_dma_started) {
2100 		u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
2101 
2102 		/* Don't enable IRQs if they are disabled by NAPI */
2103 		if (reg & XAXIDMA_IRQ_ALL_MASK)
2104 			cr = lp->rx_dma_cr;
2105 		else
2106 			cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
2107 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
2108 	}
2109 	spin_unlock_irq(&lp->rx_cr_lock);
2110 }
2111 
2112 /**
2113  * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
 * @lp: Device private data
 *
 * Return: RX coalesce frame count to program for the current DIM profile
 */
2116 static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
2117 {
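	/* The count grows as 4^profile_ix (1, 4, 16, 64, ...), capped at the
	 * hardware maximum of 255 frames.
	 */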
2118 	return min(1 << (lp->rx_dim.profile_ix << 1), 255);
2119 }
2120 
2121 /**
2122  * axienet_rx_dim_work() - Adjust RX DIM settings
2123  * @work: The work struct
2124  */
2125 static void axienet_rx_dim_work(struct work_struct *work)
2126 {
2127 	struct axienet_local *lp =
2128 		container_of(work, struct axienet_local, rx_dim.work);
2129 	u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
2130 	u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
2131 		   XAXIDMA_IRQ_ERROR_MASK;
2132 
2133 	axienet_update_coalesce_rx(lp, cr, mask);
2134 	lp->rx_dim.state = DIM_START_MEASURE;
2135 }
2136 
2137 /**
2138  * axienet_update_coalesce_tx() - Set TX CR
2139  * @lp: Device private data
2140  * @cr: Value to write to the TX CR
 * @mask: Bits of the control register to update from @cr
2142  */
2143 static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
2144 				       u32 mask)
2145 {
2146 	spin_lock_irq(&lp->tx_cr_lock);
2147 	lp->tx_dma_cr &= ~mask;
2148 	lp->tx_dma_cr |= cr;
2149 	/* If DMA isn't started, then the settings will be applied the next
2150 	 * time dma_start() is called.
2151 	 */
2152 	if (lp->tx_dma_started) {
2153 		u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
2154 
2155 		/* Don't enable IRQs if they are disabled by NAPI */
2156 		if (reg & XAXIDMA_IRQ_ALL_MASK)
2157 			cr = lp->tx_dma_cr;
2158 		else
2159 			cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
2160 		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
2161 	}
2162 	spin_unlock_irq(&lp->tx_cr_lock);
2163 }
2164 
2165 /**
2166  * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2167  * @ndev:	Pointer to net_device structure
2168  * @ecoalesce:	Pointer to ethtool_coalesce structure
2169  * @kernel_coal: ethtool CQE mode setting structure
2170  * @extack:	extack for reporting error messages
2171  *
2172  * This implements ethtool command for getting the DMA interrupt coalescing
2173  * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2174  * execute this function.
2175  *
2176  * Return: 0 always
2177  */
2178 static int
2179 axienet_ethtools_get_coalesce(struct net_device *ndev,
2180 			      struct ethtool_coalesce *ecoalesce,
2181 			      struct kernel_ethtool_coalesce *kernel_coal,
2182 			      struct netlink_ext_ack *extack)
2183 {
2184 	struct axienet_local *lp = netdev_priv(ndev);
2185 	u32 cr;
2186 
2187 	ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
2188 
2189 	spin_lock_irq(&lp->rx_cr_lock);
2190 	cr = lp->rx_dma_cr;
2191 	spin_unlock_irq(&lp->rx_cr_lock);
2192 	axienet_coalesce_params(lp, cr,
2193 				&ecoalesce->rx_max_coalesced_frames,
2194 				&ecoalesce->rx_coalesce_usecs);
2195 
2196 	spin_lock_irq(&lp->tx_cr_lock);
2197 	cr = lp->tx_dma_cr;
2198 	spin_unlock_irq(&lp->tx_cr_lock);
2199 	axienet_coalesce_params(lp, cr,
2200 				&ecoalesce->tx_max_coalesced_frames,
2201 				&ecoalesce->tx_coalesce_usecs);
2202 	return 0;
2203 }
2204 
2205 /**
2206  * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2207  * @ndev:	Pointer to net_device structure
2208  * @ecoalesce:	Pointer to ethtool_coalesce structure
2209  * @kernel_coal: ethtool CQE mode setting structure
2210  * @extack:	extack for reporting error messages
2211  *
2212  * This implements ethtool command for setting the DMA interrupt coalescing
2213  * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2214  * prompt to execute this function.
2215  *
2216  * Return: 0, on success, Non-zero error value on failure.
2217  */
2218 static int
2219 axienet_ethtools_set_coalesce(struct net_device *ndev,
2220 			      struct ethtool_coalesce *ecoalesce,
2221 			      struct kernel_ethtool_coalesce *kernel_coal,
2222 			      struct netlink_ext_ack *extack)
2223 {
2224 	struct axienet_local *lp = netdev_priv(ndev);
2225 	bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
2226 	bool old_dim = lp->rx_dim_enabled;
2227 	u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
2228 
2229 	if (ecoalesce->rx_max_coalesced_frames > 255 ||
2230 	    ecoalesce->tx_max_coalesced_frames > 255) {
2231 		NL_SET_ERR_MSG(extack, "frames must be less than 256");
2232 		return -EINVAL;
2233 	}
2234 
2235 	if (!ecoalesce->rx_max_coalesced_frames ||
2236 	    !ecoalesce->tx_max_coalesced_frames) {
2237 		NL_SET_ERR_MSG(extack, "frames must be non-zero");
2238 		return -EINVAL;
2239 	}
2240 
2241 	if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
2242 	     !ecoalesce->rx_coalesce_usecs) ||
2243 	    (ecoalesce->tx_max_coalesced_frames > 1 &&
2244 	     !ecoalesce->tx_coalesce_usecs)) {
2245 		NL_SET_ERR_MSG(extack,
2246 			       "usecs must be non-zero when frames is greater than one");
2247 		return -EINVAL;
2248 	}
2249 
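	/* Three cases for the RX path: DIM being turned on (program the
	 * DIM-derived frame count now; the enable flag is set only after the
	 * CR update), DIM off or being turned off (use the requested frame
	 * count), and DIM staying on (only the delay timer fields are
	 * updated; the frame count stays under DIM control).
	 */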
2250 	if (new_dim && !old_dim) {
2251 		cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
2252 				     ecoalesce->rx_coalesce_usecs);
2253 	} else if (!new_dim) {
2254 		if (old_dim) {
2255 			WRITE_ONCE(lp->rx_dim_enabled, false);
2256 			napi_synchronize(&lp->napi_rx);
2257 			flush_work(&lp->rx_dim.work);
2258 		}
2259 
2260 		cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
2261 				     ecoalesce->rx_coalesce_usecs);
2262 	} else {
2263 		/* Dummy value for count just to calculate timer */
2264 		cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
2265 		mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
2266 	}
2267 
2268 	axienet_update_coalesce_rx(lp, cr, mask);
2269 	if (new_dim && !old_dim)
2270 		WRITE_ONCE(lp->rx_dim_enabled, true);
2271 
2272 	cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
2273 			     ecoalesce->tx_coalesce_usecs);
2274 	axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
2275 	return 0;
2276 }
2277 
2278 static int
2279 axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2280 				    struct ethtool_link_ksettings *cmd)
2281 {
2282 	struct axienet_local *lp = netdev_priv(ndev);
2283 
2284 	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2285 }
2286 
2287 static int
2288 axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2289 				    const struct ethtool_link_ksettings *cmd)
2290 {
2291 	struct axienet_local *lp = netdev_priv(ndev);
2292 
2293 	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2294 }
2295 
2296 static int axienet_ethtools_nway_reset(struct net_device *dev)
2297 {
2298 	struct axienet_local *lp = netdev_priv(dev);
2299 
2300 	return phylink_ethtool_nway_reset(lp->phylink);
2301 }
2302 
2303 static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2304 					       struct ethtool_stats *stats,
2305 					       u64 *data)
2306 {
2307 	struct axienet_local *lp = netdev_priv(dev);
2308 	unsigned int start;
2309 
2310 	do {
2311 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2312 		data[0] = axienet_stat(lp, STAT_RX_BYTES);
2313 		data[1] = axienet_stat(lp, STAT_TX_BYTES);
2314 		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2315 		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2316 		data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2317 		data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2318 		data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
2319 		data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
2320 		data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
2321 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2322 }
2323 
2324 static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2325 	"Received bytes",
2326 	"Transmitted bytes",
2327 	"RX Good VLAN Tagged Frames",
2328 	"TX Good VLAN Tagged Frames",
2329 	"TX Good PFC Frames",
2330 	"RX Good PFC Frames",
2331 	"User Defined Counter 0",
2332 	"User Defined Counter 1",
2333 	"User Defined Counter 2",
2334 };
2335 
2336 static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2337 {
2338 	switch (stringset) {
2339 	case ETH_SS_STATS:
2340 		memcpy(data, axienet_ethtool_stats_strings,
2341 		       sizeof(axienet_ethtool_stats_strings));
2342 		break;
2343 	}
2344 }
2345 
2346 static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2347 {
2348 	struct axienet_local *lp = netdev_priv(dev);
2349 
2350 	switch (sset) {
2351 	case ETH_SS_STATS:
2352 		if (lp->features & XAE_FEATURE_STATS)
2353 			return ARRAY_SIZE(axienet_ethtool_stats_strings);
2354 		fallthrough;
2355 	default:
2356 		return -EOPNOTSUPP;
2357 	}
2358 }
2359 
2360 static void
2361 axienet_ethtools_get_pause_stats(struct net_device *dev,
2362 				 struct ethtool_pause_stats *pause_stats)
2363 {
2364 	struct axienet_local *lp = netdev_priv(dev);
2365 	unsigned int start;
2366 
2367 	if (!(lp->features & XAE_FEATURE_STATS))
2368 		return;
2369 
2370 	do {
2371 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2372 		pause_stats->tx_pause_frames =
2373 			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2374 		pause_stats->rx_pause_frames =
2375 			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2376 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2377 }
2378 
2379 static void
2380 axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2381 				  struct ethtool_eth_mac_stats *mac_stats)
2382 {
2383 	struct axienet_local *lp = netdev_priv(dev);
2384 	unsigned int start;
2385 
2386 	if (!(lp->features & XAE_FEATURE_STATS))
2387 		return;
2388 
2389 	do {
2390 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2391 		mac_stats->FramesTransmittedOK =
2392 			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2393 		mac_stats->SingleCollisionFrames =
2394 			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2395 		mac_stats->MultipleCollisionFrames =
2396 			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2397 		mac_stats->FramesReceivedOK =
2398 			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2399 		mac_stats->FrameCheckSequenceErrors =
2400 			axienet_stat(lp, STAT_RX_FCS_ERRORS);
2401 		mac_stats->AlignmentErrors =
2402 			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2403 		mac_stats->FramesWithDeferredXmissions =
2404 			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2405 		mac_stats->LateCollisions =
2406 			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2407 		mac_stats->FramesAbortedDueToXSColls =
2408 			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2409 		mac_stats->MulticastFramesXmittedOK =
2410 			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2411 		mac_stats->BroadcastFramesXmittedOK =
2412 			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2413 		mac_stats->FramesWithExcessiveDeferral =
2414 			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2415 		mac_stats->MulticastFramesReceivedOK =
2416 			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2417 		mac_stats->BroadcastFramesReceivedOK =
2418 			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2419 		mac_stats->InRangeLengthErrors =
2420 			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2421 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2422 }
2423 
2424 static void
2425 axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2426 				   struct ethtool_eth_ctrl_stats *ctrl_stats)
2427 {
2428 	struct axienet_local *lp = netdev_priv(dev);
2429 	unsigned int start;
2430 
2431 	if (!(lp->features & XAE_FEATURE_STATS))
2432 		return;
2433 
2434 	do {
2435 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2436 		ctrl_stats->MACControlFramesTransmitted =
2437 			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2438 		ctrl_stats->MACControlFramesReceived =
2439 			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2440 		ctrl_stats->UnsupportedOpcodesReceived =
2441 			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2442 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2443 }
2444 
2445 static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2446 	{   64,    64 },
2447 	{   65,   127 },
2448 	{  128,   255 },
2449 	{  256,   511 },
2450 	{  512,  1023 },
2451 	{ 1024,  1518 },
2452 	{ 1519, 16384 },
2453 	{ },
2454 };
2455 
2456 static void
2457 axienet_ethtool_get_rmon_stats(struct net_device *dev,
2458 			       struct ethtool_rmon_stats *rmon_stats,
2459 			       const struct ethtool_rmon_hist_range **ranges)
2460 {
2461 	struct axienet_local *lp = netdev_priv(dev);
2462 	unsigned int start;
2463 
2464 	if (!(lp->features & XAE_FEATURE_STATS))
2465 		return;
2466 
2467 	do {
2468 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2469 		rmon_stats->undersize_pkts =
2470 			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2471 		rmon_stats->oversize_pkts =
2472 			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2473 		rmon_stats->fragments =
2474 			axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2475 
2476 		rmon_stats->hist[0] =
2477 			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2478 		rmon_stats->hist[1] =
2479 			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2480 		rmon_stats->hist[2] =
2481 			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2482 		rmon_stats->hist[3] =
2483 			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2484 		rmon_stats->hist[4] =
2485 			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2486 		rmon_stats->hist[5] =
2487 			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2488 		rmon_stats->hist[6] =
2489 			rmon_stats->oversize_pkts;
2490 
2491 		rmon_stats->hist_tx[0] =
2492 			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2493 		rmon_stats->hist_tx[1] =
2494 			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2495 		rmon_stats->hist_tx[2] =
2496 			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2497 		rmon_stats->hist_tx[3] =
2498 			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2499 		rmon_stats->hist_tx[4] =
2500 			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2501 		rmon_stats->hist_tx[5] =
2502 			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2503 		rmon_stats->hist_tx[6] =
2504 			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2505 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2506 
2507 	*ranges = axienet_rmon_ranges;
2508 }
2509 
2510 static const struct ethtool_ops axienet_ethtool_ops = {
2511 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2512 				     ETHTOOL_COALESCE_USECS |
2513 				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
2514 	.get_drvinfo    = axienet_ethtools_get_drvinfo,
2515 	.get_regs_len   = axienet_ethtools_get_regs_len,
2516 	.get_regs       = axienet_ethtools_get_regs,
2517 	.get_link       = ethtool_op_get_link,
2518 	.get_ringparam	= axienet_ethtools_get_ringparam,
2519 	.set_ringparam	= axienet_ethtools_set_ringparam,
2520 	.get_pauseparam = axienet_ethtools_get_pauseparam,
2521 	.set_pauseparam = axienet_ethtools_set_pauseparam,
2522 	.get_coalesce   = axienet_ethtools_get_coalesce,
2523 	.set_coalesce   = axienet_ethtools_set_coalesce,
2524 	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2525 	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2526 	.nway_reset	= axienet_ethtools_nway_reset,
2527 	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2528 	.get_strings    = axienet_ethtools_get_strings,
2529 	.get_sset_count = axienet_ethtools_get_sset_count,
2530 	.get_pause_stats = axienet_ethtools_get_pause_stats,
2531 	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2532 	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2533 	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
2534 };
2535 
2536 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2537 {
2538 	return container_of(pcs, struct axienet_local, pcs);
2539 }
2540 
2541 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2542 				  unsigned int neg_mode,
2543 				  struct phylink_link_state *state)
2544 {
2545 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2546 
2547 	phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
2548 }
2549 
2550 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2551 {
2552 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2553 
2554 	phylink_mii_c22_pcs_an_restart(pcs_phy);
2555 }
2556 
2557 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2558 			      phy_interface_t interface,
2559 			      const unsigned long *advertising,
2560 			      bool permit_pause_to_mac)
2561 {
2562 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2563 	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2564 	struct axienet_local *lp = netdev_priv(ndev);
2565 	int ret;
2566 
2567 	if (lp->switch_x_sgmii) {
2568 		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2569 				    interface == PHY_INTERFACE_MODE_SGMII ?
2570 					XLNX_MII_STD_SELECT_SGMII : 0);
2571 		if (ret < 0) {
2572 			netdev_warn(ndev,
2573 				    "Failed to switch PHY interface: %d\n",
2574 				    ret);
2575 			return ret;
2576 		}
2577 	}
2578 
2579 	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2580 					 neg_mode);
2581 	if (ret < 0)
2582 		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2583 
2584 	return ret;
2585 }
2586 
2587 static const struct phylink_pcs_ops axienet_pcs_ops = {
2588 	.pcs_get_state = axienet_pcs_get_state,
2589 	.pcs_config = axienet_pcs_config,
2590 	.pcs_an_restart = axienet_pcs_an_restart,
2591 };
2592 
2593 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2594 						  phy_interface_t interface)
2595 {
2596 	struct net_device *ndev = to_net_dev(config->dev);
2597 	struct axienet_local *lp = netdev_priv(ndev);
2598 
2599 	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2600 	    interface ==  PHY_INTERFACE_MODE_SGMII)
2601 		return &lp->pcs;
2602 
2603 	return NULL;
2604 }
2605 
2606 static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2607 			       const struct phylink_link_state *state)
2608 {
2609 	/* nothing meaningful to do */
2610 }
2611 
2612 static void axienet_mac_link_down(struct phylink_config *config,
2613 				  unsigned int mode,
2614 				  phy_interface_t interface)
2615 {
2616 	/* nothing meaningful to do */
2617 }
2618 
2619 static void axienet_mac_link_up(struct phylink_config *config,
2620 				struct phy_device *phy,
2621 				unsigned int mode, phy_interface_t interface,
2622 				int speed, int duplex,
2623 				bool tx_pause, bool rx_pause)
2624 {
2625 	struct net_device *ndev = to_net_dev(config->dev);
2626 	struct axienet_local *lp = netdev_priv(ndev);
2627 	u32 emmc_reg, fcc_reg;
2628 
2629 	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2630 	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2631 
2632 	switch (speed) {
2633 	case SPEED_1000:
2634 		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2635 		break;
2636 	case SPEED_100:
2637 		emmc_reg |= XAE_EMMC_LINKSPD_100;
2638 		break;
2639 	case SPEED_10:
2640 		emmc_reg |= XAE_EMMC_LINKSPD_10;
2641 		break;
2642 	default:
2643 		dev_err(&ndev->dev,
			"Speeds other than 10, 100 or 1000 Mbps are not supported\n");
2645 		break;
2646 	}
2647 
2648 	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2649 
2650 	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2651 	if (tx_pause)
2652 		fcc_reg |= XAE_FCC_FCTX_MASK;
2653 	else
2654 		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2655 	if (rx_pause)
2656 		fcc_reg |= XAE_FCC_FCRX_MASK;
2657 	else
2658 		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2659 	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2660 }
2661 
2662 static const struct phylink_mac_ops axienet_phylink_ops = {
2663 	.mac_select_pcs = axienet_mac_select_pcs,
2664 	.mac_config = axienet_mac_config,
2665 	.mac_link_down = axienet_mac_link_down,
2666 	.mac_link_up = axienet_mac_link_up,
2667 };
2668 
2669 /**
2670  * axienet_dma_err_handler - Work queue task for Axi DMA Error
2671  * @work:	pointer to work_struct
2672  *
2673  * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2674  * Tx/Rx BDs.
2675  */
2676 static void axienet_dma_err_handler(struct work_struct *work)
2677 {
2678 	u32 i;
2679 	u32 axienet_status;
2680 	struct axidma_bd *cur_p;
2681 	struct axienet_local *lp = container_of(work, struct axienet_local,
2682 						dma_err_task);
2683 	struct net_device *ndev = lp->ndev;
2684 
2685 	/* Don't bother if we are going to stop anyway */
2686 	if (READ_ONCE(lp->stopping))
2687 		return;
2688 
2689 	napi_disable(&lp->napi_tx);
2690 	napi_disable(&lp->napi_rx);
2691 
2692 	axienet_setoptions(ndev, lp->options &
2693 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2694 
2695 	axienet_dma_stop(lp);
2696 	netdev_reset_queue(ndev);
2697 
2698 	for (i = 0; i < lp->tx_bd_num; i++) {
2699 		cur_p = &lp->tx_bd_v[i];
2700 		if (cur_p->cntrl) {
2701 			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2702 
2703 			dma_unmap_single(lp->dev, addr,
2704 					 (cur_p->cntrl &
2705 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2706 					 DMA_TO_DEVICE);
2707 		}
2708 		if (cur_p->skb)
2709 			dev_kfree_skb_irq(cur_p->skb);
2710 		cur_p->phys = 0;
2711 		cur_p->phys_msb = 0;
2712 		cur_p->cntrl = 0;
2713 		cur_p->status = 0;
2714 		cur_p->app0 = 0;
2715 		cur_p->app1 = 0;
2716 		cur_p->app2 = 0;
2717 		cur_p->app3 = 0;
2718 		cur_p->app4 = 0;
2719 		cur_p->skb = NULL;
2720 	}
2721 
2722 	for (i = 0; i < lp->rx_bd_num; i++) {
2723 		cur_p = &lp->rx_bd_v[i];
2724 		cur_p->status = 0;
2725 		cur_p->app0 = 0;
2726 		cur_p->app1 = 0;
2727 		cur_p->app2 = 0;
2728 		cur_p->app3 = 0;
2729 		cur_p->app4 = 0;
2730 	}
2731 
2732 	lp->tx_bd_ci = 0;
2733 	lp->tx_bd_tail = 0;
2734 	lp->rx_bd_ci = 0;
2735 
2736 	axienet_dma_start(lp);
2737 
2738 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2739 	axienet_status &= ~XAE_RCW1_RX_MASK;
2740 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2741 
2742 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2743 	if (axienet_status & XAE_INT_RXRJECT_MASK)
2744 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2745 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2746 		    XAE_INT_RECV_ERROR_MASK : 0);
2747 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2748 
2749 	/* Sync default options with HW but leave receiver and
2750 	 * transmitter disabled.
2751 	 */
2752 	axienet_setoptions(ndev, lp->options &
2753 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2754 	axienet_set_mac_address(ndev, NULL);
2755 	axienet_set_multicast_list(ndev);
2756 	napi_enable(&lp->napi_rx);
2757 	napi_enable(&lp->napi_tx);
2758 	axienet_setoptions(ndev, lp->options);
2759 }
2760 
2761 /**
2762  * axienet_probe - Axi Ethernet probe function.
2763  * @pdev:	Pointer to platform device structure.
2764  *
2765  * Return: 0, on success
2766  *	    Non-zero error value on failure.
2767  *
2768  * This is the probe routine for Axi Ethernet driver. This is called before
2769  * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device, parses the device tree to populate the fields of axienet_local,
 * and registers the Ethernet device.
2772  */
2773 static int axienet_probe(struct platform_device *pdev)
2774 {
2775 	int ret;
2776 	struct device_node *np;
2777 	struct axienet_local *lp;
2778 	struct net_device *ndev;
2779 	struct resource *ethres;
2780 	u8 mac_addr[ETH_ALEN];
2781 	int addr_width = 32;
2782 	u32 value;
2783 
2784 	ndev = alloc_etherdev(sizeof(*lp));
2785 	if (!ndev)
2786 		return -ENOMEM;
2787 
2788 	platform_set_drvdata(pdev, ndev);
2789 
2790 	SET_NETDEV_DEV(ndev, &pdev->dev);
2791 	ndev->features = NETIF_F_SG;
2792 	ndev->ethtool_ops = &axienet_ethtool_ops;
2793 
2794 	/* MTU range: 64 - 9000 */
2795 	ndev->min_mtu = 64;
2796 	ndev->max_mtu = XAE_JUMBO_MTU;
2797 
2798 	lp = netdev_priv(ndev);
2799 	lp->ndev = ndev;
2800 	lp->dev = &pdev->dev;
2801 	lp->options = XAE_OPTION_DEFAULTS;
2802 	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2803 	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2804 
2805 	u64_stats_init(&lp->rx_stat_sync);
2806 	u64_stats_init(&lp->tx_stat_sync);
2807 
2808 	mutex_init(&lp->stats_lock);
2809 	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2810 	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2811 
2812 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2813 	if (!lp->axi_clk) {
2814 		/* For backward compatibility, if named AXI clock is not present,
2815 		 * treat the first clock specified as the AXI clock.
2816 		 */
2817 		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2818 	}
2819 	if (IS_ERR(lp->axi_clk)) {
2820 		ret = PTR_ERR(lp->axi_clk);
2821 		goto free_netdev;
2822 	}
2823 	ret = clk_prepare_enable(lp->axi_clk);
2824 	if (ret) {
2825 		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2826 		goto free_netdev;
2827 	}
2828 
2829 	lp->misc_clks[0].id = "axis_clk";
2830 	lp->misc_clks[1].id = "ref_clk";
2831 	lp->misc_clks[2].id = "mgt_clk";
2832 
2833 	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2834 	if (ret)
2835 		goto cleanup_clk;
2836 
2837 	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2838 	if (ret)
2839 		goto cleanup_clk;
2840 
2841 	/* Map device registers */
2842 	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2843 	if (IS_ERR(lp->regs)) {
2844 		ret = PTR_ERR(lp->regs);
2845 		goto cleanup_clk;
2846 	}
2847 	lp->regs_start = ethres->start;
2848 
2849 	/* Setup checksum offload, but default to off if not specified */
2850 	lp->features = 0;
2851 
2852 	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2853 		lp->features |= XAE_FEATURE_STATS;
2854 
2855 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2856 	if (!ret) {
2857 		switch (value) {
2858 		case 1:
2859 			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2860 			/* Can checksum any contiguous range */
2861 			ndev->features |= NETIF_F_HW_CSUM;
2862 			break;
2863 		case 2:
2864 			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2865 			/* Can checksum TCP/UDP over IPv4. */
2866 			ndev->features |= NETIF_F_IP_CSUM;
2867 			break;
2868 		}
2869 	}
2870 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2871 	if (!ret) {
2872 		switch (value) {
2873 		case 1:
2874 			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2875 			ndev->features |= NETIF_F_RXCSUM;
2876 			break;
2877 		case 2:
2878 			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2879 			ndev->features |= NETIF_F_RXCSUM;
2880 			break;
2881 		}
2882 	}
	/* Supporting jumbo frames requires sufficient Rx/Tx buffer memory in
	 * the Axi Ethernet hardware. Read the Rx memory size configured in
	 * the device tree; axienet_change_mtu() validates MTU changes
	 * against it.
	 */
2889 	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2890 
2891 	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2892 						   "xlnx,switch-x-sgmii");
2893 
2894 	/* Start with the proprietary, and broken phy_type */
2895 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2896 	if (!ret) {
2897 		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2898 		switch (value) {
2899 		case XAE_PHY_TYPE_MII:
2900 			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2901 			break;
2902 		case XAE_PHY_TYPE_GMII:
2903 			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2904 			break;
2905 		case XAE_PHY_TYPE_RGMII_2_0:
2906 			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2907 			break;
2908 		case XAE_PHY_TYPE_SGMII:
2909 			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2910 			break;
2911 		case XAE_PHY_TYPE_1000BASE_X:
2912 			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2913 			break;
2914 		default:
2915 			ret = -EINVAL;
2916 			goto cleanup_clk;
2917 		}
2918 	} else {
2919 		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2920 		if (ret)
2921 			goto cleanup_clk;
2922 	}
2923 	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2924 	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2925 		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2926 		ret = -EINVAL;
2927 		goto cleanup_clk;
2928 	}
2929 
2930 	if (!of_property_present(pdev->dev.of_node, "dmas")) {
2931 		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2932 		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2933 
2934 		if (np) {
2935 			struct resource dmares;
2936 
2937 			ret = of_address_to_resource(np, 0, &dmares);
2938 			if (ret) {
2939 				dev_err(&pdev->dev,
2940 					"unable to get DMA resource\n");
2941 				of_node_put(np);
2942 				goto cleanup_clk;
2943 			}
2944 			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2945 							     &dmares);
2946 			lp->rx_irq = irq_of_parse_and_map(np, 1);
2947 			lp->tx_irq = irq_of_parse_and_map(np, 0);
2948 			of_node_put(np);
2949 			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2950 		} else {
2951 			/* Check for these resources directly on the Ethernet node. */
2952 			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2953 			lp->rx_irq = platform_get_irq(pdev, 1);
2954 			lp->tx_irq = platform_get_irq(pdev, 0);
2955 			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2956 		}
2957 		if (IS_ERR(lp->dma_regs)) {
2958 			dev_err(&pdev->dev, "could not map DMA regs\n");
2959 			ret = PTR_ERR(lp->dma_regs);
2960 			goto cleanup_clk;
2961 		}
2962 		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2963 			dev_err(&pdev->dev, "could not determine irqs\n");
2964 			ret = -ENOMEM;
2965 			goto cleanup_clk;
2966 		}
2967 
2968 		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2969 		ret = __axienet_device_reset(lp);
2970 		if (ret)
2971 			goto cleanup_clk;
2972 
2973 		/* Autodetect the need for 64-bit DMA pointers.
2974 		 * When the IP is configured for a bus width bigger than 32 bits,
2975 		 * writing the MSB registers is mandatory, even if they are all 0.
2976 		 * We can detect this case by writing all 1's to one such register
2977 		 * and see if that sticks: when the IP is configured for 32 bits
2978 		 * only, those registers are RES0.
2979 		 * Those MSB registers were introduced in IP v7.1, which we check first.
2980 		 */
2981 		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2982 			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2983 
2984 			iowrite32(0x0, desc);
2985 			if (ioread32(desc) == 0) {	/* sanity check */
2986 				iowrite32(0xffffffff, desc);
2987 				if (ioread32(desc) > 0) {
2988 					lp->features |= XAE_FEATURE_DMA_64BIT;
2989 					addr_width = 64;
2990 					dev_info(&pdev->dev,
2991 						 "autodetected 64-bit DMA range\n");
2992 				}
2993 				iowrite32(0x0, desc);
2994 			}
2995 		}
2996 		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2997 			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2998 			ret = -EINVAL;
2999 			goto cleanup_clk;
3000 		}
3001 
3002 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
3003 		if (ret) {
3004 			dev_err(&pdev->dev, "No suitable DMA available\n");
3005 			goto cleanup_clk;
3006 		}
3007 		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
3008 		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
3009 	} else {
3010 		struct xilinx_vdma_config cfg;
3011 		struct dma_chan *tx_chan;
3012 
3013 		lp->eth_irq = platform_get_irq_optional(pdev, 0);
3014 		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
3015 			ret = lp->eth_irq;
3016 			goto cleanup_clk;
3017 		}
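		/* Probe only the TX channel here to verify that a dmaengine
		 * DMA is present and to reset it; the channels are requested
		 * again in axienet_init_dmaengine() when the device is opened.
		 */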
3018 		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
3019 		if (IS_ERR(tx_chan)) {
3020 			ret = PTR_ERR(tx_chan);
3021 			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
3022 			goto cleanup_clk;
3023 		}
3024 
3025 		cfg.reset = 1;
		/* Despite the "VDMA" name, this config call also resets AXI DMA channels */
3027 		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
3028 		if (ret < 0) {
3029 			dev_err(&pdev->dev, "Reset channel failed\n");
3030 			dma_release_channel(tx_chan);
3031 			goto cleanup_clk;
3032 		}
3033 
3034 		dma_release_channel(tx_chan);
3035 		lp->use_dmaengine = 1;
3036 	}
3037 
3038 	if (lp->use_dmaengine)
3039 		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
3040 	else
3041 		ndev->netdev_ops = &axienet_netdev_ops;
3042 	/* Check for Ethernet core IRQ (optional) */
3043 	if (lp->eth_irq <= 0)
3044 		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
3045 
3046 	/* Retrieve the MAC address */
3047 	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
3048 	if (!ret) {
3049 		axienet_set_mac_address(ndev, mac_addr);
3050 	} else {
3051 		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
3052 			 ret);
3053 		axienet_set_mac_address(ndev, NULL);
3054 	}
3055 
3056 	spin_lock_init(&lp->rx_cr_lock);
3057 	spin_lock_init(&lp->tx_cr_lock);
3058 	INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
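	/* Adaptive RX coalescing (DIM) is enabled by default, starting at
	 * profile index 1 (a four-frame coalesce count).
	 */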
3059 	lp->rx_dim_enabled = true;
3060 	lp->rx_dim.profile_ix = 1;
3061 	lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
3062 					XAXIDMA_DFT_RX_USEC);
3063 	lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
3064 					XAXIDMA_DFT_TX_USEC);
3065 
3066 	ret = axienet_mdio_setup(lp);
3067 	if (ret)
3068 		dev_warn(&pdev->dev,
3069 			 "error registering MDIO bus: %d\n", ret);
3070 
3071 	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
3072 	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
3073 		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
3074 		if (!np) {
3075 			/* Deprecated: Always use "pcs-handle" for pcs_phy.
3076 			 * Falling back to "phy-handle" here is only for
3077 			 * backward compatibility with old device trees.
3078 			 */
3079 			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
3080 		}
3081 		if (!np) {
3082 			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
3083 			ret = -EINVAL;
3084 			goto cleanup_mdio;
3085 		}
3086 		lp->pcs_phy = of_mdio_find_device(np);
3087 		if (!lp->pcs_phy) {
3088 			ret = -EPROBE_DEFER;
3089 			of_node_put(np);
3090 			goto cleanup_mdio;
3091 		}
3092 		of_node_put(np);
3093 		lp->pcs.ops = &axienet_pcs_ops;
3094 		lp->pcs.poll = true;
3095 	}
3096 
3097 	lp->phylink_config.dev = &ndev->dev;
3098 	lp->phylink_config.type = PHYLINK_NETDEV;
3099 	lp->phylink_config.mac_managed_pm = true;
3100 	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
3101 		MAC_10FD | MAC_100FD | MAC_1000FD;
3102 
3103 	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
3104 	if (lp->switch_x_sgmii) {
3105 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
3106 			  lp->phylink_config.supported_interfaces);
3107 		__set_bit(PHY_INTERFACE_MODE_SGMII,
3108 			  lp->phylink_config.supported_interfaces);
3109 	}
3110 
3111 	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
3112 				     lp->phy_mode,
3113 				     &axienet_phylink_ops);
3114 	if (IS_ERR(lp->phylink)) {
3115 		ret = PTR_ERR(lp->phylink);
3116 		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
3117 		goto cleanup_mdio;
3118 	}
3119 
3120 	ret = register_netdev(lp->ndev);
3121 	if (ret) {
3122 		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
3123 		goto cleanup_phylink;
3124 	}
3125 
3126 	return 0;
3127 
3128 cleanup_phylink:
3129 	phylink_destroy(lp->phylink);
3130 
3131 cleanup_mdio:
3132 	if (lp->pcs_phy)
3133 		put_device(&lp->pcs_phy->dev);
3134 	if (lp->mii_bus)
3135 		axienet_mdio_teardown(lp);
3136 cleanup_clk:
3137 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
3138 	clk_disable_unprepare(lp->axi_clk);
3139 
3140 free_netdev:
3141 	free_netdev(ndev);
3142 
3143 	return ret;
3144 }
3145 
3146 static void axienet_remove(struct platform_device *pdev)
3147 {
3148 	struct net_device *ndev = platform_get_drvdata(pdev);
3149 	struct axienet_local *lp = netdev_priv(ndev);
3150 
3151 	unregister_netdev(ndev);
3152 
3153 	if (lp->phylink)
3154 		phylink_destroy(lp->phylink);
3155 
3156 	if (lp->pcs_phy)
3157 		put_device(&lp->pcs_phy->dev);
3158 
3159 	axienet_mdio_teardown(lp);
3160 
3161 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
3162 	clk_disable_unprepare(lp->axi_clk);
3163 
3164 	free_netdev(ndev);
3165 }
3166 
3167 static void axienet_shutdown(struct platform_device *pdev)
3168 {
3169 	struct net_device *ndev = platform_get_drvdata(pdev);
3170 
3171 	rtnl_lock();
3172 	netif_device_detach(ndev);
3173 
3174 	if (netif_running(ndev))
3175 		dev_close(ndev);
3176 
3177 	rtnl_unlock();
3178 }
3179 
3180 static int axienet_suspend(struct device *dev)
3181 {
3182 	struct net_device *ndev = dev_get_drvdata(dev);
3183 
3184 	if (!netif_running(ndev))
3185 		return 0;
3186 
3187 	netif_device_detach(ndev);
3188 
3189 	rtnl_lock();
3190 	axienet_stop(ndev);
3191 	rtnl_unlock();
3192 
3193 	return 0;
3194 }
3195 
3196 static int axienet_resume(struct device *dev)
3197 {
3198 	struct net_device *ndev = dev_get_drvdata(dev);
3199 
3200 	if (!netif_running(ndev))
3201 		return 0;
3202 
3203 	rtnl_lock();
3204 	axienet_open(ndev);
3205 	rtnl_unlock();
3206 
3207 	netif_device_attach(ndev);
3208 
3209 	return 0;
3210 }
3211 
3212 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3213 				axienet_suspend, axienet_resume);
3214 
3215 static struct platform_driver axienet_driver = {
3216 	.probe = axienet_probe,
3217 	.remove = axienet_remove,
3218 	.shutdown = axienet_shutdown,
3219 	.driver = {
3220 		 .name = "xilinx_axienet",
3221 		 .pm = &axienet_pm_ops,
3222 		 .of_match_table = axienet_of_match,
3223 	},
3224 };
3225 
3226 module_platform_driver(axienet_driver);
3227 
3228 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3229 MODULE_AUTHOR("Xilinx");
3230 MODULE_LICENSE("GPL");
3231