xref: /linux/drivers/net/ethernet/xilinx/xilinx_axienet_main.c (revision 07d6bf634bc8f93caf8920c9d61df761645336e2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Xilinx Axi Ethernet device driver
4  *
5  * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
6  * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
7  * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
8  * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
9  * Copyright (c) 2010 - 2011 PetaLogix
10  * Copyright (c) 2019 - 2022 Calian Advanced Technologies
11  * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
12  *
13  * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
14  * and Spartan6.
15  *
16  * TODO:
17  *  - Add Axi Fifo support.
18  *  - Factor out Axi DMA code into separate driver.
19  *  - Test and fix basic multicast filtering.
20  *  - Add support for extended multicast filtering.
21  *  - Test basic VLAN support.
22  *  - Add support for extended VLAN support.
23  */
24 
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/etherdevice.h>
28 #include <linux/module.h>
29 #include <linux/netdevice.h>
30 #include <linux/of.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_address.h>
35 #include <linux/platform_device.h>
36 #include <linux/skbuff.h>
37 #include <linux/math64.h>
38 #include <linux/phy.h>
39 #include <linux/mii.h>
40 #include <linux/ethtool.h>
41 #include <linux/dmaengine.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/dma/xilinx_dma.h>
44 #include <linux/circ_buf.h>
45 #include <net/netdev_queues.h>
46 
47 #include "xilinx_axienet.h"
48 
49 /* Descriptors defines for Tx and Rx DMA */
50 #define TX_BD_NUM_DEFAULT		128
51 #define RX_BD_NUM_DEFAULT		1024
52 #define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
53 #define TX_BD_NUM_MAX			4096
54 #define RX_BD_NUM_MAX			4096
55 #define DMA_NUM_APP_WORDS		5
56 #define LEN_APP				4
57 #define RX_BUF_NUM_DEFAULT		128
58 
59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
60 #define DRIVER_NAME		"xaxienet"
61 #define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
62 #define DRIVER_VERSION		"1.00a"
63 
64 #define AXIENET_REGS_N		40
65 
66 static void axienet_rx_submit_desc(struct net_device *ndev);
67 
68 /* Match table for of_platform binding */
69 static const struct of_device_id axienet_of_match[] = {
70 	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
71 	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
72 	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
73 	{},
74 };
75 
76 MODULE_DEVICE_TABLE(of, axienet_of_match);
77 
78 /* Option table for setting up Axi Ethernet hardware options */
79 static struct axienet_option axienet_options[] = {
80 	/* Turn on jumbo packet support for both Rx and Tx */
81 	{
82 		.opt = XAE_OPTION_JUMBO,
83 		.reg = XAE_TC_OFFSET,
84 		.m_or = XAE_TC_JUM_MASK,
85 	}, {
86 		.opt = XAE_OPTION_JUMBO,
87 		.reg = XAE_RCW1_OFFSET,
88 		.m_or = XAE_RCW1_JUM_MASK,
89 	}, { /* Turn on VLAN packet support for both Rx and Tx */
90 		.opt = XAE_OPTION_VLAN,
91 		.reg = XAE_TC_OFFSET,
92 		.m_or = XAE_TC_VLAN_MASK,
93 	}, {
94 		.opt = XAE_OPTION_VLAN,
95 		.reg = XAE_RCW1_OFFSET,
96 		.m_or = XAE_RCW1_VLAN_MASK,
97 	}, { /* Turn on FCS stripping on receive packets */
98 		.opt = XAE_OPTION_FCS_STRIP,
99 		.reg = XAE_RCW1_OFFSET,
100 		.m_or = XAE_RCW1_FCS_MASK,
101 	}, { /* Turn on FCS insertion on transmit packets */
102 		.opt = XAE_OPTION_FCS_INSERT,
103 		.reg = XAE_TC_OFFSET,
104 		.m_or = XAE_TC_FCS_MASK,
105 	}, { /* Turn off length/type field checking on receive packets */
106 		.opt = XAE_OPTION_LENTYPE_ERR,
107 		.reg = XAE_RCW1_OFFSET,
108 		.m_or = XAE_RCW1_LT_DIS_MASK,
109 	}, { /* Turn on Rx flow control */
110 		.opt = XAE_OPTION_FLOW_CONTROL,
111 		.reg = XAE_FCC_OFFSET,
112 		.m_or = XAE_FCC_FCRX_MASK,
113 	}, { /* Turn on Tx flow control */
114 		.opt = XAE_OPTION_FLOW_CONTROL,
115 		.reg = XAE_FCC_OFFSET,
116 		.m_or = XAE_FCC_FCTX_MASK,
117 	}, { /* Turn on promiscuous frame filtering */
118 		.opt = XAE_OPTION_PROMISC,
119 		.reg = XAE_FMI_OFFSET,
120 		.m_or = XAE_FMI_PM_MASK,
121 	}, { /* Enable transmitter */
122 		.opt = XAE_OPTION_TXEN,
123 		.reg = XAE_TC_OFFSET,
124 		.m_or = XAE_TC_TX_MASK,
125 	}, { /* Enable receiver */
126 		.opt = XAE_OPTION_RXEN,
127 		.reg = XAE_RCW1_OFFSET,
128 		.m_or = XAE_RCW1_RX_MASK,
129 	},
130 	{}
131 };
132 
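/**
 * axienet_get_rx_desc - Get the RX skbuf_dma descriptor for a ring index
 * @lp:		Pointer to the axienet_local structure
 * @i:		Ring index, wrapped to the RX ring size (RX_BUF_NUM_DEFAULT)
 *
 * Return: Pointer to the skbuf_dma_descriptor at the wrapped index.
 */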
133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
134 {
135 	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
136 }
137 
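/**
 * axienet_get_tx_desc - Get the TX skbuf_dma descriptor for a ring index
 * @lp:		Pointer to the axienet_local structure
 * @i:		Ring index, wrapped to the TX ring size (TX_BD_NUM_MAX)
 *
 * Return: Pointer to the skbuf_dma_descriptor at the wrapped index.
 */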
138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
139 {
140 	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
141 }
142 
143 /**
144  * axienet_dma_in32 - Memory mapped Axi DMA register read
145  * @lp:		Pointer to axienet local structure
146  * @reg:	Address offset from the base address of the Axi DMA core
147  *
148  * Return: The contents of the Axi DMA register
149  *
150  * This function returns the contents of the corresponding Axi DMA register.
151  */
152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
153 {
154 	return ioread32(lp->dma_regs + reg);
155 }
156 
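/**
 * desc_set_phys_addr - Store a DMA address in a buffer descriptor
 * @lp:		Pointer to the axienet_local structure
 * @addr:	DMA address to store
 * @desc:	Buffer descriptor to update
 *
 * The lower 32 bits are always written; the upper 32 bits are written only
 * when the hardware supports 64-bit DMA addressing.
 */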
157 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
158 			       struct axidma_bd *desc)
159 {
160 	desc->phys = lower_32_bits(addr);
161 	if (lp->features & XAE_FEATURE_DMA_64BIT)
162 		desc->phys_msb = upper_32_bits(addr);
163 }
164 
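/**
 * desc_get_phys_addr - Read the DMA address stored in a buffer descriptor
 * @lp:		Pointer to the axienet_local structure
 * @desc:	Buffer descriptor to read
 *
 * Return: The DMA address, including the upper 32 bits when the hardware
 * supports 64-bit DMA addressing.
 */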
165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
166 				     struct axidma_bd *desc)
167 {
168 	dma_addr_t ret = desc->phys;
169 
170 	if (lp->features & XAE_FEATURE_DMA_64BIT)
171 		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
172 
173 	return ret;
174 }
175 
176 /**
177  * axienet_dma_bd_release - Release buffer descriptor rings
178  * @ndev:	Pointer to the net_device structure
179  *
180  * This function is used to release the descriptors allocated in
181  * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
182  * driver stop API is called.
183  */
184 static void axienet_dma_bd_release(struct net_device *ndev)
185 {
186 	int i;
187 	struct axienet_local *lp = netdev_priv(ndev);
188 
189 	/* If we end up here, tx_bd_v must have been DMA allocated. */
190 	dma_free_coherent(lp->dev,
191 			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
192 			  lp->tx_bd_v,
193 			  lp->tx_bd_p);
194 
195 	if (!lp->rx_bd_v)
196 		return;
197 
198 	for (i = 0; i < lp->rx_bd_num; i++) {
199 		dma_addr_t phys;
200 
201 		/* A NULL skb means this descriptor has not been initialised
202 		 * at all.
203 		 */
204 		if (!lp->rx_bd_v[i].skb)
205 			break;
206 
207 		dev_kfree_skb(lp->rx_bd_v[i].skb);
208 
209 		/* For each descriptor, we programmed cntrl with the (non-zero)
210 		 * descriptor size, after it had been successfully allocated.
211 		 * So a non-zero value in there means we need to unmap it.
212 		 */
213 		if (lp->rx_bd_v[i].cntrl) {
214 			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
215 			dma_unmap_single(lp->dev, phys,
216 					 lp->max_frm_size, DMA_FROM_DEVICE);
217 		}
218 	}
219 
220 	dma_free_coherent(lp->dev,
221 			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
222 			  lp->rx_bd_v,
223 			  lp->rx_bd_p);
224 }
225 
226 /**
227  * axienet_usec_to_timer - Calculate IRQ delay timer value
228  * @lp:		Pointer to the axienet_local structure
229  * @coalesce_usec: Microseconds to convert into timer value
230  */
231 static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
232 {
233 	u32 result;
234 	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
235 
236 	if (lp->axi_clk)
237 		clk_rate = clk_get_rate(lp->axi_clk);
238 
239 	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
240 	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
241 					 (u64)125000000);
242 	if (result > 255)
243 		result = 255;
244 
245 	return result;
246 }
247 
248 /**
249  * axienet_dma_start - Set up DMA registers and start DMA operation
250  * @lp:		Pointer to the axienet_local structure
251  */
252 static void axienet_dma_start(struct axienet_local *lp)
253 {
254 	/* Start updating the Rx channel control register */
255 	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
256 			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
257 	/* Only set interrupt delay timer if not generating an interrupt on
258 	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
259 	 */
260 	if (lp->coalesce_count_rx > 1)
261 		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
262 					<< XAXIDMA_DELAY_SHIFT) |
263 				 XAXIDMA_IRQ_DELAY_MASK;
264 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
265 
266 	/* Start updating the Tx channel control register */
267 	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
268 			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
269 	/* Only set interrupt delay timer if not generating an interrupt on
270 	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
271 	 */
272 	if (lp->coalesce_count_tx > 1)
273 		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
274 					<< XAXIDMA_DELAY_SHIFT) |
275 				 XAXIDMA_IRQ_DELAY_MASK;
276 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
277 
278 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
279 	 * halted state. This will make the Rx side ready for reception.
280 	 */
281 	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
282 	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
283 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
284 	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
285 			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
286 
287 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
288 	 * Tx channel is now ready to run. But only after we write to the
289 	 * tail pointer register that the Tx channel will start transmitting.
290 	 */
291 	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
292 	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
293 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
294 }
295 
296 /**
297  * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
298  * @ndev:	Pointer to the net_device structure
299  *
300  * Return: 0 on success, -ENOMEM on failure
301  *
302  * This function is called to initialize the Rx and Tx DMA descriptor
303  * rings. This initializes the descriptors with required default values
304  * and is called when Axi Ethernet driver reset is called.
305  */
306 static int axienet_dma_bd_init(struct net_device *ndev)
307 {
308 	int i;
309 	struct sk_buff *skb;
310 	struct axienet_local *lp = netdev_priv(ndev);
311 
312 	/* Reset the indexes which are used for accessing the BDs */
313 	lp->tx_bd_ci = 0;
314 	lp->tx_bd_tail = 0;
315 	lp->rx_bd_ci = 0;
316 
317 	/* Allocate the Tx and Rx buffer descriptors. */
318 	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
319 					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
320 					 &lp->tx_bd_p, GFP_KERNEL);
321 	if (!lp->tx_bd_v)
322 		return -ENOMEM;
323 
324 	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
325 					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
326 					 &lp->rx_bd_p, GFP_KERNEL);
327 	if (!lp->rx_bd_v)
328 		goto out;
329 
330 	for (i = 0; i < lp->tx_bd_num; i++) {
331 		dma_addr_t addr = lp->tx_bd_p +
332 				  sizeof(*lp->tx_bd_v) *
333 				  ((i + 1) % lp->tx_bd_num);
334 
335 		lp->tx_bd_v[i].next = lower_32_bits(addr);
336 		if (lp->features & XAE_FEATURE_DMA_64BIT)
337 			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
338 	}
339 
340 	for (i = 0; i < lp->rx_bd_num; i++) {
341 		dma_addr_t addr;
342 
343 		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
344 			((i + 1) % lp->rx_bd_num);
345 		lp->rx_bd_v[i].next = lower_32_bits(addr);
346 		if (lp->features & XAE_FEATURE_DMA_64BIT)
347 			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
348 
349 		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
350 		if (!skb)
351 			goto out;
352 
353 		lp->rx_bd_v[i].skb = skb;
354 		addr = dma_map_single(lp->dev, skb->data,
355 				      lp->max_frm_size, DMA_FROM_DEVICE);
356 		if (dma_mapping_error(lp->dev, addr)) {
357 			netdev_err(ndev, "DMA mapping error\n");
358 			goto out;
359 		}
360 		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
361 
362 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
363 	}
364 
365 	axienet_dma_start(lp);
366 
367 	return 0;
368 out:
369 	axienet_dma_bd_release(ndev);
370 	return -ENOMEM;
371 }
372 
373 /**
374  * axienet_set_mac_address - Write the MAC address
375  * @ndev:	Pointer to the net_device structure
376  * @address:	6 byte Address to be written as MAC address
377  *
378  * This function is called to initialize the MAC address of the Axi Ethernet
379  * core. It writes to the UAW0 and UAW1 registers of the core.
380  */
381 static void axienet_set_mac_address(struct net_device *ndev,
382 				    const void *address)
383 {
384 	struct axienet_local *lp = netdev_priv(ndev);
385 
386 	if (address)
387 		eth_hw_addr_set(ndev, address);
388 	if (!is_valid_ether_addr(ndev->dev_addr))
389 		eth_hw_addr_random(ndev);
390 
391 	/* Set up the unicast MAC address filter with the given MAC address */
392 	axienet_iow(lp, XAE_UAW0_OFFSET,
393 		    (ndev->dev_addr[0]) |
394 		    (ndev->dev_addr[1] << 8) |
395 		    (ndev->dev_addr[2] << 16) |
396 		    (ndev->dev_addr[3] << 24));
397 	axienet_iow(lp, XAE_UAW1_OFFSET,
398 		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
399 		      ~XAE_UAW1_UNICASTADDR_MASK) |
400 		     (ndev->dev_addr[4] |
401 		     (ndev->dev_addr[5] << 8))));
402 }
403 
404 /**
405  * netdev_set_mac_address - Write the MAC address (from outside the driver)
406  * @ndev:	Pointer to the net_device structure
407  * @p:		6 byte Address to be written as MAC address
408  *
409  * Return: 0 for all conditions. Presently, there is no failure case.
410  *
411  * This function is called to initialize the MAC address of the Axi Ethernet
412  * core. It calls the core specific axienet_set_mac_address. This is the
413  * function that goes into net_device_ops structure entry ndo_set_mac_address.
414  */
415 static int netdev_set_mac_address(struct net_device *ndev, void *p)
416 {
417 	struct sockaddr *addr = p;
418 
419 	axienet_set_mac_address(ndev, addr->sa_data);
420 	return 0;
421 }
422 
423 /**
424  * axienet_set_multicast_list - Prepare the multicast table
425  * @ndev:	Pointer to the net_device structure
426  *
427  * This function is called to initialize the multicast table during
428  * initialization. The Axi Ethernet basic multicast support has a four-entry
429  * multicast table which is initialized here. Additionally this function
430  * is hooked into the net_device_ops structure entry ndo_set_rx_mode. This
431  * means whenever the multicast table entries need to be updated this
432  * function gets called.
433  */
434 static void axienet_set_multicast_list(struct net_device *ndev)
435 {
436 	int i = 0;
437 	u32 reg, af0reg, af1reg;
438 	struct axienet_local *lp = netdev_priv(ndev);
439 
440 	reg = axienet_ior(lp, XAE_FMI_OFFSET);
441 	reg &= ~XAE_FMI_PM_MASK;
442 	if (ndev->flags & IFF_PROMISC)
443 		reg |= XAE_FMI_PM_MASK;
444 	else
445 		reg &= ~XAE_FMI_PM_MASK;
446 	axienet_iow(lp, XAE_FMI_OFFSET, reg);
447 
448 	if (ndev->flags & IFF_ALLMULTI ||
449 	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
450 		reg &= 0xFFFFFF00;
451 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
452 		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
453 		axienet_iow(lp, XAE_AF1_OFFSET, 0);
454 		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
455 		axienet_iow(lp, XAE_AM1_OFFSET, 0);
456 		axienet_iow(lp, XAE_FFE_OFFSET, 1);
457 		i = 1;
458 	} else if (!netdev_mc_empty(ndev)) {
459 		struct netdev_hw_addr *ha;
460 
461 		netdev_for_each_mc_addr(ha, ndev) {
462 			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
463 				break;
464 
465 			af0reg = (ha->addr[0]);
466 			af0reg |= (ha->addr[1] << 8);
467 			af0reg |= (ha->addr[2] << 16);
468 			af0reg |= (ha->addr[3] << 24);
469 
470 			af1reg = (ha->addr[4]);
471 			af1reg |= (ha->addr[5] << 8);
472 
473 			reg &= 0xFFFFFF00;
474 			reg |= i;
475 
476 			axienet_iow(lp, XAE_FMI_OFFSET, reg);
477 			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
478 			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
479 			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
480 			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
481 			axienet_iow(lp, XAE_FFE_OFFSET, 1);
482 			i++;
483 		}
484 	}
485 
486 	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
487 		reg &= 0xFFFFFF00;
488 		reg |= i;
489 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
490 		axienet_iow(lp, XAE_FFE_OFFSET, 0);
491 	}
492 }
493 
494 /**
495  * axienet_setoptions - Set an Axi Ethernet option
496  * @ndev:	Pointer to the net_device structure
497  * @options:	Option to be enabled/disabled
498  *
499  * The Axi Ethernet core has multiple features which can be selectively turned
500  * on or off. The typical options could be jumbo frame option, basic VLAN
501  * option, promiscuous mode option etc. This function is used to set or clear
502  * these options in the Axi Ethernet hardware. This is done through
503  * axienet_option structure .
504  */
505 static void axienet_setoptions(struct net_device *ndev, u32 options)
506 {
507 	int reg;
508 	struct axienet_local *lp = netdev_priv(ndev);
509 	struct axienet_option *tp = &axienet_options[0];
510 
511 	while (tp->opt) {
512 		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
513 		if (options & tp->opt)
514 			reg |= tp->m_or;
515 		axienet_iow(lp, tp->reg, reg);
516 		tp++;
517 	}
518 
519 	lp->options |= options;
520 }
521 
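/**
 * axienet_stat - Read a hardware statistics counter, adjusted for wraps
 * @lp:		Pointer to the axienet_local structure
 * @stat:	Counter to read
 *
 * Return: The cached base value plus the change since the last snapshot,
 * or the base value alone while a reset is in progress.
 */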
522 static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
523 {
524 	u32 counter;
525 
526 	if (lp->reset_in_progress)
527 		return lp->hw_stat_base[stat];
528 
529 	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
530 	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
531 }
532 
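/**
 * axienet_stats_update - Fold the hardware counters into the cached totals
 * @lp:		Pointer to the axienet_local structure
 * @reset:	Whether a device reset is in progress
 *
 * Snapshots every hardware counter under the stats seqcount so readers see
 * a consistent view, and records whether a reset is in progress.
 */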
533 static void axienet_stats_update(struct axienet_local *lp, bool reset)
534 {
535 	enum temac_stat stat;
536 
537 	write_seqcount_begin(&lp->hw_stats_seqcount);
538 	lp->reset_in_progress = reset;
539 	for (stat = 0; stat < STAT_COUNT; stat++) {
540 		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
541 
542 		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
543 		lp->hw_last_counter[stat] = counter;
544 	}
545 	write_seqcount_end(&lp->hw_stats_seqcount);
546 }
547 
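/**
 * axienet_refresh_stats - Periodic work to keep statistics from wrapping
 * @work:	Delayed work structure embedded in the axienet_local structure
 *
 * Updates the cached counters often enough that the 32-bit hardware
 * counters cannot wrap unnoticed, then reschedules itself.
 */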
548 static void axienet_refresh_stats(struct work_struct *work)
549 {
550 	struct axienet_local *lp = container_of(work, struct axienet_local,
551 						stats_work.work);
552 
553 	mutex_lock(&lp->stats_lock);
554 	axienet_stats_update(lp, false);
555 	mutex_unlock(&lp->stats_lock);
556 
557 	/* Just less than 2^32 bytes at 2.5 GBit/s */
558 	schedule_delayed_work(&lp->stats_work, 13 * HZ);
559 }
560 
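/**
 * __axienet_device_reset - Reset the Axi DMA core (and the Ethernet core)
 * @lp:		Pointer to the axienet_local structure
 *
 * Saves the statistics counters, resets the DMA core (which also resets the
 * Axi Ethernet core), waits for the DMA reset and the PHY reset to complete,
 * and then adjusts the counter baselines so the totals are preserved across
 * the reset.
 *
 * Return: 0 on success, or a negative error code on timeout.
 */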
561 static int __axienet_device_reset(struct axienet_local *lp)
562 {
563 	u32 value;
564 	int ret;
565 
566 	/* Save statistics counters in case they will be reset */
567 	mutex_lock(&lp->stats_lock);
568 	if (lp->features & XAE_FEATURE_STATS)
569 		axienet_stats_update(lp, true);
570 
571 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
572 	 * process of Axi DMA takes a while to complete as all pending
573 	 * commands/transfers will be flushed or completed during this
574 	 * reset process.
575 	 * Note that even though both TX and RX have their own reset register,
576 	 * they both reset the entire DMA core, so only one needs to be used.
577 	 */
578 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
579 	ret = read_poll_timeout(axienet_dma_in32, value,
580 				!(value & XAXIDMA_CR_RESET_MASK),
581 				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
582 				XAXIDMA_TX_CR_OFFSET);
583 	if (ret) {
584 		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
585 		goto out;
586 	}
587 
588 	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
589 	ret = read_poll_timeout(axienet_ior, value,
590 				value & XAE_INT_PHYRSTCMPLT_MASK,
591 				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
592 				XAE_IS_OFFSET);
593 	if (ret) {
594 		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
595 		goto out;
596 	}
597 
598 	/* Update statistics counters with new values */
599 	if (lp->features & XAE_FEATURE_STATS) {
600 		enum temac_stat stat;
601 
602 		write_seqcount_begin(&lp->hw_stats_seqcount);
603 		lp->reset_in_progress = false;
604 		for (stat = 0; stat < STAT_COUNT; stat++) {
605 			u32 counter =
606 				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
607 
608 			lp->hw_stat_base[stat] +=
609 				lp->hw_last_counter[stat] - counter;
610 			lp->hw_last_counter[stat] = counter;
611 		}
612 		write_seqcount_end(&lp->hw_stats_seqcount);
613 	}
614 
615 out:
616 	mutex_unlock(&lp->stats_lock);
617 	return ret;
618 }
619 
620 /**
621  * axienet_dma_stop - Stop DMA operation
622  * @lp:		Pointer to the axienet_local structure
623  */
624 static void axienet_dma_stop(struct axienet_local *lp)
625 {
626 	int count;
627 	u32 cr, sr;
628 
629 	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
630 	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
631 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
632 	synchronize_irq(lp->rx_irq);
633 
634 	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
635 	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
636 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
637 	synchronize_irq(lp->tx_irq);
638 
639 	/* Give DMAs a chance to halt gracefully */
640 	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
641 	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
642 		msleep(20);
643 		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
644 	}
645 
646 	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
647 	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
648 		msleep(20);
649 		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
650 	}
651 
652 	/* Do a reset to ensure DMA is really stopped */
653 	axienet_lock_mii(lp);
654 	__axienet_device_reset(lp);
655 	axienet_unlock_mii(lp);
656 }
657 
658 /**
659  * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
660  * @ndev:	Pointer to the net_device structure
661  *
662  * This function is called to reset and initialize the Axi Ethernet core. This
663  * is typically called during initialization. It does a reset of the Axi DMA
664  * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
665  * are connected to Axi Ethernet reset lines, this in turn resets the Axi
666  * Ethernet core. No separate hardware reset is done for the Axi Ethernet
667  * core.
668  * Returns 0 on success or a negative error number otherwise.
669  */
670 static int axienet_device_reset(struct net_device *ndev)
671 {
672 	u32 axienet_status;
673 	struct axienet_local *lp = netdev_priv(ndev);
674 	int ret;
675 
676 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
677 	lp->options |= XAE_OPTION_VLAN;
678 	lp->options &= (~XAE_OPTION_JUMBO);
679 
680 	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
681 		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
682 					XAE_TRL_SIZE;
683 
684 		if (lp->max_frm_size <= lp->rxmem)
685 			lp->options |= XAE_OPTION_JUMBO;
686 	}
687 
688 	if (!lp->use_dmaengine) {
689 		ret = __axienet_device_reset(lp);
690 		if (ret)
691 			return ret;
692 
693 		ret = axienet_dma_bd_init(ndev);
694 		if (ret) {
695 			netdev_err(ndev, "%s: descriptor allocation failed\n",
696 				   __func__);
697 			return ret;
698 		}
699 	}
700 
701 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
702 	axienet_status &= ~XAE_RCW1_RX_MASK;
703 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
704 
705 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
706 	if (axienet_status & XAE_INT_RXRJECT_MASK)
707 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
708 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
709 		    XAE_INT_RECV_ERROR_MASK : 0);
710 
711 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
712 
713 	/* Sync default options with HW but leave receiver and
714 	 * transmitter disabled.
715 	 */
716 	axienet_setoptions(ndev, lp->options &
717 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
718 	axienet_set_mac_address(ndev, NULL);
719 	axienet_set_multicast_list(ndev);
720 	axienet_setoptions(ndev, lp->options);
721 
722 	netif_trans_update(ndev);
723 
724 	return 0;
725 }
726 
727 /**
728  * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
729  * @lp:		Pointer to the axienet_local structure
730  * @first_bd:	Index of first descriptor to clean up
731  * @nr_bds:	Max number of descriptors to clean up
732  * @force:	Whether to clean descriptors even if not complete
733  * @sizep:	Pointer to a u32 filled with the total sum of all bytes
734  *		in all cleaned-up descriptors. Ignored if NULL.
735  * @budget:	NAPI budget (use 0 when not called from NAPI poll)
736  *
737  * Would either be called after a successful transmit operation, or after
738  * there was an error when setting up the chain.
739  * Returns the number of packets handled.
740  */
741 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
742 				 int nr_bds, bool force, u32 *sizep, int budget)
743 {
744 	struct axidma_bd *cur_p;
745 	unsigned int status;
746 	int i, packets = 0;
747 	dma_addr_t phys;
748 
749 	for (i = 0; i < nr_bds; i++) {
750 		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
751 		status = cur_p->status;
752 
753 		/* If force is not specified, clean up only descriptors
754 		 * that have been completed by the MAC.
755 		 */
756 		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
757 			break;
758 
759 		/* Ensure we see complete descriptor update */
760 		dma_rmb();
761 		phys = desc_get_phys_addr(lp, cur_p);
762 		dma_unmap_single(lp->dev, phys,
763 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
764 				 DMA_TO_DEVICE);
765 
766 		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
767 			napi_consume_skb(cur_p->skb, budget);
768 			packets++;
769 		}
770 
771 		cur_p->app0 = 0;
772 		cur_p->app1 = 0;
773 		cur_p->app2 = 0;
774 		cur_p->app4 = 0;
775 		cur_p->skb = NULL;
776 		/* ensure our transmit path and device don't prematurely see status cleared */
777 		wmb();
778 		cur_p->cntrl = 0;
779 		cur_p->status = 0;
780 
781 		if (sizep)
782 			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
783 	}
784 
785 	if (!force) {
786 		lp->tx_bd_ci += i;
787 		if (lp->tx_bd_ci >= lp->tx_bd_num)
788 			lp->tx_bd_ci %= lp->tx_bd_num;
789 	}
790 
791 	return packets;
792 }
793 
794 /**
795  * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
796  * @lp:		Pointer to the axienet_local structure
797  * @num_frag:	The number of BDs to check for
798  *
799  * Return: 0, on success
800  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
801  *
802  * This function is invoked before BDs are allocated and transmission starts.
803  * This function returns 0 if a BD or group of BDs can be allocated for
804  * transmission. If the BD or any of the BDs are not free the function
805  * returns a busy status.
806  */
807 static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
808 					    int num_frag)
809 {
810 	struct axidma_bd *cur_p;
811 
812 	/* Ensure we see all descriptor updates from device or TX polling */
813 	rmb();
814 	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
815 			     lp->tx_bd_num];
816 	if (cur_p->cntrl)
817 		return NETDEV_TX_BUSY;
818 	return 0;
819 }
820 
821 /**
822  * axienet_dma_tx_cb - DMA engine callback for TX channel.
823  * @data:       Pointer to the axienet_local structure.
824  * @result:     error reporting through dmaengine_result.
825  * This function is called by dmaengine driver for TX channel to notify
826  * that the transmit is done.
827  */
828 static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
829 {
830 	struct skbuf_dma_descriptor *skbuf_dma;
831 	struct axienet_local *lp = data;
832 	struct netdev_queue *txq;
833 	int len;
834 
835 	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
836 	len = skbuf_dma->skb->len;
837 	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
838 	u64_stats_update_begin(&lp->tx_stat_sync);
839 	u64_stats_add(&lp->tx_bytes, len);
840 	u64_stats_add(&lp->tx_packets, 1);
841 	u64_stats_update_end(&lp->tx_stat_sync);
842 	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
843 	dev_consume_skb_any(skbuf_dma->skb);
844 	netif_txq_completed_wake(txq, 1, len,
845 				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
846 				 2 * MAX_SKB_FRAGS);
847 }
848 
849 /**
850  * axienet_start_xmit_dmaengine - Starts the transmission.
851  * @skb:        sk_buff pointer that contains data to be Txed.
852  * @ndev:       Pointer to net_device structure.
853  *
854  * Return: NETDEV_TX_OK on success, and also for any non-space error
855  *         (the skb is dropped). NETDEV_TX_BUSY when no free element
856  *         is available in the TX skb ring buffer.
857  *
858  * This function is invoked to initiate transmission. The
859  * function maps the skb, registers the DMA callback and submits
860  * the DMA transaction.
861  * Additionally if checksum offloading is supported,
862  * it populates AXI Stream Control fields with appropriate values.
863  */
864 static netdev_tx_t
865 axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
866 {
867 	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
868 	struct axienet_local *lp = netdev_priv(ndev);
869 	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
870 	struct skbuf_dma_descriptor *skbuf_dma;
871 	struct dma_device *dma_dev;
872 	struct netdev_queue *txq;
873 	u32 csum_start_off;
874 	u32 csum_index_off;
875 	int sg_len;
876 	int ret;
877 
878 	dma_dev = lp->tx_chan->device;
879 	sg_len = skb_shinfo(skb)->nr_frags + 1;
880 	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
881 		netif_stop_queue(ndev);
882 		if (net_ratelimit())
883 			netdev_warn(ndev, "TX ring unexpectedly full\n");
884 		return NETDEV_TX_BUSY;
885 	}
886 
887 	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
888 	if (!skbuf_dma)
889 		goto xmit_error_drop_skb;
890 
891 	lp->tx_ring_head++;
892 	sg_init_table(skbuf_dma->sgl, sg_len);
893 	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
894 	if (ret < 0)
895 		goto xmit_error_drop_skb;
896 
897 	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
898 	if (!ret)
899 		goto xmit_error_drop_skb;
900 
901 	/* Fill up app fields for checksum */
902 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
903 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
904 			/* Tx Full Checksum Offload Enabled */
905 			app_metadata[0] |= 2;
906 		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
907 			csum_start_off = skb_transport_offset(skb);
908 			csum_index_off = csum_start_off + skb->csum_offset;
909 			/* Tx Partial Checksum Offload Enabled */
910 			app_metadata[0] |= 1;
911 			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
912 		}
913 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
914 		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
915 	}
916 
917 	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
918 			sg_len, DMA_MEM_TO_DEV,
919 			DMA_PREP_INTERRUPT, (void *)app_metadata);
920 	if (!dma_tx_desc)
921 		goto xmit_error_unmap_sg;
922 
923 	skbuf_dma->skb = skb;
924 	skbuf_dma->sg_len = sg_len;
925 	dma_tx_desc->callback_param = lp;
926 	dma_tx_desc->callback_result = axienet_dma_tx_cb;
927 	dmaengine_submit(dma_tx_desc);
928 	dma_async_issue_pending(lp->tx_chan);
929 	txq = skb_get_tx_queue(lp->ndev, skb);
930 	netdev_tx_sent_queue(txq, skb->len);
931 	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
932 			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
933 
934 	return NETDEV_TX_OK;
935 
936 xmit_error_unmap_sg:
937 	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
938 xmit_error_drop_skb:
939 	dev_kfree_skb_any(skb);
940 	return NETDEV_TX_OK;
941 }
942 
943 /**
944  * axienet_tx_poll - Invoked once a transmit is completed by the
945  * Axi DMA Tx channel.
946  * @napi:	Pointer to NAPI structure.
947  * @budget:	Max number of TX packets to process.
948  *
949  * Return: Number of TX packets processed.
950  *
951  * This function is invoked from the NAPI processing to notify the completion
952  * of transmit operation. It clears fields in the corresponding Tx BDs and
953  * unmaps the corresponding buffer so that CPU can regain ownership of the
954  * buffer. It finally invokes "netif_wake_queue" to restart transmission if
955  * required.
956  */
957 static int axienet_tx_poll(struct napi_struct *napi, int budget)
958 {
959 	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
960 	struct net_device *ndev = lp->ndev;
961 	u32 size = 0;
962 	int packets;
963 
964 	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
965 					&size, budget);
966 
967 	if (packets) {
968 		u64_stats_update_begin(&lp->tx_stat_sync);
969 		u64_stats_add(&lp->tx_packets, packets);
970 		u64_stats_add(&lp->tx_bytes, size);
971 		u64_stats_update_end(&lp->tx_stat_sync);
972 
973 		/* Matches barrier in axienet_start_xmit */
974 		smp_mb();
975 
976 		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
977 			netif_wake_queue(ndev);
978 	}
979 
980 	if (packets < budget && napi_complete_done(napi, packets)) {
981 		/* Re-enable TX completion interrupts. This should
982 		 * cause an immediate interrupt if any TX packets are
983 		 * already pending.
984 		 */
985 		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
986 	}
987 	return packets;
988 }
989 
990 /**
991  * axienet_start_xmit - Starts the transmission.
992  * @skb:	sk_buff pointer that contains data to be Txed.
993  * @ndev:	Pointer to net_device structure.
994  *
995  * Return: NETDEV_TX_OK, on success
996  *	    NETDEV_TX_BUSY, if any of the descriptors are not free
997  *
998  * This function is invoked from upper layers to initiate transmission. The
999  * function uses the next available free BDs and populates their fields to
1000  * start the transmission. Additionally if checksum offloading is supported,
1001  * it populates AXI Stream Control fields with appropriate values.
1002  */
1003 static netdev_tx_t
1004 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1005 {
1006 	u32 ii;
1007 	u32 num_frag;
1008 	u32 csum_start_off;
1009 	u32 csum_index_off;
1010 	skb_frag_t *frag;
1011 	dma_addr_t tail_p, phys;
1012 	u32 orig_tail_ptr, new_tail_ptr;
1013 	struct axienet_local *lp = netdev_priv(ndev);
1014 	struct axidma_bd *cur_p;
1015 
1016 	orig_tail_ptr = lp->tx_bd_tail;
1017 	new_tail_ptr = orig_tail_ptr;
1018 
1019 	num_frag = skb_shinfo(skb)->nr_frags;
1020 	cur_p = &lp->tx_bd_v[orig_tail_ptr];
1021 
1022 	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1023 		/* Should not happen as last start_xmit call should have
1024 		 * checked for sufficient space and queue should only be
1025 		 * woken when sufficient space is available.
1026 		 */
1027 		netif_stop_queue(ndev);
1028 		if (net_ratelimit())
1029 			netdev_warn(ndev, "TX ring unexpectedly full\n");
1030 		return NETDEV_TX_BUSY;
1031 	}
1032 
1033 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1034 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1035 			/* Tx Full Checksum Offload Enabled */
1036 			cur_p->app0 |= 2;
1037 		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1038 			csum_start_off = skb_transport_offset(skb);
1039 			csum_index_off = csum_start_off + skb->csum_offset;
1040 			/* Tx Partial Checksum Offload Enabled */
1041 			cur_p->app0 |= 1;
1042 			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1043 		}
1044 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1045 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1046 	}
1047 
1048 	phys = dma_map_single(lp->dev, skb->data,
1049 			      skb_headlen(skb), DMA_TO_DEVICE);
1050 	if (unlikely(dma_mapping_error(lp->dev, phys))) {
1051 		if (net_ratelimit())
1052 			netdev_err(ndev, "TX DMA mapping error\n");
1053 		ndev->stats.tx_dropped++;
1054 		dev_kfree_skb_any(skb);
1055 		return NETDEV_TX_OK;
1056 	}
1057 	desc_set_phys_addr(lp, phys, cur_p);
1058 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1059 
1060 	for (ii = 0; ii < num_frag; ii++) {
1061 		if (++new_tail_ptr >= lp->tx_bd_num)
1062 			new_tail_ptr = 0;
1063 		cur_p = &lp->tx_bd_v[new_tail_ptr];
1064 		frag = &skb_shinfo(skb)->frags[ii];
1065 		phys = dma_map_single(lp->dev,
1066 				      skb_frag_address(frag),
1067 				      skb_frag_size(frag),
1068 				      DMA_TO_DEVICE);
1069 		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1070 			if (net_ratelimit())
1071 				netdev_err(ndev, "TX DMA mapping error\n");
1072 			ndev->stats.tx_dropped++;
1073 			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1074 					      true, NULL, 0);
1075 			dev_kfree_skb_any(skb);
1076 			return NETDEV_TX_OK;
1077 		}
1078 		desc_set_phys_addr(lp, phys, cur_p);
1079 		cur_p->cntrl = skb_frag_size(frag);
1080 	}
1081 
1082 	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1083 	cur_p->skb = skb;
1084 
1085 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1086 	if (++new_tail_ptr >= lp->tx_bd_num)
1087 		new_tail_ptr = 0;
1088 	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1089 
1090 	/* Start the transfer */
1091 	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1092 
1093 	/* Stop queue if next transmit may not have space */
1094 	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1095 		netif_stop_queue(ndev);
1096 
1097 		/* Matches barrier in axienet_tx_poll */
1098 		smp_mb();
1099 
1100 		/* Space might have just been freed - check again */
1101 		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1102 			netif_wake_queue(ndev);
1103 	}
1104 
1105 	return NETDEV_TX_OK;
1106 }
1107 
1108 /**
1109  * axienet_dma_rx_cb - DMA engine callback for RX channel.
1110  * @data:       Pointer to the skbuf_dma_descriptor structure.
1111  * @result:     error reporting through dmaengine_result.
1112  * This function is called by dmaengine driver for RX channel to notify
1113  * that the packet is received.
1114  */
1115 static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1116 {
1117 	struct skbuf_dma_descriptor *skbuf_dma;
1118 	size_t meta_len, meta_max_len, rx_len;
1119 	struct axienet_local *lp = data;
1120 	struct sk_buff *skb;
1121 	u32 *app_metadata;
1122 
1123 	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1124 	skb = skbuf_dma->skb;
1125 	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1126 						       &meta_max_len);
1127 	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1128 			 DMA_FROM_DEVICE);
1129 	/* TODO: Derive app word index programmatically */
1130 	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1131 	skb_put(skb, rx_len);
1132 	skb->protocol = eth_type_trans(skb, lp->ndev);
1133 	skb->ip_summed = CHECKSUM_NONE;
1134 
1135 	__netif_rx(skb);
1136 	u64_stats_update_begin(&lp->rx_stat_sync);
1137 	u64_stats_add(&lp->rx_packets, 1);
1138 	u64_stats_add(&lp->rx_bytes, rx_len);
1139 	u64_stats_update_end(&lp->rx_stat_sync);
1140 	axienet_rx_submit_desc(lp->ndev);
1141 	dma_async_issue_pending(lp->rx_chan);
1142 }
1143 
1144 /**
1145  * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1146  * @napi:	Pointer to NAPI structure.
1147  * @budget:	Max number of RX packets to process.
1148  *
1149  * Return: Number of RX packets processed.
1150  */
1151 static int axienet_rx_poll(struct napi_struct *napi, int budget)
1152 {
1153 	u32 length;
1154 	u32 csumstatus;
1155 	u32 size = 0;
1156 	int packets = 0;
1157 	dma_addr_t tail_p = 0;
1158 	struct axidma_bd *cur_p;
1159 	struct sk_buff *skb, *new_skb;
1160 	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1161 
1162 	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1163 
1164 	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1165 		dma_addr_t phys;
1166 
1167 		/* Ensure we see complete descriptor update */
1168 		dma_rmb();
1169 
1170 		skb = cur_p->skb;
1171 		cur_p->skb = NULL;
1172 
1173 		/* skb could be NULL if a previous pass already received the
1174 		 * packet for this slot in the ring, but failed to refill it
1175 		 * with a newly allocated buffer. In this case, don't try to
1176 		 * receive it again.
1177 		 */
1178 		if (likely(skb)) {
1179 			length = cur_p->app4 & 0x0000FFFF;
1180 
1181 			phys = desc_get_phys_addr(lp, cur_p);
1182 			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1183 					 DMA_FROM_DEVICE);
1184 
1185 			skb_put(skb, length);
1186 			skb->protocol = eth_type_trans(skb, lp->ndev);
1187 			/*skb_checksum_none_assert(skb);*/
1188 			skb->ip_summed = CHECKSUM_NONE;
1189 
1190 			/* if we're doing Rx csum offload, set it up */
1191 			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1192 				csumstatus = (cur_p->app2 &
1193 					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1194 				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1195 				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1196 					skb->ip_summed = CHECKSUM_UNNECESSARY;
1197 				}
1198 			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1199 				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1200 				skb->ip_summed = CHECKSUM_COMPLETE;
1201 			}
1202 
1203 			napi_gro_receive(napi, skb);
1204 
1205 			size += length;
1206 			packets++;
1207 		}
1208 
1209 		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1210 		if (!new_skb)
1211 			break;
1212 
1213 		phys = dma_map_single(lp->dev, new_skb->data,
1214 				      lp->max_frm_size,
1215 				      DMA_FROM_DEVICE);
1216 		if (unlikely(dma_mapping_error(lp->dev, phys))) {
1217 			if (net_ratelimit())
1218 				netdev_err(lp->ndev, "RX DMA mapping error\n");
1219 			dev_kfree_skb(new_skb);
1220 			break;
1221 		}
1222 		desc_set_phys_addr(lp, phys, cur_p);
1223 
1224 		cur_p->cntrl = lp->max_frm_size;
1225 		cur_p->status = 0;
1226 		cur_p->skb = new_skb;
1227 
1228 		/* Only update tail_p to mark this slot as usable after it has
1229 		 * been successfully refilled.
1230 		 */
1231 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1232 
1233 		if (++lp->rx_bd_ci >= lp->rx_bd_num)
1234 			lp->rx_bd_ci = 0;
1235 		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1236 	}
1237 
1238 	u64_stats_update_begin(&lp->rx_stat_sync);
1239 	u64_stats_add(&lp->rx_packets, packets);
1240 	u64_stats_add(&lp->rx_bytes, size);
1241 	u64_stats_update_end(&lp->rx_stat_sync);
1242 
1243 	if (tail_p)
1244 		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1245 
1246 	if (packets < budget && napi_complete_done(napi, packets)) {
1247 		/* Re-enable RX completion interrupts. This should
1248 		 * cause an immediate interrupt if any RX packets are
1249 		 * already pending.
1250 		 */
1251 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1252 	}
1253 	return packets;
1254 }
1255 
1256 /**
1257  * axienet_tx_irq - Tx Done Isr.
1258  * @irq:	irq number
1259  * @_ndev:	net_device pointer
1260  *
1261  * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1262  *
1263  * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1264  * TX BD processing.
1265  */
1266 static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1267 {
1268 	unsigned int status;
1269 	struct net_device *ndev = _ndev;
1270 	struct axienet_local *lp = netdev_priv(ndev);
1271 
1272 	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1273 
1274 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1275 		return IRQ_NONE;
1276 
1277 	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1278 
1279 	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1280 		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1281 		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1282 			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1283 			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1284 		schedule_work(&lp->dma_err_task);
1285 	} else {
1286 		/* Disable further TX completion interrupts and schedule
1287 		 * NAPI to handle the completions.
1288 		 */
1289 		u32 cr = lp->tx_dma_cr;
1290 
1291 		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1292 		if (napi_schedule_prep(&lp->napi_tx)) {
1293 			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1294 			__napi_schedule(&lp->napi_tx);
1295 		}
1296 	}
1297 
1298 	return IRQ_HANDLED;
1299 }
1300 
1301 /**
1302  * axienet_rx_irq - Rx Isr.
1303  * @irq:	irq number
1304  * @_ndev:	net_device pointer
1305  *
1306  * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1307  *
1308  * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1309  * processing.
1310  */
1311 static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1312 {
1313 	unsigned int status;
1314 	struct net_device *ndev = _ndev;
1315 	struct axienet_local *lp = netdev_priv(ndev);
1316 
1317 	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1318 
1319 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
1320 		return IRQ_NONE;
1321 
1322 	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1323 
1324 	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1325 		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1326 		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1327 			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1328 			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1329 		schedule_work(&lp->dma_err_task);
1330 	} else {
1331 		/* Disable further RX completion interrupts and schedule
1332 		 * NAPI receive.
1333 		 */
1334 		u32 cr = lp->rx_dma_cr;
1335 
1336 		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1337 		if (napi_schedule_prep(&lp->napi_rx)) {
1338 			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1339 			__napi_schedule(&lp->napi_rx);
1340 		}
1341 	}
1342 
1343 	return IRQ_HANDLED;
1344 }
1345 
1346 /**
1347  * axienet_eth_irq - Ethernet core Isr.
1348  * @irq:	irq number
1349  * @_ndev:	net_device pointer
1350  *
1351  * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1352  *
1353  * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1354  */
1355 static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1356 {
1357 	struct net_device *ndev = _ndev;
1358 	struct axienet_local *lp = netdev_priv(ndev);
1359 	unsigned int pending;
1360 
1361 	pending = axienet_ior(lp, XAE_IP_OFFSET);
1362 	if (!pending)
1363 		return IRQ_NONE;
1364 
1365 	if (pending & XAE_INT_RXFIFOOVR_MASK)
1366 		ndev->stats.rx_missed_errors++;
1367 
1368 	if (pending & XAE_INT_RXRJECT_MASK)
1369 		ndev->stats.rx_dropped++;
1370 
1371 	axienet_iow(lp, XAE_IS_OFFSET, pending);
1372 	return IRQ_HANDLED;
1373 }
1374 
1375 static void axienet_dma_err_handler(struct work_struct *work);
1376 
1377 /**
1378  * axienet_rx_submit_desc - Submit the rx descriptors to dmaengine.
1379  * allocate skbuff, map the scatterlist and obtain a descriptor
1380  * and then add the callback information and submit descriptor.
1381  *
1382  * @ndev:	net_device pointer
1383  *
1384  */
1385 static void axienet_rx_submit_desc(struct net_device *ndev)
1386 {
1387 	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1388 	struct axienet_local *lp = netdev_priv(ndev);
1389 	struct skbuf_dma_descriptor *skbuf_dma;
1390 	struct sk_buff *skb;
1391 	dma_addr_t addr;
1392 
1393 	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1394 	if (!skbuf_dma)
1395 		return;
1396 
1397 	lp->rx_ring_head++;
1398 	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1399 	if (!skb)
1400 		return;
1401 
1402 	sg_init_table(skbuf_dma->sgl, 1);
1403 	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1404 	if (unlikely(dma_mapping_error(lp->dev, addr))) {
1405 		if (net_ratelimit())
1406 			netdev_err(ndev, "DMA mapping error\n");
1407 		goto rx_submit_err_free_skb;
1408 	}
1409 	sg_dma_address(skbuf_dma->sgl) = addr;
1410 	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1411 	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1412 					      1, DMA_DEV_TO_MEM,
1413 					      DMA_PREP_INTERRUPT);
1414 	if (!dma_rx_desc)
1415 		goto rx_submit_err_unmap_skb;
1416 
1417 	skbuf_dma->skb = skb;
1418 	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1419 	skbuf_dma->desc = dma_rx_desc;
1420 	dma_rx_desc->callback_param = lp;
1421 	dma_rx_desc->callback_result = axienet_dma_rx_cb;
1422 	dmaengine_submit(dma_rx_desc);
1423 
1424 	return;
1425 
1426 rx_submit_err_unmap_skb:
1427 	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1428 rx_submit_err_free_skb:
1429 	dev_kfree_skb(skb);
1430 }
1431 
1432 /**
1433  * axienet_init_dmaengine - init the dmaengine code.
1434  * @ndev:       Pointer to net_device structure
1435  *
1436  * Return: 0, on success.
1437  *          non-zero error value on failure
1438  *
1439  * This is the dmaengine initialization code.
1440  */
1441 static int axienet_init_dmaengine(struct net_device *ndev)
1442 {
1443 	struct axienet_local *lp = netdev_priv(ndev);
1444 	struct skbuf_dma_descriptor *skbuf_dma;
1445 	int i, ret;
1446 
1447 	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1448 	if (IS_ERR(lp->tx_chan)) {
1449 		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1450 		return PTR_ERR(lp->tx_chan);
1451 	}
1452 
1453 	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1454 	if (IS_ERR(lp->rx_chan)) {
1455 		ret = PTR_ERR(lp->rx_chan);
1456 		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1457 		goto err_dma_release_tx;
1458 	}
1459 
1460 	lp->tx_ring_tail = 0;
1461 	lp->tx_ring_head = 0;
1462 	lp->rx_ring_tail = 0;
1463 	lp->rx_ring_head = 0;
1464 	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1465 				  GFP_KERNEL);
1466 	if (!lp->tx_skb_ring) {
1467 		ret = -ENOMEM;
1468 		goto err_dma_release_rx;
1469 	}
1470 	for (i = 0; i < TX_BD_NUM_MAX; i++) {
1471 		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1472 		if (!skbuf_dma) {
1473 			ret = -ENOMEM;
1474 			goto err_free_tx_skb_ring;
1475 		}
1476 		lp->tx_skb_ring[i] = skbuf_dma;
1477 	}
1478 
1479 	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1480 				  GFP_KERNEL);
1481 	if (!lp->rx_skb_ring) {
1482 		ret = -ENOMEM;
1483 		goto err_free_tx_skb_ring;
1484 	}
1485 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1486 		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1487 		if (!skbuf_dma) {
1488 			ret = -ENOMEM;
1489 			goto err_free_rx_skb_ring;
1490 		}
1491 		lp->rx_skb_ring[i] = skbuf_dma;
1492 	}
1493 	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1494 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1495 		axienet_rx_submit_desc(ndev);
1496 	dma_async_issue_pending(lp->rx_chan);
1497 
1498 	return 0;
1499 
1500 err_free_rx_skb_ring:
1501 	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1502 		kfree(lp->rx_skb_ring[i]);
1503 	kfree(lp->rx_skb_ring);
1504 err_free_tx_skb_ring:
1505 	for (i = 0; i < TX_BD_NUM_MAX; i++)
1506 		kfree(lp->tx_skb_ring[i]);
1507 	kfree(lp->tx_skb_ring);
1508 err_dma_release_rx:
1509 	dma_release_channel(lp->rx_chan);
1510 err_dma_release_tx:
1511 	dma_release_channel(lp->tx_chan);
1512 	return ret;
1513 }
1514 
1515 /**
1516  * axienet_init_legacy_dma - init the dma legacy code.
1517  * @ndev:       Pointer to net_device structure
1518  *
1519  * Return: 0, on success.
1520  *          non-zero error value on failure
1521  *
1522  * This is the legacy DMA initialization code. It also registers the
1523  * interrupt service routines and enables the interrupt lines and ISR handling.
1524  *
1525  */
1526 static int axienet_init_legacy_dma(struct net_device *ndev)
1527 {
1528 	int ret;
1529 	struct axienet_local *lp = netdev_priv(ndev);
1530 
1531 	/* Enable worker thread for Axi DMA error handling */
1532 	lp->stopping = false;
1533 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1534 
1535 	napi_enable(&lp->napi_rx);
1536 	napi_enable(&lp->napi_tx);
1537 
1538 	/* Enable interrupts for Axi DMA Tx */
1539 	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1540 			  ndev->name, ndev);
1541 	if (ret)
1542 		goto err_tx_irq;
1543 	/* Enable interrupts for Axi DMA Rx */
1544 	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1545 			  ndev->name, ndev);
1546 	if (ret)
1547 		goto err_rx_irq;
1548 	/* Enable interrupts for Axi Ethernet core (if defined) */
1549 	if (lp->eth_irq > 0) {
1550 		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1551 				  ndev->name, ndev);
1552 		if (ret)
1553 			goto err_eth_irq;
1554 	}
1555 
1556 	return 0;
1557 
1558 err_eth_irq:
1559 	free_irq(lp->rx_irq, ndev);
1560 err_rx_irq:
1561 	free_irq(lp->tx_irq, ndev);
1562 err_tx_irq:
1563 	napi_disable(&lp->napi_tx);
1564 	napi_disable(&lp->napi_rx);
1565 	cancel_work_sync(&lp->dma_err_task);
1566 	dev_err(lp->dev, "request_irq() failed\n");
1567 	return ret;
1568 }
1569 
1570 /**
1571  * axienet_open - Driver open routine.
1572  * @ndev:	Pointer to net_device structure
1573  *
1574  * Return: 0, on success.
1575  *	    non-zero error value on failure
1576  *
1577  * This is the driver open routine. It calls phylink_start to start the
1578  * PHY device.
1579  * It also allocates interrupt service routines, enables the interrupt lines
1580  * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1581  * descriptors are initialized.
1582  */
1583 static int axienet_open(struct net_device *ndev)
1584 {
1585 	int ret;
1586 	struct axienet_local *lp = netdev_priv(ndev);
1587 
1588 	/* When we do an Axi Ethernet reset, it resets the complete core
1589 	 * including the MDIO. MDIO must be disabled before resetting.
1590 	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1591 	 */
1592 	axienet_lock_mii(lp);
1593 	ret = axienet_device_reset(ndev);
1594 	axienet_unlock_mii(lp);
1595 
1596 	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1597 	if (ret) {
1598 		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1599 		return ret;
1600 	}
1601 
1602 	phylink_start(lp->phylink);
1603 
1604 	/* Start the statistics refresh work */
1605 	schedule_delayed_work(&lp->stats_work, 0);
1606 
1607 	if (lp->use_dmaengine) {
1608 		/* Enable interrupts for Axi Ethernet core (if defined) */
1609 		if (lp->eth_irq > 0) {
1610 			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1611 					  ndev->name, ndev);
1612 			if (ret)
1613 				goto err_phy;
1614 		}
1615 
1616 		ret = axienet_init_dmaengine(ndev);
1617 		if (ret < 0)
1618 			goto err_free_eth_irq;
1619 	} else {
1620 		ret = axienet_init_legacy_dma(ndev);
1621 		if (ret)
1622 			goto err_phy;
1623 	}
1624 
1625 	return 0;
1626 
1627 err_free_eth_irq:
1628 	if (lp->eth_irq > 0)
1629 		free_irq(lp->eth_irq, ndev);
1630 err_phy:
1631 	cancel_delayed_work_sync(&lp->stats_work);
1632 	phylink_stop(lp->phylink);
1633 	phylink_disconnect_phy(lp->phylink);
1634 	return ret;
1635 }
1636 
1637 /**
1638  * axienet_stop - Driver stop routine.
1639  * @ndev:	Pointer to net_device structure
1640  *
1641  * Return: 0, on success.
1642  *
1643  * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1644  * device. It also removes the interrupt handlers and disables the interrupts.
1645  * The Axi DMA Tx/Rx BDs are released.
1646  */
1647 static int axienet_stop(struct net_device *ndev)
1648 {
1649 	struct axienet_local *lp = netdev_priv(ndev);
1650 	int i;
1651 
1652 	if (!lp->use_dmaengine) {
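		/* Quiesce the legacy DMA path first: flag the error handler to
		 * stop and disable NAPI so nothing races with the teardown below.
		 */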
1653 		WRITE_ONCE(lp->stopping, true);
1654 		flush_work(&lp->dma_err_task);
1655 
1656 		napi_disable(&lp->napi_tx);
1657 		napi_disable(&lp->napi_rx);
1658 	}
1659 
1660 	cancel_delayed_work_sync(&lp->stats_work);
1661 
1662 	phylink_stop(lp->phylink);
1663 	phylink_disconnect_phy(lp->phylink);
1664 
1665 	axienet_setoptions(ndev, lp->options &
1666 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1667 
1668 	if (!lp->use_dmaengine) {
1669 		axienet_dma_stop(lp);
1670 		cancel_work_sync(&lp->dma_err_task);
1671 		free_irq(lp->tx_irq, ndev);
1672 		free_irq(lp->rx_irq, ndev);
1673 		axienet_dma_bd_release(ndev);
1674 	} else {
1675 		dmaengine_terminate_sync(lp->tx_chan);
1676 		dmaengine_synchronize(lp->tx_chan);
1677 		dmaengine_terminate_sync(lp->rx_chan);
1678 		dmaengine_synchronize(lp->rx_chan);
1679 
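		/* Drop any skbs still held in the dmaengine Tx/Rx skb rings
		 * before releasing the rings and the DMA channels.
		 */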
1680 		for (i = 0; i < TX_BD_NUM_MAX; i++)
1681 			kfree(lp->tx_skb_ring[i]);
1682 		kfree(lp->tx_skb_ring);
1683 		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1684 			kfree(lp->rx_skb_ring[i]);
1685 		kfree(lp->rx_skb_ring);
1686 
1687 		dma_release_channel(lp->rx_chan);
1688 		dma_release_channel(lp->tx_chan);
1689 	}
1690 
1691 	axienet_iow(lp, XAE_IE_OFFSET, 0);
1692 
1693 	if (lp->eth_irq > 0)
1694 		free_irq(lp->eth_irq, ndev);
1695 	return 0;
1696 }
1697 
1698 /**
1699  * axienet_change_mtu - Driver change mtu routine.
1700  * @ndev:	Pointer to net_device structure
1701  * @new_mtu:	New mtu value to be applied
1702  *
1703  * Return: 0 on success, or a negative error value on failure.
1704  *
1705  * This is the change MTU driver routine. It verifies that the new MTU,
1706  * plus VLAN header and FCS trailer overhead, fits in the Rx memory
1707  * configured in hardware. The MTU can only be changed while the device is down.
1708  */
1709 static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1710 {
1711 	struct axienet_local *lp = netdev_priv(ndev);
1712 
1713 	if (netif_running(ndev))
1714 		return -EBUSY;
1715 
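	/* The full frame (MTU plus VLAN header and FCS trailer) must fit in
	 * the Rx memory configured in hardware.
	 */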
1716 	if (new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE > lp->rxmem)
1718 		return -EINVAL;
1719 
1720 	WRITE_ONCE(ndev->mtu, new_mtu);
1721 
1722 	return 0;
1723 }
1724 
1725 #ifdef CONFIG_NET_POLL_CONTROLLER
1726 /**
1727  * axienet_poll_controller - Axi Ethernet poll mechanism.
1728  * @ndev:	Pointer to net_device structure
1729  *
1730  * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1731  * to polling the ISRs and are enabled back after the polling is done.
1732  */
1733 static void axienet_poll_controller(struct net_device *ndev)
1734 {
1735 	struct axienet_local *lp = netdev_priv(ndev);
1736 
1737 	disable_irq(lp->tx_irq);
1738 	disable_irq(lp->rx_irq);
1739 	axienet_rx_irq(lp->rx_irq, ndev);
1740 	axienet_tx_irq(lp->tx_irq, ndev);
1741 	enable_irq(lp->tx_irq);
1742 	enable_irq(lp->rx_irq);
1743 }
1744 #endif
1745 
1746 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1747 {
1748 	struct axienet_local *lp = netdev_priv(dev);
1749 
1750 	if (!netif_running(dev))
1751 		return -EINVAL;
1752 
1753 	return phylink_mii_ioctl(lp->phylink, rq, cmd);
1754 }
1755 
1756 static void
1757 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1758 {
1759 	struct axienet_local *lp = netdev_priv(dev);
1760 	unsigned int start;
1761 
1762 	netdev_stats_to_stats64(stats, &dev->stats);
1763 
1764 	do {
1765 		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1766 		stats->rx_packets = u64_stats_read(&lp->rx_packets);
1767 		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1768 	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1769 
1770 	do {
1771 		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1772 		stats->tx_packets = u64_stats_read(&lp->tx_packets);
1773 		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1774 	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1775 
1776 	if (!(lp->features & XAE_FEATURE_STATS))
1777 		return;
1778 
1779 	do {
1780 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
1781 		stats->rx_length_errors =
1782 			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1783 		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1784 		stats->rx_frame_errors =
1785 			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1786 		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1787 				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1788 				   stats->rx_length_errors +
1789 				   stats->rx_crc_errors +
1790 				   stats->rx_frame_errors;
1791 		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1792 
1793 		stats->tx_aborted_errors =
1794 			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1795 		stats->tx_fifo_errors =
1796 			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1797 		stats->tx_window_errors =
1798 			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1799 		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1800 				   stats->tx_aborted_errors +
1801 				   stats->tx_fifo_errors +
1802 				   stats->tx_window_errors;
1803 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1804 }
1805 
1806 static const struct net_device_ops axienet_netdev_ops = {
1807 	.ndo_open = axienet_open,
1808 	.ndo_stop = axienet_stop,
1809 	.ndo_start_xmit = axienet_start_xmit,
1810 	.ndo_get_stats64 = axienet_get_stats64,
1811 	.ndo_change_mtu	= axienet_change_mtu,
1812 	.ndo_set_mac_address = netdev_set_mac_address,
1813 	.ndo_validate_addr = eth_validate_addr,
1814 	.ndo_eth_ioctl = axienet_ioctl,
1815 	.ndo_set_rx_mode = axienet_set_multicast_list,
1816 #ifdef CONFIG_NET_POLL_CONTROLLER
1817 	.ndo_poll_controller = axienet_poll_controller,
1818 #endif
1819 };
1820 
1821 static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1822 	.ndo_open = axienet_open,
1823 	.ndo_stop = axienet_stop,
1824 	.ndo_start_xmit = axienet_start_xmit_dmaengine,
1825 	.ndo_get_stats64 = axienet_get_stats64,
1826 	.ndo_change_mtu	= axienet_change_mtu,
1827 	.ndo_set_mac_address = netdev_set_mac_address,
1828 	.ndo_validate_addr = eth_validate_addr,
1829 	.ndo_eth_ioctl = axienet_ioctl,
1830 	.ndo_set_rx_mode = axienet_set_multicast_list,
1831 };
1832 
1833 /**
1834  * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1835  * @ndev:	Pointer to net_device structure
1836  * @ed:		Pointer to ethtool_drvinfo structure
1837  *
1838  * This implements ethtool command for getting the driver information.
1839  * Issue "ethtool -i ethX" to execute this function.
1840  */
1841 static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1842 					 struct ethtool_drvinfo *ed)
1843 {
1844 	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1845 	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1846 }
1847 
1848 /**
1849  * axienet_ethtools_get_regs_len - Get the total regs length present in the
1850  *				   AxiEthernet core.
1851  * @ndev:	Pointer to net_device structure
1852  *
1853  * This implements ethtool command for getting the total register length
1854  * information.
1855  *
1856  * Return: the total regs length
1857  */
1858 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1859 {
1860 	return sizeof(u32) * AXIENET_REGS_N;
1861 }
1862 
1863 /**
1864  * axienet_ethtools_get_regs - Dump the contents of all registers present
1865  *			       in AxiEthernet core.
1866  * @ndev:	Pointer to net_device structure
1867  * @regs:	Pointer to ethtool_regs structure
1868  * @ret:	Void pointer used to return the contents of the registers.
1869  *
1870  * This implements ethtool command for getting the Axi Ethernet register dump.
1871  * Issue "ethtool -d ethX" to execute this function.
1872  */
1873 static void axienet_ethtools_get_regs(struct net_device *ndev,
1874 				      struct ethtool_regs *regs, void *ret)
1875 {
1876 	u32 *data = (u32 *)ret;
1877 	size_t len = sizeof(u32) * AXIENET_REGS_N;
1878 	struct axienet_local *lp = netdev_priv(ndev);
1879 
1880 	regs->version = 0;
1881 	regs->len = len;
1882 
1883 	memset(data, 0, len);
1884 	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1885 	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1886 	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1887 	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1888 	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1889 	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1890 	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1891 	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1892 	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1893 	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1894 	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1895 	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1896 	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1897 	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1898 	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1899 	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1900 	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1901 	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1902 	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1903 	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1904 	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1905 	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1906 	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
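	/* data[23]..data[26] are left at zero (from the memset above); this
	 * driver does not populate those slots.
	 */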
1907 	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1908 	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1909 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1910 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1911 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1912 	if (!lp->use_dmaengine) {
1913 		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1914 		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1915 		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1916 		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1917 		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1918 		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1919 		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1920 		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1921 	}
1922 }
1923 
1924 static void
1925 axienet_ethtools_get_ringparam(struct net_device *ndev,
1926 			       struct ethtool_ringparam *ering,
1927 			       struct kernel_ethtool_ringparam *kernel_ering,
1928 			       struct netlink_ext_ack *extack)
1929 {
1930 	struct axienet_local *lp = netdev_priv(ndev);
1931 
1932 	ering->rx_max_pending = RX_BD_NUM_MAX;
1933 	ering->rx_mini_max_pending = 0;
1934 	ering->rx_jumbo_max_pending = 0;
1935 	ering->tx_max_pending = TX_BD_NUM_MAX;
1936 	ering->rx_pending = lp->rx_bd_num;
1937 	ering->rx_mini_pending = 0;
1938 	ering->rx_jumbo_pending = 0;
1939 	ering->tx_pending = lp->tx_bd_num;
1940 }
1941 
1942 static int
1943 axienet_ethtools_set_ringparam(struct net_device *ndev,
1944 			       struct ethtool_ringparam *ering,
1945 			       struct kernel_ethtool_ringparam *kernel_ering,
1946 			       struct netlink_ext_ack *extack)
1947 {
1948 	struct axienet_local *lp = netdev_priv(ndev);
1949 
1950 	if (ering->rx_pending > RX_BD_NUM_MAX ||
1951 	    ering->rx_mini_pending ||
1952 	    ering->rx_jumbo_pending ||
1953 	    ering->tx_pending < TX_BD_NUM_MIN ||
1954 	    ering->tx_pending > TX_BD_NUM_MAX)
1955 		return -EINVAL;
1956 
1957 	if (netif_running(ndev))
1958 		return -EBUSY;
1959 
1960 	lp->rx_bd_num = ering->rx_pending;
1961 	lp->tx_bd_num = ering->tx_pending;
1962 	return 0;
1963 }
1964 
1965 /**
1966  * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1967  *				     Tx and Rx paths.
1968  * @ndev:	Pointer to net_device structure
1969  * @epauseparm:	Pointer to ethtool_pauseparam structure.
1970  *
1971  * This implements ethtool command for getting axi ethernet pause frame
1972  * setting. Issue "ethtool -a ethX" to execute this function.
1973  */
1974 static void
1975 axienet_ethtools_get_pauseparam(struct net_device *ndev,
1976 				struct ethtool_pauseparam *epauseparm)
1977 {
1978 	struct axienet_local *lp = netdev_priv(ndev);
1979 
1980 	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1981 }
1982 
1983 /**
1984  * axienet_ethtools_set_pauseparam - Set device pause parameter (flow
1985  *				     control) settings.
1986  * @ndev:	Pointer to net_device structure
1987  * @epauseparm:	Pointer to ethtool_pauseparam structure
1988  *
1989  * This implements the ethtool command for enabling flow control on the
1990  * Rx and Tx paths. Issue "ethtool -A ethX tx on|off" to execute this
1991  * function.
1992  *
1993  * Return: 0 on success, or a negative error value on failure.
1994  */
1995 static int
1996 axienet_ethtools_set_pauseparam(struct net_device *ndev,
1997 				struct ethtool_pauseparam *epauseparm)
1998 {
1999 	struct axienet_local *lp = netdev_priv(ndev);
2000 
2001 	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2002 }
2003 
2004 /**
2005  * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2006  * @ndev:	Pointer to net_device structure
2007  * @ecoalesce:	Pointer to ethtool_coalesce structure
2008  * @kernel_coal: ethtool CQE mode setting structure
2009  * @extack:	extack for reporting error messages
2010  *
2011  * This implements ethtool command for getting the DMA interrupt coalescing
2012  * count on the Tx and Rx paths. Issue "ethtool -c ethX" to execute this
2013  * function.
2014  *
2015  * Return: 0 always
2016  */
2017 static int
2018 axienet_ethtools_get_coalesce(struct net_device *ndev,
2019 			      struct ethtool_coalesce *ecoalesce,
2020 			      struct kernel_ethtool_coalesce *kernel_coal,
2021 			      struct netlink_ext_ack *extack)
2022 {
2023 	struct axienet_local *lp = netdev_priv(ndev);
2024 
2025 	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2026 	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2027 	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2028 	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
2029 	return 0;
2030 }
2031 
2032 /**
2033  * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2034  * @ndev:	Pointer to net_device structure
2035  * @ecoalesce:	Pointer to ethtool_coalesce structure
2036  * @kernel_coal: ethtool CQE mode setting structure
2037  * @extack:	extack for reporting error messages
2038  *
2039  * This implements ethtool command for setting the DMA interrupt coalescing
2040  * count on the Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" to
2041  * execute this function.
2042  *
2043  * Return: 0, on success, Non-zero error value on failure.
2044  */
2045 static int
2046 axienet_ethtools_set_coalesce(struct net_device *ndev,
2047 			      struct ethtool_coalesce *ecoalesce,
2048 			      struct kernel_ethtool_coalesce *kernel_coal,
2049 			      struct netlink_ext_ack *extack)
2050 {
2051 	struct axienet_local *lp = netdev_priv(ndev);
2052 
2053 	if (netif_running(ndev)) {
2054 		NL_SET_ERR_MSG(extack,
2055 			       "Please stop netif before applying configuration");
2056 		return -EBUSY;
2057 	}
2058 
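	/* A zero value for any field leaves the corresponding setting
	 * unchanged.
	 */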
2059 	if (ecoalesce->rx_max_coalesced_frames)
2060 		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2061 	if (ecoalesce->rx_coalesce_usecs)
2062 		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2063 	if (ecoalesce->tx_max_coalesced_frames)
2064 		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2065 	if (ecoalesce->tx_coalesce_usecs)
2066 		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2067 
2068 	return 0;
2069 }
2070 
2071 static int
2072 axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2073 				    struct ethtool_link_ksettings *cmd)
2074 {
2075 	struct axienet_local *lp = netdev_priv(ndev);
2076 
2077 	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2078 }
2079 
2080 static int
2081 axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2082 				    const struct ethtool_link_ksettings *cmd)
2083 {
2084 	struct axienet_local *lp = netdev_priv(ndev);
2085 
2086 	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2087 }
2088 
2089 static int axienet_ethtools_nway_reset(struct net_device *dev)
2090 {
2091 	struct axienet_local *lp = netdev_priv(dev);
2092 
2093 	return phylink_ethtool_nway_reset(lp->phylink);
2094 }
2095 
2096 static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2097 					       struct ethtool_stats *stats,
2098 					       u64 *data)
2099 {
2100 	struct axienet_local *lp = netdev_priv(dev);
2101 	unsigned int start;
2102 
2103 	do {
2104 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
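		/* These indices must match axienet_ethtool_stats_strings[] below. */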
2105 		data[0] = axienet_stat(lp, STAT_RX_BYTES);
2106 		data[1] = axienet_stat(lp, STAT_TX_BYTES);
2107 		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2108 		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2109 		data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2110 		data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2111 		data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
2112 		data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
2113 		data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
2114 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2115 }
2116 
2117 static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2118 	"Received bytes",
2119 	"Transmitted bytes",
2120 	"RX Good VLAN Tagged Frames",
2121 	"TX Good VLAN Tagged Frames",
2122 	"TX Good PFC Frames",
2123 	"RX Good PFC Frames",
2124 	"User Defined Counter 0",
2125 	"User Defined Counter 1",
2126 	"User Defined Counter 2",
2127 };
2128 
2129 static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2130 {
2131 	switch (stringset) {
2132 	case ETH_SS_STATS:
2133 		memcpy(data, axienet_ethtool_stats_strings,
2134 		       sizeof(axienet_ethtool_stats_strings));
2135 		break;
2136 	}
2137 }
2138 
2139 static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2140 {
2141 	struct axienet_local *lp = netdev_priv(dev);
2142 
2143 	switch (sset) {
2144 	case ETH_SS_STATS:
2145 		if (lp->features & XAE_FEATURE_STATS)
2146 			return ARRAY_SIZE(axienet_ethtool_stats_strings);
2147 		fallthrough;
2148 	default:
2149 		return -EOPNOTSUPP;
2150 	}
2151 }
2152 
2153 static void
2154 axienet_ethtools_get_pause_stats(struct net_device *dev,
2155 				 struct ethtool_pause_stats *pause_stats)
2156 {
2157 	struct axienet_local *lp = netdev_priv(dev);
2158 	unsigned int start;
2159 
2160 	if (!(lp->features & XAE_FEATURE_STATS))
2161 		return;
2162 
2163 	do {
2164 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2165 		pause_stats->tx_pause_frames =
2166 			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2167 		pause_stats->rx_pause_frames =
2168 			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2169 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2170 }
2171 
2172 static void
2173 axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2174 				  struct ethtool_eth_mac_stats *mac_stats)
2175 {
2176 	struct axienet_local *lp = netdev_priv(dev);
2177 	unsigned int start;
2178 
2179 	if (!(lp->features & XAE_FEATURE_STATS))
2180 		return;
2181 
2182 	do {
2183 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2184 		mac_stats->FramesTransmittedOK =
2185 			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2186 		mac_stats->SingleCollisionFrames =
2187 			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2188 		mac_stats->MultipleCollisionFrames =
2189 			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2190 		mac_stats->FramesReceivedOK =
2191 			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2192 		mac_stats->FrameCheckSequenceErrors =
2193 			axienet_stat(lp, STAT_RX_FCS_ERRORS);
2194 		mac_stats->AlignmentErrors =
2195 			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2196 		mac_stats->FramesWithDeferredXmissions =
2197 			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2198 		mac_stats->LateCollisions =
2199 			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2200 		mac_stats->FramesAbortedDueToXSColls =
2201 			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2202 		mac_stats->MulticastFramesXmittedOK =
2203 			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2204 		mac_stats->BroadcastFramesXmittedOK =
2205 			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2206 		mac_stats->FramesWithExcessiveDeferral =
2207 			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2208 		mac_stats->MulticastFramesReceivedOK =
2209 			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2210 		mac_stats->BroadcastFramesReceivedOK =
2211 			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2212 		mac_stats->InRangeLengthErrors =
2213 			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2214 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2215 }
2216 
2217 static void
2218 axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2219 				   struct ethtool_eth_ctrl_stats *ctrl_stats)
2220 {
2221 	struct axienet_local *lp = netdev_priv(dev);
2222 	unsigned int start;
2223 
2224 	if (!(lp->features & XAE_FEATURE_STATS))
2225 		return;
2226 
2227 	do {
2228 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2229 		ctrl_stats->MACControlFramesTransmitted =
2230 			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2231 		ctrl_stats->MACControlFramesReceived =
2232 			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2233 		ctrl_stats->UnsupportedOpcodesReceived =
2234 			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2235 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2236 }
2237 
2238 static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2239 	{   64,    64 },
2240 	{   65,   127 },
2241 	{  128,   255 },
2242 	{  256,   511 },
2243 	{  512,  1023 },
2244 	{ 1024,  1518 },
2245 	{ 1519, 16384 },
2246 	{ },
2247 };
2248 
2249 static void
2250 axienet_ethtool_get_rmon_stats(struct net_device *dev,
2251 			       struct ethtool_rmon_stats *rmon_stats,
2252 			       const struct ethtool_rmon_hist_range **ranges)
2253 {
2254 	struct axienet_local *lp = netdev_priv(dev);
2255 	unsigned int start;
2256 
2257 	if (!(lp->features & XAE_FEATURE_STATS))
2258 		return;
2259 
2260 	do {
2261 		start = read_seqcount_begin(&lp->hw_stats_seqcount);
2262 		rmon_stats->undersize_pkts =
2263 			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2264 		rmon_stats->oversize_pkts =
2265 			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2266 		rmon_stats->fragments =
2267 			axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2268 
2269 		rmon_stats->hist[0] =
2270 			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2271 		rmon_stats->hist[1] =
2272 			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2273 		rmon_stats->hist[2] =
2274 			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2275 		rmon_stats->hist[3] =
2276 			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2277 		rmon_stats->hist[4] =
2278 			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2279 		rmon_stats->hist[5] =
2280 			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2281 		rmon_stats->hist[6] =
2282 			rmon_stats->oversize_pkts;
2283 
2284 		rmon_stats->hist_tx[0] =
2285 			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2286 		rmon_stats->hist_tx[1] =
2287 			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2288 		rmon_stats->hist_tx[2] =
2289 			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2290 		rmon_stats->hist_tx[3] =
2291 			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2292 		rmon_stats->hist_tx[4] =
2293 			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2294 		rmon_stats->hist_tx[5] =
2295 			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2296 		rmon_stats->hist_tx[6] =
2297 			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2298 	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2299 
2300 	*ranges = axienet_rmon_ranges;
2301 }
2302 
2303 static const struct ethtool_ops axienet_ethtool_ops = {
2304 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2305 				     ETHTOOL_COALESCE_USECS,
2306 	.get_drvinfo    = axienet_ethtools_get_drvinfo,
2307 	.get_regs_len   = axienet_ethtools_get_regs_len,
2308 	.get_regs       = axienet_ethtools_get_regs,
2309 	.get_link       = ethtool_op_get_link,
2310 	.get_ringparam	= axienet_ethtools_get_ringparam,
2311 	.set_ringparam	= axienet_ethtools_set_ringparam,
2312 	.get_pauseparam = axienet_ethtools_get_pauseparam,
2313 	.set_pauseparam = axienet_ethtools_set_pauseparam,
2314 	.get_coalesce   = axienet_ethtools_get_coalesce,
2315 	.set_coalesce   = axienet_ethtools_set_coalesce,
2316 	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
2317 	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
2318 	.nway_reset	= axienet_ethtools_nway_reset,
2319 	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2320 	.get_strings    = axienet_ethtools_get_strings,
2321 	.get_sset_count = axienet_ethtools_get_sset_count,
2322 	.get_pause_stats = axienet_ethtools_get_pause_stats,
2323 	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2324 	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2325 	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
2326 };
2327 
2328 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2329 {
2330 	return container_of(pcs, struct axienet_local, pcs);
2331 }
2332 
2333 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2334 				  struct phylink_link_state *state)
2335 {
2336 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2337 
2338 	phylink_mii_c22_pcs_get_state(pcs_phy, state);
2339 }
2340 
2341 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2342 {
2343 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2344 
2345 	phylink_mii_c22_pcs_an_restart(pcs_phy);
2346 }
2347 
2348 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2349 			      phy_interface_t interface,
2350 			      const unsigned long *advertising,
2351 			      bool permit_pause_to_mac)
2352 {
2353 	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2354 	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2355 	struct axienet_local *lp = netdev_priv(ndev);
2356 	int ret;
2357 
2358 	if (lp->switch_x_sgmii) {
2359 		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2360 				    interface == PHY_INTERFACE_MODE_SGMII ?
2361 					XLNX_MII_STD_SELECT_SGMII : 0);
2362 		if (ret < 0) {
2363 			netdev_warn(ndev,
2364 				    "Failed to switch PHY interface: %d\n",
2365 				    ret);
2366 			return ret;
2367 		}
2368 	}
2369 
2370 	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2371 					 neg_mode);
2372 	if (ret < 0)
2373 		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2374 
2375 	return ret;
2376 }
2377 
2378 static const struct phylink_pcs_ops axienet_pcs_ops = {
2379 	.pcs_get_state = axienet_pcs_get_state,
2380 	.pcs_config = axienet_pcs_config,
2381 	.pcs_an_restart = axienet_pcs_an_restart,
2382 };
2383 
2384 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2385 						  phy_interface_t interface)
2386 {
2387 	struct net_device *ndev = to_net_dev(config->dev);
2388 	struct axienet_local *lp = netdev_priv(ndev);
2389 
2390 	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2391 	    interface ==  PHY_INTERFACE_MODE_SGMII)
2392 		return &lp->pcs;
2393 
2394 	return NULL;
2395 }
2396 
2397 static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2398 			       const struct phylink_link_state *state)
2399 {
2400 	/* nothing meaningful to do */
2401 }
2402 
2403 static void axienet_mac_link_down(struct phylink_config *config,
2404 				  unsigned int mode,
2405 				  phy_interface_t interface)
2406 {
2407 	/* nothing meaningful to do */
2408 }
2409 
2410 static void axienet_mac_link_up(struct phylink_config *config,
2411 				struct phy_device *phy,
2412 				unsigned int mode, phy_interface_t interface,
2413 				int speed, int duplex,
2414 				bool tx_pause, bool rx_pause)
2415 {
2416 	struct net_device *ndev = to_net_dev(config->dev);
2417 	struct axienet_local *lp = netdev_priv(ndev);
2418 	u32 emmc_reg, fcc_reg;
2419 
2420 	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2421 	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2422 
2423 	switch (speed) {
2424 	case SPEED_1000:
2425 		emmc_reg |= XAE_EMMC_LINKSPD_1000;
2426 		break;
2427 	case SPEED_100:
2428 		emmc_reg |= XAE_EMMC_LINKSPD_100;
2429 		break;
2430 	case SPEED_10:
2431 		emmc_reg |= XAE_EMMC_LINKSPD_10;
2432 		break;
2433 	default:
2434 		dev_err(&ndev->dev,
2435 			"Speed other than 10, 100 or 1000 Mbps is not supported\n");
2436 		break;
2437 	}
2438 
2439 	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2440 
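	/* Program the negotiated pause settings into the flow control register. */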
2441 	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2442 	if (tx_pause)
2443 		fcc_reg |= XAE_FCC_FCTX_MASK;
2444 	else
2445 		fcc_reg &= ~XAE_FCC_FCTX_MASK;
2446 	if (rx_pause)
2447 		fcc_reg |= XAE_FCC_FCRX_MASK;
2448 	else
2449 		fcc_reg &= ~XAE_FCC_FCRX_MASK;
2450 	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2451 }
2452 
2453 static const struct phylink_mac_ops axienet_phylink_ops = {
2454 	.mac_select_pcs = axienet_mac_select_pcs,
2455 	.mac_config = axienet_mac_config,
2456 	.mac_link_down = axienet_mac_link_down,
2457 	.mac_link_up = axienet_mac_link_up,
2458 };
2459 
2460 /**
2461  * axienet_dma_err_handler - Work queue task for Axi DMA Error
2462  * @work:	pointer to work_struct
2463  *
2464  * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2465  * Tx/Rx BDs.
2466  */
2467 static void axienet_dma_err_handler(struct work_struct *work)
2468 {
2469 	u32 i;
2470 	u32 axienet_status;
2471 	struct axidma_bd *cur_p;
2472 	struct axienet_local *lp = container_of(work, struct axienet_local,
2473 						dma_err_task);
2474 	struct net_device *ndev = lp->ndev;
2475 
2476 	/* Don't bother if we are going to stop anyway */
2477 	if (READ_ONCE(lp->stopping))
2478 		return;
2479 
2480 	napi_disable(&lp->napi_tx);
2481 	napi_disable(&lp->napi_rx);
2482 
2483 	axienet_setoptions(ndev, lp->options &
2484 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2485 
2486 	axienet_dma_stop(lp);
2487 
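	/* Unmap and free any Tx buffers still owned by the DMA, then clear
	 * every Tx/Rx descriptor so the rings can be reused from scratch.
	 */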
2488 	for (i = 0; i < lp->tx_bd_num; i++) {
2489 		cur_p = &lp->tx_bd_v[i];
2490 		if (cur_p->cntrl) {
2491 			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2492 
2493 			dma_unmap_single(lp->dev, addr,
2494 					 (cur_p->cntrl &
2495 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
2496 					 DMA_TO_DEVICE);
2497 		}
2498 		if (cur_p->skb)
2499 			dev_kfree_skb_irq(cur_p->skb);
2500 		cur_p->phys = 0;
2501 		cur_p->phys_msb = 0;
2502 		cur_p->cntrl = 0;
2503 		cur_p->status = 0;
2504 		cur_p->app0 = 0;
2505 		cur_p->app1 = 0;
2506 		cur_p->app2 = 0;
2507 		cur_p->app3 = 0;
2508 		cur_p->app4 = 0;
2509 		cur_p->skb = NULL;
2510 	}
2511 
2512 	for (i = 0; i < lp->rx_bd_num; i++) {
2513 		cur_p = &lp->rx_bd_v[i];
2514 		cur_p->status = 0;
2515 		cur_p->app0 = 0;
2516 		cur_p->app1 = 0;
2517 		cur_p->app2 = 0;
2518 		cur_p->app3 = 0;
2519 		cur_p->app4 = 0;
2520 	}
2521 
2522 	lp->tx_bd_ci = 0;
2523 	lp->tx_bd_tail = 0;
2524 	lp->rx_bd_ci = 0;
2525 
2526 	axienet_dma_start(lp);
2527 
2528 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2529 	axienet_status &= ~XAE_RCW1_RX_MASK;
2530 	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2531 
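	/* Acknowledge any pending Rx-reject event and re-enable error
	 * interrupts only when the Ethernet core IRQ is actually in use.
	 */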
2532 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2533 	if (axienet_status & XAE_INT_RXRJECT_MASK)
2534 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2535 	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2536 		    XAE_INT_RECV_ERROR_MASK : 0);
2537 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2538 
2539 	/* Sync default options with HW but leave receiver and
2540 	 * transmitter disabled.
2541 	 */
2542 	axienet_setoptions(ndev, lp->options &
2543 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2544 	axienet_set_mac_address(ndev, NULL);
2545 	axienet_set_multicast_list(ndev);
2546 	napi_enable(&lp->napi_rx);
2547 	napi_enable(&lp->napi_tx);
2548 	axienet_setoptions(ndev, lp->options);
2549 }
2550 
2551 /**
2552  * axienet_probe - Axi Ethernet probe function.
2553  * @pdev:	Pointer to platform device structure.
2554  *
2555  * Return: 0, on success
2556  *	    Non-zero error value on failure.
2557  *
2558  * This is the probe routine for Axi Ethernet driver. This is called before
2559  * any other driver routines are invoked. It allocates and sets up the Ethernet
2560  * device. Parses through device tree and populates fields of
2561  * axienet_local. It registers the Ethernet device.
2562  */
2563 static int axienet_probe(struct platform_device *pdev)
2564 {
2565 	int ret;
2566 	struct device_node *np;
2567 	struct axienet_local *lp;
2568 	struct net_device *ndev;
2569 	struct resource *ethres;
2570 	u8 mac_addr[ETH_ALEN];
2571 	int addr_width = 32;
2572 	u32 value;
2573 
2574 	ndev = alloc_etherdev(sizeof(*lp));
2575 	if (!ndev)
2576 		return -ENOMEM;
2577 
2578 	platform_set_drvdata(pdev, ndev);
2579 
2580 	SET_NETDEV_DEV(ndev, &pdev->dev);
2581 	ndev->features = NETIF_F_SG;
2582 	ndev->ethtool_ops = &axienet_ethtool_ops;
2583 
2584 	/* MTU range: 64 - 9000 */
2585 	ndev->min_mtu = 64;
2586 	ndev->max_mtu = XAE_JUMBO_MTU;
2587 
2588 	lp = netdev_priv(ndev);
2589 	lp->ndev = ndev;
2590 	lp->dev = &pdev->dev;
2591 	lp->options = XAE_OPTION_DEFAULTS;
2592 	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2593 	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2594 
2595 	u64_stats_init(&lp->rx_stat_sync);
2596 	u64_stats_init(&lp->tx_stat_sync);
2597 
2598 	mutex_init(&lp->stats_lock);
2599 	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2600 	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2601 
2602 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2603 	if (!lp->axi_clk) {
2604 		/* For backward compatibility, if named AXI clock is not present,
2605 		 * treat the first clock specified as the AXI clock.
2606 		 */
2607 		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2608 	}
2609 	if (IS_ERR(lp->axi_clk)) {
2610 		ret = PTR_ERR(lp->axi_clk);
2611 		goto free_netdev;
2612 	}
2613 	ret = clk_prepare_enable(lp->axi_clk);
2614 	if (ret) {
2615 		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2616 		goto free_netdev;
2617 	}
2618 
2619 	lp->misc_clks[0].id = "axis_clk";
2620 	lp->misc_clks[1].id = "ref_clk";
2621 	lp->misc_clks[2].id = "mgt_clk";
2622 
2623 	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2624 	if (ret)
2625 		goto cleanup_clk;
2626 
2627 	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2628 	if (ret)
2629 		goto cleanup_clk;
2630 
2631 	/* Map device registers */
2632 	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2633 	if (IS_ERR(lp->regs)) {
2634 		ret = PTR_ERR(lp->regs);
2635 		goto cleanup_clk;
2636 	}
2637 	lp->regs_start = ethres->start;
2638 
2639 	/* Setup checksum offload, but default to off if not specified */
2640 	lp->features = 0;
2641 
2642 	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2643 		lp->features |= XAE_FEATURE_STATS;
2644 
2645 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2646 	if (!ret) {
2647 		switch (value) {
2648 		case 1:
2649 			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2650 			/* Can checksum any contiguous range */
2651 			ndev->features |= NETIF_F_HW_CSUM;
2652 			break;
2653 		case 2:
2654 			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2655 			/* Can checksum TCP/UDP over IPv4. */
2656 			ndev->features |= NETIF_F_IP_CSUM;
2657 			break;
2658 		}
2659 	}
2660 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2661 	if (!ret) {
2662 		switch (value) {
2663 		case 1:
2664 			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2665 			ndev->features |= NETIF_F_RXCSUM;
2666 			break;
2667 		case 2:
2668 			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2669 			ndev->features |= NETIF_F_RXCSUM;
2670 			break;
2671 		}
2672 	}
2673 	/* Jumbo frame support requires sufficiently large Rx/Tx memory in
2674 	 * the Axi Ethernet hardware. Read the Rx memory size from the
2675 	 * device tree; it is used to validate any requested MTU change.
2676 	 */
2679 	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2680 
2681 	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2682 						   "xlnx,switch-x-sgmii");
2683 
2684 	/* Start with the proprietary, and broken phy_type */
2685 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2686 	if (!ret) {
2687 		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2688 		switch (value) {
2689 		case XAE_PHY_TYPE_MII:
2690 			lp->phy_mode = PHY_INTERFACE_MODE_MII;
2691 			break;
2692 		case XAE_PHY_TYPE_GMII:
2693 			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2694 			break;
2695 		case XAE_PHY_TYPE_RGMII_2_0:
2696 			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2697 			break;
2698 		case XAE_PHY_TYPE_SGMII:
2699 			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2700 			break;
2701 		case XAE_PHY_TYPE_1000BASE_X:
2702 			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2703 			break;
2704 		default:
2705 			ret = -EINVAL;
2706 			goto cleanup_clk;
2707 		}
2708 	} else {
2709 		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2710 		if (ret)
2711 			goto cleanup_clk;
2712 	}
2713 	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2714 	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2715 		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2716 		ret = -EINVAL;
2717 		goto cleanup_clk;
2718 	}
2719 
2720 	if (!of_property_present(pdev->dev.of_node, "dmas")) {
2721 		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2722 		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2723 
2724 		if (np) {
2725 			struct resource dmares;
2726 
2727 			ret = of_address_to_resource(np, 0, &dmares);
2728 			if (ret) {
2729 				dev_err(&pdev->dev,
2730 					"unable to get DMA resource\n");
2731 				of_node_put(np);
2732 				goto cleanup_clk;
2733 			}
2734 			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2735 							     &dmares);
2736 			lp->rx_irq = irq_of_parse_and_map(np, 1);
2737 			lp->tx_irq = irq_of_parse_and_map(np, 0);
2738 			of_node_put(np);
2739 			lp->eth_irq = platform_get_irq_optional(pdev, 0);
2740 		} else {
2741 			/* Check for these resources directly on the Ethernet node. */
2742 			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2743 			lp->rx_irq = platform_get_irq(pdev, 1);
2744 			lp->tx_irq = platform_get_irq(pdev, 0);
2745 			lp->eth_irq = platform_get_irq_optional(pdev, 2);
2746 		}
2747 		if (IS_ERR(lp->dma_regs)) {
2748 			dev_err(&pdev->dev, "could not map DMA regs\n");
2749 			ret = PTR_ERR(lp->dma_regs);
2750 			goto cleanup_clk;
2751 		}
2752 		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2753 			dev_err(&pdev->dev, "could not determine irqs\n");
2754 			ret = -ENOMEM;
2755 			goto cleanup_clk;
2756 		}
2757 
2758 		/* Reset core now that clocks are enabled, prior to accessing MDIO */
2759 		ret = __axienet_device_reset(lp);
2760 		if (ret)
2761 			goto cleanup_clk;
2762 
2763 		/* Autodetect the need for 64-bit DMA pointers.
2764 		 * When the IP is configured for a bus width bigger than 32 bits,
2765 		 * writing the MSB registers is mandatory, even if they are all 0.
2766 		 * We can detect this case by writing all 1's to one such register
2767 		 * and see if that sticks: when the IP is configured for 32 bits
2768 		 * only, those registers are RES0.
2769 		 * Those MSB registers were introduced in IP v7.1, which we check first.
2770 		 */
2771 		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2772 			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2773 
2774 			iowrite32(0x0, desc);
2775 			if (ioread32(desc) == 0) {	/* sanity check */
2776 				iowrite32(0xffffffff, desc);
2777 				if (ioread32(desc) > 0) {
2778 					lp->features |= XAE_FEATURE_DMA_64BIT;
2779 					addr_width = 64;
2780 					dev_info(&pdev->dev,
2781 						 "autodetected 64-bit DMA range\n");
2782 				}
2783 				iowrite32(0x0, desc);
2784 			}
2785 		}
2786 		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2787 			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2788 			ret = -EINVAL;
2789 			goto cleanup_clk;
2790 		}
2791 
2792 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2793 		if (ret) {
2794 			dev_err(&pdev->dev, "No suitable DMA available\n");
2795 			goto cleanup_clk;
2796 		}
2797 		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2798 		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2799 	} else {
2800 		struct xilinx_vdma_config cfg;
2801 		struct dma_chan *tx_chan;
2802 
2803 		lp->eth_irq = platform_get_irq_optional(pdev, 0);
2804 		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2805 			ret = lp->eth_irq;
2806 			goto cleanup_clk;
2807 		}
2808 		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2809 		if (IS_ERR(tx_chan)) {
2810 			ret = PTR_ERR(tx_chan);
2811 			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2812 			goto cleanup_clk;
2813 		}
2814 
2815 		cfg.reset = 1;
2816 		/* Despite the VDMA name, this call also resets plain AXI DMA channels */
2817 		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2818 		if (ret < 0) {
2819 			dev_err(&pdev->dev, "Reset channel failed\n");
2820 			dma_release_channel(tx_chan);
2821 			goto cleanup_clk;
2822 		}
2823 
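		/* The channel was requested only to perform the reset; the
		 * datapath channels are requested again in axienet_open().
		 */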
2824 		dma_release_channel(tx_chan);
2825 		lp->use_dmaengine = 1;
2826 	}
2827 
2828 	if (lp->use_dmaengine)
2829 		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2830 	else
2831 		ndev->netdev_ops = &axienet_netdev_ops;
2832 	/* Check for Ethernet core IRQ (optional) */
2833 	if (lp->eth_irq <= 0)
2834 		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2835 
2836 	/* Retrieve the MAC address */
2837 	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2838 	if (!ret) {
2839 		axienet_set_mac_address(ndev, mac_addr);
2840 	} else {
2841 		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2842 			 ret);
2843 		axienet_set_mac_address(ndev, NULL);
2844 	}
2845 
2846 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2847 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2848 	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2849 	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2850 
2851 	ret = axienet_mdio_setup(lp);
2852 	if (ret)
2853 		dev_warn(&pdev->dev,
2854 			 "error registering MDIO bus: %d\n", ret);
2855 
2856 	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2857 	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2858 		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2859 		if (!np) {
2860 			/* Deprecated: Always use "pcs-handle" for pcs_phy.
2861 			 * Falling back to "phy-handle" here is only for
2862 			 * backward compatibility with old device trees.
2863 			 */
2864 			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2865 		}
2866 		if (!np) {
2867 			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2868 			ret = -EINVAL;
2869 			goto cleanup_mdio;
2870 		}
2871 		lp->pcs_phy = of_mdio_find_device(np);
2872 		if (!lp->pcs_phy) {
2873 			ret = -EPROBE_DEFER;
2874 			of_node_put(np);
2875 			goto cleanup_mdio;
2876 		}
2877 		of_node_put(np);
2878 		lp->pcs.ops = &axienet_pcs_ops;
2879 		lp->pcs.neg_mode = true;
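		/* Poll the PCS for link state changes; assumption: the internal
		 * PCS/PMA does not provide a link-change interrupt here.
		 */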
2880 		lp->pcs.poll = true;
2881 	}
2882 
2883 	lp->phylink_config.dev = &ndev->dev;
2884 	lp->phylink_config.type = PHYLINK_NETDEV;
2885 	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2886 		MAC_10FD | MAC_100FD | MAC_1000FD;
2887 
2888 	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2889 	if (lp->switch_x_sgmii) {
2890 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
2891 			  lp->phylink_config.supported_interfaces);
2892 		__set_bit(PHY_INTERFACE_MODE_SGMII,
2893 			  lp->phylink_config.supported_interfaces);
2894 	}
2895 
2896 	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2897 				     lp->phy_mode,
2898 				     &axienet_phylink_ops);
2899 	if (IS_ERR(lp->phylink)) {
2900 		ret = PTR_ERR(lp->phylink);
2901 		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2902 		goto cleanup_mdio;
2903 	}
2904 
2905 	ret = register_netdev(lp->ndev);
2906 	if (ret) {
2907 		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2908 		goto cleanup_phylink;
2909 	}
2910 
2911 	return 0;
2912 
2913 cleanup_phylink:
2914 	phylink_destroy(lp->phylink);
2915 
2916 cleanup_mdio:
2917 	if (lp->pcs_phy)
2918 		put_device(&lp->pcs_phy->dev);
2919 	if (lp->mii_bus)
2920 		axienet_mdio_teardown(lp);
2921 cleanup_clk:
2922 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2923 	clk_disable_unprepare(lp->axi_clk);
2924 
2925 free_netdev:
2926 	free_netdev(ndev);
2927 
2928 	return ret;
2929 }
2930 
2931 static void axienet_remove(struct platform_device *pdev)
2932 {
2933 	struct net_device *ndev = platform_get_drvdata(pdev);
2934 	struct axienet_local *lp = netdev_priv(ndev);
2935 
2936 	unregister_netdev(ndev);
2937 
2938 	if (lp->phylink)
2939 		phylink_destroy(lp->phylink);
2940 
2941 	if (lp->pcs_phy)
2942 		put_device(&lp->pcs_phy->dev);
2943 
2944 	axienet_mdio_teardown(lp);
2945 
2946 	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2947 	clk_disable_unprepare(lp->axi_clk);
2948 
2949 	free_netdev(ndev);
2950 }
2951 
2952 static void axienet_shutdown(struct platform_device *pdev)
2953 {
2954 	struct net_device *ndev = platform_get_drvdata(pdev);
2955 
2956 	rtnl_lock();
2957 	netif_device_detach(ndev);
2958 
2959 	if (netif_running(ndev))
2960 		dev_close(ndev);
2961 
2962 	rtnl_unlock();
2963 }
2964 
2965 static int axienet_suspend(struct device *dev)
2966 {
2967 	struct net_device *ndev = dev_get_drvdata(dev);
2968 
2969 	if (!netif_running(ndev))
2970 		return 0;
2971 
2972 	netif_device_detach(ndev);
2973 
2974 	rtnl_lock();
2975 	axienet_stop(ndev);
2976 	rtnl_unlock();
2977 
2978 	return 0;
2979 }
2980 
2981 static int axienet_resume(struct device *dev)
2982 {
2983 	struct net_device *ndev = dev_get_drvdata(dev);
2984 
2985 	if (!netif_running(ndev))
2986 		return 0;
2987 
2988 	rtnl_lock();
2989 	axienet_open(ndev);
2990 	rtnl_unlock();
2991 
2992 	netif_device_attach(ndev);
2993 
2994 	return 0;
2995 }
2996 
2997 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
2998 				axienet_suspend, axienet_resume);
2999 
3000 static struct platform_driver axienet_driver = {
3001 	.probe = axienet_probe,
3002 	.remove_new = axienet_remove,
3003 	.shutdown = axienet_shutdown,
3004 	.driver = {
3005 		 .name = "xilinx_axienet",
3006 		 .pm = &axienet_pm_ops,
3007 		 .of_match_table = axienet_of_match,
3008 	},
3009 };
3010 
3011 module_platform_driver(axienet_driver);
3012 
3013 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3014 MODULE_AUTHOR("Xilinx");
3015 MODULE_LICENSE("GPL");
3016