1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Xilinx Axi Ethernet device driver
4 *
5 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
6 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
9 * Copyright (c) 2010 - 2011 PetaLogix
10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
12 *
13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
14 * and Spartan6.
15 *
16 * TODO:
17 * - Add Axi Fifo support.
18 * - Factor out Axi DMA code into separate driver.
19 * - Test and fix basic multicast filtering.
20 * - Add support for extended multicast filtering.
21 * - Test basic VLAN support.
22 * - Add support for extended VLAN support.
23 */
24
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/etherdevice.h>
28 #include <linux/module.h>
29 #include <linux/netdevice.h>
30 #include <linux/of.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_address.h>
35 #include <linux/platform_device.h>
36 #include <linux/skbuff.h>
37 #include <linux/math64.h>
38 #include <linux/phy.h>
39 #include <linux/mii.h>
40 #include <linux/ethtool.h>
41 #include <linux/dmaengine.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/dma/xilinx_dma.h>
44 #include <linux/circ_buf.h>
45 #include <net/netdev_queues.h>
46
47 #include "xilinx_axienet.h"
48
49 /* Descriptors defines for Tx and Rx DMA */
50 #define TX_BD_NUM_DEFAULT 128
51 #define RX_BD_NUM_DEFAULT 1024
52 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
53 #define TX_BD_NUM_MAX 4096
54 #define RX_BD_NUM_MAX 4096
55 #define DMA_NUM_APP_WORDS 5
56 #define LEN_APP 4
57 #define RX_BUF_NUM_DEFAULT 128
58
59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
60 #define DRIVER_NAME "xaxienet"
61 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
62 #define DRIVER_VERSION "1.00a"
63
64 #define AXIENET_REGS_N 40
65
66 static void axienet_rx_submit_desc(struct net_device *ndev);
67
68 /* Match table for of_platform binding */
69 static const struct of_device_id axienet_of_match[] = {
70 { .compatible = "xlnx,axi-ethernet-1.00.a", },
71 { .compatible = "xlnx,axi-ethernet-1.01.a", },
72 { .compatible = "xlnx,axi-ethernet-2.01.a", },
73 {},
74 };
75
76 MODULE_DEVICE_TABLE(of, axienet_of_match);
77
78 /* Option table for setting up Axi Ethernet hardware options */
79 static struct axienet_option axienet_options[] = {
80 /* Turn on jumbo packet support for both Rx and Tx */
81 {
82 .opt = XAE_OPTION_JUMBO,
83 .reg = XAE_TC_OFFSET,
84 .m_or = XAE_TC_JUM_MASK,
85 }, {
86 .opt = XAE_OPTION_JUMBO,
87 .reg = XAE_RCW1_OFFSET,
88 .m_or = XAE_RCW1_JUM_MASK,
89 }, { /* Turn on VLAN packet support for both Rx and Tx */
90 .opt = XAE_OPTION_VLAN,
91 .reg = XAE_TC_OFFSET,
92 .m_or = XAE_TC_VLAN_MASK,
93 }, {
94 .opt = XAE_OPTION_VLAN,
95 .reg = XAE_RCW1_OFFSET,
96 .m_or = XAE_RCW1_VLAN_MASK,
97 }, { /* Turn on FCS stripping on receive packets */
98 .opt = XAE_OPTION_FCS_STRIP,
99 .reg = XAE_RCW1_OFFSET,
100 .m_or = XAE_RCW1_FCS_MASK,
101 }, { /* Turn on FCS insertion on transmit packets */
102 .opt = XAE_OPTION_FCS_INSERT,
103 .reg = XAE_TC_OFFSET,
104 .m_or = XAE_TC_FCS_MASK,
105 }, { /* Turn off length/type field checking on receive packets */
106 .opt = XAE_OPTION_LENTYPE_ERR,
107 .reg = XAE_RCW1_OFFSET,
108 .m_or = XAE_RCW1_LT_DIS_MASK,
109 }, { /* Turn on Rx flow control */
110 .opt = XAE_OPTION_FLOW_CONTROL,
111 .reg = XAE_FCC_OFFSET,
112 .m_or = XAE_FCC_FCRX_MASK,
113 }, { /* Turn on Tx flow control */
114 .opt = XAE_OPTION_FLOW_CONTROL,
115 .reg = XAE_FCC_OFFSET,
116 .m_or = XAE_FCC_FCTX_MASK,
117 }, { /* Turn on promiscuous frame filtering */
118 .opt = XAE_OPTION_PROMISC,
119 .reg = XAE_FMI_OFFSET,
120 .m_or = XAE_FMI_PM_MASK,
121 }, { /* Enable transmitter */
122 .opt = XAE_OPTION_TXEN,
123 .reg = XAE_TC_OFFSET,
124 .m_or = XAE_TC_TX_MASK,
125 }, { /* Enable receiver */
126 .opt = XAE_OPTION_RXEN,
127 .reg = XAE_RCW1_OFFSET,
128 .m_or = XAE_RCW1_RX_MASK,
129 },
130 {}
131 };
132
axienet_get_rx_desc(struct axienet_local * lp,int i)133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
134 {
135 return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
136 }
137
axienet_get_tx_desc(struct axienet_local * lp,int i)138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
139 {
140 return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
141 }
142
143 /**
144 * axienet_dma_in32 - Memory mapped Axi DMA register read
145 * @lp: Pointer to axienet local structure
146 * @reg: Address offset from the base address of the Axi DMA core
147 *
148 * Return: The contents of the Axi DMA register
149 *
150 * This function returns the contents of the corresponding Axi DMA register.
151 */
axienet_dma_in32(struct axienet_local * lp,off_t reg)152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
153 {
154 return ioread32(lp->dma_regs + reg);
155 }
156
/* Program buffer address @addr into descriptor @desc. The upper 32 bits
 * are only written when the DMA engine supports 64-bit addressing.
 */
static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (!(lp->features & XAE_FEATURE_DMA_64BIT))
		return;

	desc->phys_msb = upper_32_bits(addr);
}
164
desc_get_phys_addr(struct axienet_local * lp,struct axidma_bd * desc)165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
166 struct axidma_bd *desc)
167 {
168 dma_addr_t ret = desc->phys;
169
170 if (lp->features & XAE_FEATURE_DMA_64BIT)
171 ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
172
173 return ret;
174 }
175
/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 *
 * Safe to call on a partially initialised ring: rx_bd_v may be NULL, and
 * individual Rx entries may have no skb or no DMA mapping yet.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}
225
axienet_dma_rate(struct axienet_local * lp)226 static u64 axienet_dma_rate(struct axienet_local *lp)
227 {
228 if (lp->axi_clk)
229 return clk_get_rate(lp->axi_clk);
230 return 125000000; /* arbitrary guess if no clock rate set */
231 }
232
233 /**
234 * axienet_calc_cr() - Calculate control register value
235 * @lp: Device private data
236 * @count: Number of completions before an interrupt
237 * @usec: Microseconds after the last completion before an interrupt
238 *
239 * Calculate a control register value based on the coalescing settings. The
240 * run/stop bit is not set.
241 *
242 * Return: Control register value with coalescing settings configured.
243 */
axienet_calc_cr(struct axienet_local * lp,u32 count,u32 usec)244 static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
245 {
246 u32 cr;
247
248 cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
249 XAXIDMA_IRQ_ERROR_MASK;
250 /* Only set interrupt delay timer if not generating an interrupt on
251 * the first packet. Otherwise leave at 0 to disable delay interrupt.
252 */
253 if (count > 1) {
254 u64 clk_rate = axienet_dma_rate(lp);
255 u32 timer;
256
257 /* 1 Timeout Interval = 125 * (clock period of SG clock) */
258 timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
259 XAXIDMA_DELAY_SCALE);
260
261 timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
262 cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
263 XAXIDMA_IRQ_DELAY_MASK;
264 }
265
266 return cr;
267 }
268
269 /**
270 * axienet_coalesce_params() - Extract coalesce parameters from the CR
271 * @lp: Device private data
272 * @cr: The control register to parse
273 * @count: Number of packets before an interrupt
274 * @usec: Idle time (in usec) before an interrupt
275 */
axienet_coalesce_params(struct axienet_local * lp,u32 cr,u32 * count,u32 * usec)276 static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
277 u32 *count, u32 *usec)
278 {
279 u64 clk_rate = axienet_dma_rate(lp);
280 u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
281
282 *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
283 *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
284 }
285
/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp:		Pointer to the axienet_local structure
 *
 * Brings both the Rx and Tx channels out of the halted state. Note the
 * ordering: the current-descriptor pointer must be programmed before the
 * run/stop bit is set, and for Rx the tail pointer write is what arms
 * reception. The cached *_dma_cr values are updated under their
 * respective spinlocks so concurrent coalesce updates see a consistent CR.
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	spin_lock_irq(&lp->rx_cr_lock);

	/* Start updating the Rx channel control register */
	lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
	lp->rx_dma_started = true;

	spin_unlock_irq(&lp->rx_cr_lock);
	spin_lock_irq(&lp->tx_cr_lock);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	lp->tx_dma_started = true;

	spin_unlock_irq(&lp->tx_cr_lock);
}
326
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	/* Link each Tx BD to the next, wrapping the last back to the
	 * first so the descriptors form a ring.
	 */
	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	/* Rx ring: link the BDs the same way and give each one a mapped
	 * receive buffer of max_frm_size bytes.
	 */
	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		/* Non-zero cntrl doubles as the "mapped" marker checked
		 * by axienet_dma_bd_release().
		 */
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	/* handles partially initialised rings */
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
403
404 /**
405 * axienet_set_mac_address - Write the MAC address
406 * @ndev: Pointer to the net_device structure
407 * @address: 6 byte Address to be written as MAC address
408 *
409 * This function is called to initialize the MAC address of the Axi Ethernet
410 * core. It writes to the UAW0 and UAW1 registers of the core.
411 */
axienet_set_mac_address(struct net_device * ndev,const void * address)412 static void axienet_set_mac_address(struct net_device *ndev,
413 const void *address)
414 {
415 struct axienet_local *lp = netdev_priv(ndev);
416
417 if (address)
418 eth_hw_addr_set(ndev, address);
419 if (!is_valid_ether_addr(ndev->dev_addr))
420 eth_hw_addr_random(ndev);
421
422 /* Set up unicast MAC address filter set its mac address */
423 axienet_iow(lp, XAE_UAW0_OFFSET,
424 (ndev->dev_addr[0]) |
425 (ndev->dev_addr[1] << 8) |
426 (ndev->dev_addr[2] << 16) |
427 (ndev->dev_addr[3] << 24));
428 axienet_iow(lp, XAE_UAW1_OFFSET,
429 (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
430 ~XAE_UAW1_UNICASTADDR_MASK) |
431 (ndev->dev_addr[4] |
432 (ndev->dev_addr[5] << 8))));
433 }
434
435 /**
436 * netdev_set_mac_address - Write the MAC address (from outside the driver)
437 * @ndev: Pointer to the net_device structure
438 * @p: 6 byte Address to be written as MAC address
439 *
440 * Return: 0 for all conditions. Presently, there is no failure case.
441 *
442 * This function is called to initialize the MAC address of the Axi Ethernet
443 * core. It calls the core specific axienet_set_mac_address. This is the
444 * function that goes into net_device_ops structure entry ndo_set_mac_address.
445 */
netdev_set_mac_address(struct net_device * ndev,void * p)446 static int netdev_set_mac_address(struct net_device *ndev, void *p)
447 {
448 struct sockaddr *addr = p;
449
450 axienet_set_mac_address(ndev, addr->sa_data);
451 return 0;
452 }
453
454 /**
455 * axienet_set_multicast_list - Prepare the multicast table
456 * @ndev: Pointer to the net_device structure
457 *
458 * This function is called to initialize the multicast table during
459 * initialization. The Axi Ethernet basic multicast support has a four-entry
460 * multicast table which is initialized here. Additionally this function
461 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
462 * means whenever the multicast table entries need to be updated this
463 * function gets called.
464 */
axienet_set_multicast_list(struct net_device * ndev)465 static void axienet_set_multicast_list(struct net_device *ndev)
466 {
467 int i = 0;
468 u32 reg, af0reg, af1reg;
469 struct axienet_local *lp = netdev_priv(ndev);
470
471 reg = axienet_ior(lp, XAE_FMI_OFFSET);
472 reg &= ~XAE_FMI_PM_MASK;
473 if (ndev->flags & IFF_PROMISC)
474 reg |= XAE_FMI_PM_MASK;
475 else
476 reg &= ~XAE_FMI_PM_MASK;
477 axienet_iow(lp, XAE_FMI_OFFSET, reg);
478
479 if (ndev->flags & IFF_ALLMULTI ||
480 netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
481 reg &= 0xFFFFFF00;
482 axienet_iow(lp, XAE_FMI_OFFSET, reg);
483 axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
484 axienet_iow(lp, XAE_AF1_OFFSET, 0);
485 axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
486 axienet_iow(lp, XAE_AM1_OFFSET, 0);
487 axienet_iow(lp, XAE_FFE_OFFSET, 1);
488 i = 1;
489 } else if (!netdev_mc_empty(ndev)) {
490 struct netdev_hw_addr *ha;
491
492 netdev_for_each_mc_addr(ha, ndev) {
493 if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
494 break;
495
496 af0reg = (ha->addr[0]);
497 af0reg |= (ha->addr[1] << 8);
498 af0reg |= (ha->addr[2] << 16);
499 af0reg |= (ha->addr[3] << 24);
500
501 af1reg = (ha->addr[4]);
502 af1reg |= (ha->addr[5] << 8);
503
504 reg &= 0xFFFFFF00;
505 reg |= i;
506
507 axienet_iow(lp, XAE_FMI_OFFSET, reg);
508 axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
509 axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
510 axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
511 axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
512 axienet_iow(lp, XAE_FFE_OFFSET, 1);
513 i++;
514 }
515 }
516
517 for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
518 reg &= 0xFFFFFF00;
519 reg |= i;
520 axienet_iow(lp, XAE_FMI_OFFSET, reg);
521 axienet_iow(lp, XAE_FFE_OFFSET, 0);
522 }
523 }
524
525 /**
526 * axienet_setoptions - Set an Axi Ethernet option
527 * @ndev: Pointer to the net_device structure
528 * @options: Option to be enabled/disabled
529 *
530 * The Axi Ethernet core has multiple features which can be selectively turned
531 * on or off. The typical options could be jumbo frame option, basic VLAN
532 * option, promiscuous mode option etc. This function is used to set or clear
533 * these options in the Axi Ethernet hardware. This is done through
534 * axienet_option structure .
535 */
axienet_setoptions(struct net_device * ndev,u32 options)536 static void axienet_setoptions(struct net_device *ndev, u32 options)
537 {
538 int reg;
539 struct axienet_local *lp = netdev_priv(ndev);
540 struct axienet_option *tp = &axienet_options[0];
541
542 while (tp->opt) {
543 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
544 if (options & tp->opt)
545 reg |= tp->m_or;
546 axienet_iow(lp, tp->reg, reg);
547 tp++;
548 }
549
550 lp->options |= options;
551 }
552
/* Read the current extended value of hardware statistic @stat.
 *
 * The hardware counter is 32 bits; the 64-bit value is reconstructed from
 * the software base accumulated by axienet_stats_update(). The unsigned
 * subtraction handles counter wraparound since the last snapshot.
 * NOTE(review): presumably called under the hw_stats_seqcount read side or
 * stats_lock so base/last_counter are consistent — confirm at call sites.
 */
static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
{
	u32 counter;

	/* During a reset the hardware counters are unreliable; report the
	 * last accumulated software value only.
	 */
	if (lp->reset_in_progress)
		return lp->hw_stat_base[stat];

	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
}
563
/* Fold the current hardware counter values into the 64-bit software
 * accumulators and record whether a reset is in progress.
 * @reset: new value for lp->reset_in_progress, published under the
 *	   seqcount so readers in axienet_stat() see a consistent state.
 */
static void axienet_stats_update(struct axienet_local *lp, bool reset)
{
	enum temac_stat stat;

	write_seqcount_begin(&lp->hw_stats_seqcount);
	lp->reset_in_progress = reset;
	for (stat = 0; stat < STAT_COUNT; stat++) {
		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

		/* unsigned delta handles 32-bit counter wraparound */
		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
		lp->hw_last_counter[stat] = counter;
	}
	write_seqcount_end(&lp->hw_stats_seqcount);
}
578
/* Periodic work item: snapshot the hardware statistics counters often
 * enough that the 32-bit hardware counters cannot wrap more than once
 * between samples, then re-arm itself.
 */
static void axienet_refresh_stats(struct work_struct *work)
{
	struct axienet_local *lp = container_of(work, struct axienet_local,
						stats_work.work);

	mutex_lock(&lp->stats_lock);
	axienet_stats_update(lp, false);
	mutex_unlock(&lp->stats_lock);

	/* Just less than 2^32 bytes at 2.5 GBit/s */
	schedule_delayed_work(&lp->stats_work, 13 * HZ);
}
591
/**
 * __axienet_device_reset - Reset the Axi DMA (and thereby the Ethernet core)
 * @lp:		Pointer to the axienet_local structure
 *
 * Saves the statistics counters before the reset and re-bases them
 * afterwards, since the reset clears the hardware counters.
 *
 * Return: 0 on success, negative errno on DMA or PHY reset timeout.
 */
static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Save statistics counters in case they will be reset */
	mutex_lock(&lp->stats_lock);
	if (lp->features & XAE_FEATURE_STATS)
		axienet_stats_update(lp, true);

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		goto out;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		goto out;
	}

	/* Update statistics counters with new values */
	if (lp->features & XAE_FEATURE_STATS) {
		enum temac_stat stat;

		write_seqcount_begin(&lp->hw_stats_seqcount);
		lp->reset_in_progress = false;
		for (stat = 0; stat < STAT_COUNT; stat++) {
			u32 counter =
				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

			/* Shift the base so that base + (counter - last)
			 * is continuous across the reset.
			 */
			lp->hw_stat_base[stat] +=
				lp->hw_last_counter[stat] - counter;
			lp->hw_last_counter[stat] = counter;
		}
		write_seqcount_end(&lp->hw_stats_seqcount);
	}

out:
	mutex_unlock(&lp->stats_lock);
	return ret;
}
650
/**
 * axienet_dma_stop - Stop DMA operation
 * @lp:		Pointer to the axienet_local structure
 *
 * Masks channel interrupts and clears the run/stop bits under the CR
 * locks, waits for any in-flight interrupt handlers via synchronize_irq(),
 * gives the channels a short grace period to halt, and finally forces a
 * full DMA reset to guarantee both channels are stopped.
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	spin_lock_irq(&lp->rx_cr_lock);

	/* Clear run/stop and mask all interrupts on the Rx channel */
	cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	lp->rx_dma_started = false;

	spin_unlock_irq(&lp->rx_cr_lock);
	synchronize_irq(lp->rx_irq);

	spin_lock_irq(&lp->tx_cr_lock);

	/* Same for the Tx channel */
	cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	lp->tx_dma_started = false;

	spin_unlock_irq(&lp->tx_cr_lock);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}
696
/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 *
 * Return: 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	/* Recompute the maximum frame size from the current MTU; jumbo
	 * support is only enabled when the Rx memory can hold the frame.
	 */
	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	/* Only the in-driver DMA path needs the reset and BD rings; the
	 * dmaengine path delegates this to the DMA driver.
	 */
	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	/* Disable the receiver while reconfiguring */
	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	/* Acknowledge any pending Rx-reject interrupt and enable error
	 * interrupts only when an Ethernet IRQ is actually wired up.
	 */
	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}
766
/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp:		Pointer to the axienet_local structure
 * @first_bd:	Index of first descriptor to clean up
 * @nr_bds:	Max number of descriptors to clean up
 * @force:	Whether to clean descriptors even if not complete
 * @sizep:	Pointer to a u32 accumulating the total byte count of
 *		completed packets (using skb->len). Ignored if NULL.
 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 *
 * The consumer index (tx_bd_ci) is only advanced on the normal (!force)
 * path; the force path is used for error cleanup of a partially built
 * chain whose descriptors were never handed to the hardware.
 *
 * Return: The number of packets handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	int i, packets = 0;
	dma_addr_t phys;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		/* Only completed descriptors carry a packet to account for;
		 * forced cleanup may hit descriptors with no skb attached.
		 */
		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
			if (sizep)
				*sizep += cur_p->skb->len;
			napi_consume_skb(cur_p->skb, budget);
			packets++;
		}

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;
	}

	if (!force) {
		lp->tx_bd_ci += i;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;
	}

	return packets;
}
833
/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 *
 * Only the last descriptor of the prospective chain is checked: cntrl is
 * written last when queueing and cleared last when freeing (see the wmb()
 * in axienet_free_tx_chain()), so a zero cntrl there implies the whole
 * span is free.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}
860
/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data:	Pointer to the axienet_local structure.
 * @result:	error reporting through dmaengine_result.
 * This function is called by dmaengine driver for TX channel to notify
 * that the transmit is done. It accounts the packet, unmaps and frees
 * the skb, and wakes the queue once at least 2 ring slots are free
 * (matching the stop threshold in axienet_start_xmit_dmaengine()).
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	/* Completions arrive in submission order, so consume from the tail */
	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2);
}
888
/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success or any non space errors.
 *	    NETDEV_TX_BUSY when free element in TX skb ring buffer
 *	    is not available.
 *
 * This function is invoked to initiate transmission. The
 * function sets the skbs, register dma callback API and submit
 * the dma transaction.
 * Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	/* One scatterlist entry for the linear part plus one per fragment */
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	/* NOTE(review): tx_ring_head is advanced before skb_to_sgvec/
	 * dma_map_sg can still fail; confirm the slot accounting on the
	 * error paths below (head is not rewound).
	 */
	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			app_metadata[0] |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			app_metadata[0] |= 1;
			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
	}

	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
			sg_len, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT, (void *)app_metadata);
	if (!dma_tx_desc)
		goto xmit_error_unmap_sg;

	skbuf_dma->skb = skb;
	skbuf_dma->sg_len = sg_len;
	dma_tx_desc->callback_param = lp;
	dma_tx_desc->callback_result = axienet_dma_tx_cb;
	txq = skb_get_tx_queue(lp->ndev, skb);
	netdev_tx_sent_queue(txq, skb->len);
	/* Stop the queue when fewer than 2 slots remain; the completion
	 * callback wakes it at the same threshold.
	 */
	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
			     1, 2);

	dmaengine_submit(dma_tx_desc);
	dma_async_issue_pending(lp->tx_chan);
	return NETDEV_TX_OK;

xmit_error_unmap_sg:
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
xmit_error_drop_skb:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
982
/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;	/* total bytes completed, accumulated by the free helper */
	int packets;

	/* Reclaim completed Tx BDs starting at the current clean index */
	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
					&size, budget);

	if (packets) {
		/* Report completions to BQL and the driver's 64-bit counters */
		netdev_completed_queue(ndev, packets, size);
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		/* Wake the queue only if a worst-case packet now fits */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		spin_lock_irq(&lp->tx_cr_lock);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
		spin_unlock_irq(&lp->tx_cr_lock);
	}
	return packets;
}
1032
/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	/* Need one BD for the linear part plus one per fragment */
	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	/* Program the checksum-offload app words in the first BD */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	/* Map and describe the linear (header) part; SOF marks packet start */
	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	/* One additional BD per page fragment */
	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			/* Unmap the BDs already filled for this skb */
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	/* EOF on the last BD; the skb pointer lives there so the completion
	 * path can free it once the whole chain is done.
	 */
	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
	netdev_sent_queue(ndev, skb->len);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}
1151
/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data: Pointer to the axienet_local structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by dmaengine driver for RX channel to notify
 * that the packet is received.
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;
	int i;

	/* Consume the oldest outstanding RX ring entry */
	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	/* The AXI DMA app words (packet length etc.) arrive as descriptor
	 * metadata; fetch the pointer before unmapping the buffer.
	 */
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);

	if (IS_ERR(app_metadata)) {
		if (net_ratelimit())
			netdev_err(lp->ndev, "Failed to get RX metadata pointer\n");
		dev_kfree_skb_any(skb);
		lp->ndev->stats.rx_dropped++;
		/* Still refill the ring so reception does not stall */
		goto rx_submit;
	}

	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);

rx_submit:
	/* Top up the RX ring with fresh buffers for every free slot */
	for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
				   RX_BUF_NUM_DEFAULT); i++)
		axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}
1201
/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;		/* bytes received this poll */
	int packets = 0;	/* packets received this poll */
	dma_addr_t tail_p = 0;	/* last successfully refilled BD, 0 = none */
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			/* Hardware reports the received length in app4 */
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		/* Refill this slot; on allocation/mapping failure leave the
		 * slot empty (skb == NULL) and stop — a later pass retries.
		 */
		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	/* Advance the hardware tail pointer past the refilled BDs */
	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		if (READ_ONCE(lp->rx_dim_enabled)) {
			struct dim_sample sample = {
				.time = ktime_get(),
				/* Safe because we are the only writer */
				.pkt_ctr = u64_stats_read(&lp->rx_packets),
				.byte_ctr = u64_stats_read(&lp->rx_bytes),
				.event_ctr = READ_ONCE(lp->rx_irqs),
			};

			net_dim(&lp->rx_dim, &sample);
		}

		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		spin_lock_irq(&lp->rx_cr_lock);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
		spin_unlock_irq(&lp->rx_cr_lock);
	}
	return packets;
}
1327
/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	/* Acknowledge the interrupt by writing the status back */
	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		/* Error recovery (channel reset) is done from process context */
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		if (napi_schedule_prep(&lp->napi_tx)) {
			u32 cr;

			spin_lock(&lp->tx_cr_lock);
			cr = lp->tx_dma_cr;
			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
			spin_unlock(&lp->tx_cr_lock);
			__napi_schedule(&lp->napi_tx);
		}
	}

	return IRQ_HANDLED;
}
1375
/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	/* Acknowledge the interrupt by writing the status back */
	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		/* Error recovery (channel reset) is done from process context */
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		/* rx_irqs feeds the DIM (interrupt moderation) event counter */
		WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
		if (napi_schedule_prep(&lp->napi_rx)) {
			u32 cr;

			spin_lock(&lp->rx_cr_lock);
			cr = lp->rx_dma_cr;
			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
			spin_unlock(&lp->rx_cr_lock);

			__napi_schedule(&lp->napi_rx);
		}
	}

	return IRQ_HANDLED;
}
1425
1426 /**
1427 * axienet_eth_irq - Ethernet core Isr.
1428 * @irq: irq number
1429 * @_ndev: net_device pointer
1430 *
1431 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1432 *
1433 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1434 */
axienet_eth_irq(int irq,void * _ndev)1435 static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1436 {
1437 struct net_device *ndev = _ndev;
1438 struct axienet_local *lp = netdev_priv(ndev);
1439 unsigned int pending;
1440
1441 pending = axienet_ior(lp, XAE_IP_OFFSET);
1442 if (!pending)
1443 return IRQ_NONE;
1444
1445 if (pending & XAE_INT_RXFIFOOVR_MASK)
1446 ndev->stats.rx_missed_errors++;
1447
1448 if (pending & XAE_INT_RXRJECT_MASK)
1449 ndev->stats.rx_dropped++;
1450
1451 axienet_iow(lp, XAE_IS_OFFSET, pending);
1452 return IRQ_HANDLED;
1453 }
1454
1455 static void axienet_dma_err_handler(struct work_struct *work);
1456
/**
 * axienet_rx_submit_desc - Submit the rx descriptors to dmaengine.
 * allocate skbuff, map the scatterlist and obtain a descriptor
 * and then add the callback information and submit descriptor.
 *
 * @ndev: net_device pointer
 *
 */
static void axienet_rx_submit_desc(struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	struct sk_buff *skb;
	dma_addr_t addr;

	/* On any failure below we simply return without advancing
	 * rx_ring_head; the slot will be retried on a later refill pass.
	 */
	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
	if (!skbuf_dma)
		return;

	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
	if (!skb)
		return;

	/* Single-entry scatterlist covering the full receive buffer */
	sg_init_table(skbuf_dma->sgl, 1);
	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, addr))) {
		if (net_ratelimit())
			netdev_err(ndev, "DMA mapping error\n");
		goto rx_submit_err_free_skb;
	}
	sg_dma_address(skbuf_dma->sgl) = addr;
	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
					      1, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
	if (!dma_rx_desc)
		goto rx_submit_err_unmap_skb;

	/* Record everything the completion callback needs before submit */
	skbuf_dma->skb = skb;
	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
	skbuf_dma->desc = dma_rx_desc;
	dma_rx_desc->callback_param = lp;
	dma_rx_desc->callback_result = axienet_dma_rx_cb;
	lp->rx_ring_head++;
	dmaengine_submit(dma_rx_desc);

	return;

rx_submit_err_unmap_skb:
	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
rx_submit_err_free_skb:
	dev_kfree_skb(skb);
}
1511
/**
 * axienet_init_dmaengine - init the dmaengine code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the dmaengine initialization code.
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	/* Reset the software ring indices used by the xmit/callback paths */
	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kzalloc_objs(*lp->tx_skb_ring, TX_BD_NUM_MAX);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc_obj(*skbuf_dma);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			/* Cleanup loop tolerates partially-filled rings:
			 * kfree(NULL) on unallocated entries is a no-op.
			 */
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kzalloc_objs(*lp->rx_skb_ring, RX_BUF_NUM_DEFAULT);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc_obj(*skbuf_dma);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	/* Prime the RX side with buffers before traffic can arrive */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}
1592
/**
 * axienet_init_legacy_dma - init the dma legacy code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the dma initialization code. It also allocates interrupt
 * service routines, enables the interrupt lines and ISR handling.
 *
 */
static int axienet_init_legacy_dma(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Enable worker thread for Axi DMA error handling */
	lp->stopping = false;
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	/* NAPI must be enabled before the IRQ handlers can schedule it */
	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}
1647
1648 /**
1649 * axienet_open - Driver open routine.
1650 * @ndev: Pointer to net_device structure
1651 *
1652 * Return: 0, on success.
1653 * non-zero error value on failure
1654 *
1655 * This is the driver open routine. It calls phylink_start to start the
1656 * PHY device.
1657 * It also allocates interrupt service routines, enables the interrupt lines
1658 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1659 * descriptors are initialized.
1660 */
axienet_open(struct net_device * ndev)1661 static int axienet_open(struct net_device *ndev)
1662 {
1663 int ret;
1664 struct axienet_local *lp = netdev_priv(ndev);
1665
1666 /* When we do an Axi Ethernet reset, it resets the complete core
1667 * including the MDIO. MDIO must be disabled before resetting.
1668 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1669 */
1670 axienet_lock_mii(lp);
1671 ret = axienet_device_reset(ndev);
1672 axienet_unlock_mii(lp);
1673
1674 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1675 if (ret) {
1676 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1677 return ret;
1678 }
1679
1680 phylink_start(lp->phylink);
1681
1682 /* Start the statistics refresh work */
1683 schedule_delayed_work(&lp->stats_work, 0);
1684
1685 if (lp->use_dmaengine) {
1686 /* Enable interrupts for Axi Ethernet core (if defined) */
1687 if (lp->eth_irq > 0) {
1688 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1689 ndev->name, ndev);
1690 if (ret)
1691 goto err_phy;
1692 }
1693
1694 ret = axienet_init_dmaengine(ndev);
1695 if (ret < 0)
1696 goto err_free_eth_irq;
1697 } else {
1698 ret = axienet_init_legacy_dma(ndev);
1699 if (ret)
1700 goto err_phy;
1701 }
1702
1703 return 0;
1704
1705 err_free_eth_irq:
1706 if (lp->eth_irq > 0)
1707 free_irq(lp->eth_irq, ndev);
1708 err_phy:
1709 cancel_work_sync(&lp->rx_dim.work);
1710 cancel_delayed_work_sync(&lp->stats_work);
1711 phylink_stop(lp->phylink);
1712 phylink_disconnect_phy(lp->phylink);
1713 return ret;
1714 }
1715
/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	int i;

	if (!lp->use_dmaengine) {
		/* Tell the error-recovery worker to stand down, then make
		 * sure any in-flight instance has finished before NAPI is
		 * disabled.
		 */
		WRITE_ONCE(lp->stopping, true);
		flush_work(&lp->dma_err_task);

		napi_disable(&lp->napi_tx);
		napi_disable(&lp->napi_rx);
	}

	cancel_work_sync(&lp->rx_dim.work);
	cancel_delayed_work_sync(&lp->stats_work);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	/* Disable Tx/Rx in the MAC */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	if (!lp->use_dmaengine) {
		axienet_dma_stop(lp);
		cancel_work_sync(&lp->dma_err_task);
		free_irq(lp->tx_irq, ndev);
		free_irq(lp->rx_irq, ndev);
		axienet_dma_bd_release(ndev);
	} else {
		/* Terminate and quiesce both channels before freeing the
		 * skb rings they reference.
		 */
		dmaengine_terminate_sync(lp->tx_chan);
		dmaengine_synchronize(lp->tx_chan);
		dmaengine_terminate_sync(lp->rx_chan);
		dmaengine_synchronize(lp->rx_chan);

		for (i = 0; i < TX_BD_NUM_MAX; i++)
			kfree(lp->tx_skb_ring[i]);
		kfree(lp->tx_skb_ring);
		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
			kfree(lp->rx_skb_ring[i]);
		kfree(lp->rx_skb_ring);

		dma_release_channel(lp->rx_chan);
		dma_release_channel(lp->tx_chan);
	}

	/* Reset BQL state so a future open starts clean */
	netdev_reset_queue(ndev);
	axienet_iow(lp, XAE_IE_OFFSET, 0);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	return 0;
}
1778
/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success. -EBUSY if the interface is running (the MTU can
 *	    only be changed while the device is down). -EINVAL if the new
 *	    MTU (plus VLAN Ethernet header and trailer) would exceed the
 *	    configured Rx memory.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	/* The full on-wire frame must fit in the configured Rx buffer memory */
	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}
1805
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	/* Pass each handler its own IRQ number; the previous code handed
	 * the Tx IRQ to the Rx handler and vice versa.  Both handlers
	 * ignore their irq argument today, so this is a consistency fix,
	 * not a behavior change.
	 */
	axienet_tx_irq(lp->tx_irq, ndev);
	axienet_rx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif
1826
axienet_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)1827 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1828 {
1829 struct axienet_local *lp = netdev_priv(dev);
1830
1831 if (!netif_running(dev))
1832 return -EINVAL;
1833
1834 return phylink_mii_ioctl(lp->phylink, rq, cmd);
1835 }
1836
/* ndo_get_stats64: combine the classic netdev counters, the driver's
 * lockless 64-bit soft counters, and (when the core provides them) the
 * hardware statistics registers.
 */
static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	/* u64_stats retry loops give a consistent snapshot without locking
	 * out the hot Tx/Rx paths.
	 */
	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));

	/* Hardware statistics counters are optional in the IP configuration */
	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	/* seqcount retry ensures the set of HW counters is read coherently
	 * against the periodic stats refresh worker.
	 */
	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		stats->rx_length_errors =
			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
		stats->rx_frame_errors =
			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
				   stats->rx_length_errors +
				   stats->rx_crc_errors +
				   stats->rx_frame_errors;
		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);

		stats->tx_aborted_errors =
			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
		stats->tx_fifo_errors =
			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
		stats->tx_window_errors =
			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
				   stats->tx_aborted_errors +
				   stats->tx_fifo_errors +
				   stats->tx_window_errors;
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
1886
/* Netdev operations for the legacy in-driver AXI DMA path */
static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};
1901
/* Netdev operations when transfers go through the generic dmaengine API;
 * differs from axienet_netdev_ops only in the xmit path (and no netpoll).
 */
static const struct net_device_ops axienet_netdev_dmaengine_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit_dmaengine,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
};
1913
/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}
1928
/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	/* Dump is a flat array of AXIENET_REGS_N 32-bit register values */
	return sizeof(u32) * AXIENET_REGS_N;
}
1943
1944 /**
1945 * axienet_ethtools_get_regs - Dump the contents of all registers present
1946 * in AxiEthernet core.
1947 * @ndev: Pointer to net_device structure
1948 * @regs: Pointer to ethtool_regs structure
1949 * @ret: Void pointer used to return the contents of the registers.
1950 *
1951 * This implements ethtool command for getting the Axi Ethernet register dump.
1952 * Issue "ethtool -d ethX" to execute this function.
1953 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	/* Zero the whole dump first; slots not written below (e.g. 23-26)
	 * are reported as zero.
	 */
	memset(data, 0, len);
	/* Ethernet core configuration and status registers */
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	/* MDIO controller registers */
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	/* Unicast address and frame filter registers */
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	/* AXI DMA registers are only mapped on the non-dmaengine datapath */
	if (!lp->use_dmaengine) {
		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
	}
}
2004
2005 static void
axienet_ethtools_get_ringparam(struct net_device * ndev,struct ethtool_ringparam * ering,struct kernel_ethtool_ringparam * kernel_ering,struct netlink_ext_ack * extack)2006 axienet_ethtools_get_ringparam(struct net_device *ndev,
2007 struct ethtool_ringparam *ering,
2008 struct kernel_ethtool_ringparam *kernel_ering,
2009 struct netlink_ext_ack *extack)
2010 {
2011 struct axienet_local *lp = netdev_priv(ndev);
2012
2013 ering->rx_max_pending = RX_BD_NUM_MAX;
2014 ering->rx_mini_max_pending = 0;
2015 ering->rx_jumbo_max_pending = 0;
2016 ering->tx_max_pending = TX_BD_NUM_MAX;
2017 ering->rx_pending = lp->rx_bd_num;
2018 ering->rx_mini_pending = 0;
2019 ering->rx_jumbo_pending = 0;
2020 ering->tx_pending = lp->tx_bd_num;
2021 }
2022
2023 static int
axienet_ethtools_set_ringparam(struct net_device * ndev,struct ethtool_ringparam * ering,struct kernel_ethtool_ringparam * kernel_ering,struct netlink_ext_ack * extack)2024 axienet_ethtools_set_ringparam(struct net_device *ndev,
2025 struct ethtool_ringparam *ering,
2026 struct kernel_ethtool_ringparam *kernel_ering,
2027 struct netlink_ext_ack *extack)
2028 {
2029 struct axienet_local *lp = netdev_priv(ndev);
2030
2031 if (ering->rx_pending > RX_BD_NUM_MAX ||
2032 ering->rx_mini_pending ||
2033 ering->rx_jumbo_pending ||
2034 ering->tx_pending < TX_BD_NUM_MIN ||
2035 ering->tx_pending > TX_BD_NUM_MAX)
2036 return -EINVAL;
2037
2038 if (netif_running(ndev))
2039 return -EBUSY;
2040
2041 lp->rx_bd_num = ering->rx_pending;
2042 lp->tx_bd_num = ering->tx_pending;
2043 return 0;
2044 }
2045
2046 /**
2047 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
2048 * Tx and Rx paths.
2049 * @ndev: Pointer to net_device structure
2050 * @epauseparm: Pointer to ethtool_pauseparam structure.
2051 *
2052 * This implements ethtool command for getting axi ethernet pause frame
2053 * setting. Issue "ethtool -a ethX" to execute this function.
2054 */
2055 static void
axienet_ethtools_get_pauseparam(struct net_device * ndev,struct ethtool_pauseparam * epauseparm)2056 axienet_ethtools_get_pauseparam(struct net_device *ndev,
2057 struct ethtool_pauseparam *epauseparm)
2058 {
2059 struct axienet_local *lp = netdev_priv(ndev);
2060
2061 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
2062 }
2063
2064 /**
2065 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
2066 * settings.
2067 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
2069 *
2070 * This implements ethtool command for enabling flow control on Rx and Tx
2071 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
2072 * function.
2073 *
 * Return: 0 on success, or a negative error code from phylink on failure
2075 */
2076 static int
axienet_ethtools_set_pauseparam(struct net_device * ndev,struct ethtool_pauseparam * epauseparm)2077 axienet_ethtools_set_pauseparam(struct net_device *ndev,
2078 struct ethtool_pauseparam *epauseparm)
2079 {
2080 struct axienet_local *lp = netdev_priv(ndev);
2081
2082 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2083 }
2084
2085 /**
2086 * axienet_update_coalesce_rx() - Set RX CR
2087 * @lp: Device private data
2088 * @cr: Value to write to the RX CR
2089 * @mask: Bits to set from @cr
2090 */
static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
				       u32 mask)
{
	spin_lock_irq(&lp->rx_cr_lock);
	/* Merge the requested bits into the cached CR value */
	lp->rx_dma_cr &= ~mask;
	lp->rx_dma_cr |= cr;
	/* If DMA isn't started, then the settings will be applied the next
	 * time dma_start() is called.
	 */
	if (lp->rx_dma_started) {
		u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);

		/* Don't enable IRQs if they are disabled by NAPI */
		if (reg & XAXIDMA_IRQ_ALL_MASK)
			cr = lp->rx_dma_cr;
		else
			cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	}
	spin_unlock_irq(&lp->rx_cr_lock);
}
2112
2113 /**
2114 * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
2115 * @lp: Device private data
2116 *
2117 * Return: RX coalescing frame count value for DIM.
2118 */
axienet_dim_coalesce_count_rx(struct axienet_local * lp)2119 static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
2120 {
2121 return min(1 << (lp->rx_dim.profile_ix << 1), 255);
2122 }
2123
2124 /**
2125 * axienet_rx_dim_work() - Adjust RX DIM settings
2126 * @work: The work struct
2127 */
axienet_rx_dim_work(struct work_struct * work)2128 static void axienet_rx_dim_work(struct work_struct *work)
2129 {
2130 struct axienet_local *lp =
2131 container_of(work, struct axienet_local, rx_dim.work);
2132 u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
2133 u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
2134 XAXIDMA_IRQ_ERROR_MASK;
2135
2136 axienet_update_coalesce_rx(lp, cr, mask);
2137 lp->rx_dim.state = DIM_START_MEASURE;
2138 }
2139
2140 /**
2141 * axienet_update_coalesce_tx() - Set TX CR
2142 * @lp: Device private data
2143 * @cr: Value to write to the TX CR
2144 * @mask: Bits to set from @cr
2145 */
static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
				       u32 mask)
{
	spin_lock_irq(&lp->tx_cr_lock);
	/* Merge the requested bits into the cached CR value */
	lp->tx_dma_cr &= ~mask;
	lp->tx_dma_cr |= cr;
	/* If DMA isn't started, then the settings will be applied the next
	 * time dma_start() is called.
	 */
	if (lp->tx_dma_started) {
		u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);

		/* Don't enable IRQs if they are disabled by NAPI */
		if (reg & XAXIDMA_IRQ_ALL_MASK)
			cr = lp->tx_dma_cr;
		else
			cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	}
	spin_unlock_irq(&lp->tx_cr_lock);
}
2167
2168 /**
2169 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2170 * @ndev: Pointer to net_device structure
2171 * @ecoalesce: Pointer to ethtool_coalesce structure
2172 * @kernel_coal: ethtool CQE mode setting structure
2173 * @extack: extack for reporting error messages
2174 *
2175 * This implements ethtool command for getting the DMA interrupt coalescing
2176 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2177 * execute this function.
2178 *
2179 * Return: 0 always
2180 */
2181 static int
axienet_ethtools_get_coalesce(struct net_device * ndev,struct ethtool_coalesce * ecoalesce,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)2182 axienet_ethtools_get_coalesce(struct net_device *ndev,
2183 struct ethtool_coalesce *ecoalesce,
2184 struct kernel_ethtool_coalesce *kernel_coal,
2185 struct netlink_ext_ack *extack)
2186 {
2187 struct axienet_local *lp = netdev_priv(ndev);
2188 u32 cr;
2189
2190 ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
2191
2192 spin_lock_irq(&lp->rx_cr_lock);
2193 cr = lp->rx_dma_cr;
2194 spin_unlock_irq(&lp->rx_cr_lock);
2195 axienet_coalesce_params(lp, cr,
2196 &ecoalesce->rx_max_coalesced_frames,
2197 &ecoalesce->rx_coalesce_usecs);
2198
2199 spin_lock_irq(&lp->tx_cr_lock);
2200 cr = lp->tx_dma_cr;
2201 spin_unlock_irq(&lp->tx_cr_lock);
2202 axienet_coalesce_params(lp, cr,
2203 &ecoalesce->tx_max_coalesced_frames,
2204 &ecoalesce->tx_coalesce_usecs);
2205 return 0;
2206 }
2207
2208 /**
2209 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2210 * @ndev: Pointer to net_device structure
2211 * @ecoalesce: Pointer to ethtool_coalesce structure
2212 * @kernel_coal: ethtool CQE mode setting structure
2213 * @extack: extack for reporting error messages
2214 *
2215 * This implements ethtool command for setting the DMA interrupt coalescing
2216 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2217 * prompt to execute this function.
2218 *
2219 * Return: 0, on success, Non-zero error value on failure.
2220 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);
	bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
	bool old_dim = lp->rx_dim_enabled;
	u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;

	/* The hardware's coalesce frame counter is an 8-bit field */
	if (ecoalesce->rx_max_coalesced_frames > 255 ||
	    ecoalesce->tx_max_coalesced_frames > 255) {
		NL_SET_ERR_MSG(extack, "frames must be less than 256");
		return -EINVAL;
	}

	if (!ecoalesce->rx_max_coalesced_frames ||
	    !ecoalesce->tx_max_coalesced_frames) {
		NL_SET_ERR_MSG(extack, "frames must be non-zero");
		return -EINVAL;
	}

	/* Without a timeout, a multi-frame threshold could delay completion
	 * indefinitely on an idle link.
	 */
	if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
	     !ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->tx_max_coalesced_frames > 1 &&
	     !ecoalesce->tx_coalesce_usecs)) {
		NL_SET_ERR_MSG(extack,
			       "usecs must be non-zero when frames is greater than one");
		return -EINVAL;
	}

	if (new_dim && !old_dim) {
		/* Enabling DIM: seed the frame count from the current DIM
		 * profile; the flag is only set after the CR is updated.
		 */
		cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
				     ecoalesce->rx_coalesce_usecs);
	} else if (!new_dim) {
		if (old_dim) {
			/* Disabling DIM: clear the flag first so NAPI stops
			 * feeding samples, then wait out any in-flight poll
			 * and pending DIM work before overwriting the CR.
			 */
			WRITE_ONCE(lp->rx_dim_enabled, false);
			napi_synchronize(&lp->napi_rx);
			flush_work(&lp->rx_dim.work);
		}

		cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
				     ecoalesce->rx_coalesce_usecs);
	} else {
		/* Dummy value for count just to calculate timer */
		cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
		/* DIM stays in control of the frame count; only update the
		 * delay timer fields.
		 */
		mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
	}

	axienet_update_coalesce_rx(lp, cr, mask);
	if (new_dim && !old_dim)
		WRITE_ONCE(lp->rx_dim_enabled, true);

	cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
			     ecoalesce->tx_coalesce_usecs);
	axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
	return 0;
}
2280
2281 static int
axienet_ethtools_get_link_ksettings(struct net_device * ndev,struct ethtool_link_ksettings * cmd)2282 axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2283 struct ethtool_link_ksettings *cmd)
2284 {
2285 struct axienet_local *lp = netdev_priv(ndev);
2286
2287 return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2288 }
2289
2290 static int
axienet_ethtools_set_link_ksettings(struct net_device * ndev,const struct ethtool_link_ksettings * cmd)2291 axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2292 const struct ethtool_link_ksettings *cmd)
2293 {
2294 struct axienet_local *lp = netdev_priv(ndev);
2295
2296 return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2297 }
2298
axienet_ethtools_nway_reset(struct net_device * dev)2299 static int axienet_ethtools_nway_reset(struct net_device *dev)
2300 {
2301 struct axienet_local *lp = netdev_priv(dev);
2302
2303 return phylink_ethtool_nway_reset(lp->phylink);
2304 }
2305
/**
 * axienet_ethtools_get_ethtool_stats - Fill the custom statistics buffer.
 * @dev: Pointer to net_device structure
 * @stats: ethtool stats header (unused here)
 * @data: Output array; one u64 per entry of
 *	  axienet_ethtool_stats_strings, in the same order.
 *
 * The buffer provided by the ethtool core holds exactly the number of
 * entries returned by axienet_ethtools_get_sset_count() (the size of the
 * strings table), so indices must be contiguous from 0. The previous
 * code skipped data[4]/data[5] (leaving those slots uninitialized against
 * the PFC strings) and wrote data[9]/data[10] past the end of the buffer.
 */
static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
					       struct ethtool_stats *stats,
					       u64 *data)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	/* Retry until a consistent snapshot of the 64-bit counters is read */
	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		data[0] = axienet_stat(lp, STAT_RX_BYTES);
		data[1] = axienet_stat(lp, STAT_TX_BYTES);
		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
		data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
		data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
		data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
		data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
		data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
2326
/* Names of the custom ethtool statistics; the order here defines the
 * data[] index each counter must be written to in
 * axienet_ethtools_get_ethtool_stats(), and its length is what
 * axienet_ethtools_get_sset_count() reports to the ethtool core.
 */
static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
	"Received bytes",
	"Transmitted bytes",
	"RX Good VLAN Tagged Frames",
	"TX Good VLAN Tagged Frames",
	"TX Good PFC Frames",
	"RX Good PFC Frames",
	"User Defined Counter 0",
	"User Defined Counter 1",
	"User Defined Counter 2",
};
2338
static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	/* Only the custom statistics string set is provided */
	if (stringset != ETH_SS_STATS)
		return;

	memcpy(data, axienet_ethtool_stats_strings,
	       sizeof(axienet_ethtool_stats_strings));
}
2348
axienet_ethtools_get_sset_count(struct net_device * dev,int sset)2349 static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2350 {
2351 struct axienet_local *lp = netdev_priv(dev);
2352
2353 switch (sset) {
2354 case ETH_SS_STATS:
2355 if (lp->features & XAE_FEATURE_STATS)
2356 return ARRAY_SIZE(axienet_ethtool_stats_strings);
2357 fallthrough;
2358 default:
2359 return -EOPNOTSUPP;
2360 }
2361 }
2362
/* Report hardware pause-frame counters; leaves @pause_stats untouched
 * (i.e. "not supported") when the core lacks statistics counters.
 */
static void
axienet_ethtools_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *pause_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	/* Retry until a consistent snapshot of the counters is read */
	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		pause_stats->tx_pause_frames =
			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
		pause_stats->rx_pause_frames =
			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
2381
/* Map the hardware statistics counters onto the standard IEEE 802.3 MAC
 * statistics structure; untouched fields are reported as unsupported.
 */
static void
axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
				  struct ethtool_eth_mac_stats *mac_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	/* Retry until a consistent snapshot of the counters is read */
	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		mac_stats->FramesTransmittedOK =
			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
		mac_stats->SingleCollisionFrames =
			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
		mac_stats->MultipleCollisionFrames =
			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
		mac_stats->FramesReceivedOK =
			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
		mac_stats->FrameCheckSequenceErrors =
			axienet_stat(lp, STAT_RX_FCS_ERRORS);
		mac_stats->AlignmentErrors =
			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
		mac_stats->FramesWithDeferredXmissions =
			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
		mac_stats->LateCollisions =
			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
		mac_stats->FramesAbortedDueToXSColls =
			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
		mac_stats->MulticastFramesXmittedOK =
			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
		mac_stats->BroadcastFramesXmittedOK =
			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
		mac_stats->FramesWithExcessiveDeferral =
			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
		mac_stats->MulticastFramesReceivedOK =
			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
		mac_stats->BroadcastFramesReceivedOK =
			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
		mac_stats->InRangeLengthErrors =
			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
2426
/* Report MAC control-frame (pause/PFC opcode) counters from hardware */
static void
axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
				   struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	/* Retry until a consistent snapshot of the counters is read */
	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		ctrl_stats->MACControlFramesTransmitted =
			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
		ctrl_stats->MACControlFramesReceived =
			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
		ctrl_stats->UnsupportedOpcodesReceived =
			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
2447
/* Frame-size buckets matching the hardware's RX/TX histogram counters;
 * each entry corresponds to one hist[]/hist_tx[] slot filled in
 * axienet_ethtool_get_rmon_stats(). Terminated by an empty entry.
 */
static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
	{   64,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519, 16384 },
	{ },
};
2458
/* Report RMON counters and the per-size-bucket frame histograms; the
 * hist[]/hist_tx[] indices follow axienet_rmon_ranges[].
 */
static void
axienet_ethtool_get_rmon_stats(struct net_device *dev,
			       struct ethtool_rmon_stats *rmon_stats,
			       const struct ethtool_rmon_hist_range **ranges)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	/* Retry until a consistent snapshot of the counters is read */
	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		rmon_stats->undersize_pkts =
			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
		rmon_stats->oversize_pkts =
			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
		rmon_stats->fragments =
			axienet_stat(lp, STAT_FRAGMENT_FRAMES);

		rmon_stats->hist[0] =
			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
		rmon_stats->hist[1] =
			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
		rmon_stats->hist[2] =
			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
		rmon_stats->hist[3] =
			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
		rmon_stats->hist[4] =
			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
		rmon_stats->hist[5] =
			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
		/* No dedicated RX oversize histogram counter; reuse the
		 * oversize-frames total for the last bucket.
		 */
		rmon_stats->hist[6] =
			rmon_stats->oversize_pkts;

		rmon_stats->hist_tx[0] =
			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
		rmon_stats->hist_tx[1] =
			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
		rmon_stats->hist_tx[2] =
			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
		rmon_stats->hist_tx[3] =
			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
		rmon_stats->hist_tx[4] =
			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
		rmon_stats->hist_tx[5] =
			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
		rmon_stats->hist_tx[6] =
			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));

	*ranges = axienet_rmon_ranges;
}
2512
/* ethtool operations supported by the driver */
static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo    = axienet_ethtools_get_drvinfo,
	.get_regs_len   = axienet_ethtools_get_regs_len,
	.get_regs       = axienet_ethtools_get_regs,
	.get_link       = ethtool_op_get_link,
	.get_ringparam	= axienet_ethtools_get_ringparam,
	.set_ringparam	= axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce   = axienet_ethtools_get_coalesce,
	.set_coalesce   = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset	= axienet_ethtools_nway_reset,
	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
	.get_strings    = axienet_ethtools_get_strings,
	.get_sset_count = axienet_ethtools_get_sset_count,
	.get_pause_stats = axienet_ethtools_get_pause_stats,
	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
};
2538
/* Resolve the driver private data from the embedded phylink_pcs member */
static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct axienet_local, pcs);
}
2543
axienet_pcs_get_state(struct phylink_pcs * pcs,unsigned int neg_mode,struct phylink_link_state * state)2544 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2545 unsigned int neg_mode,
2546 struct phylink_link_state *state)
2547 {
2548 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2549
2550 phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
2551 }
2552
axienet_pcs_an_restart(struct phylink_pcs * pcs)2553 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2554 {
2555 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2556
2557 phylink_mii_c22_pcs_an_restart(pcs_phy);
2558 }
2559
static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	struct axienet_local *lp = pcs_to_axienet_local(pcs);
	struct mdio_device *pcs_phy = lp->pcs_phy;
	struct net_device *ndev = lp->ndev;
	int ret;

	/* Cores built with SGMII/1000BASE-X switching support need the
	 * standard-select register programmed before the PCS is configured.
	 */
	if (lp->switch_x_sgmii) {
		u16 std_sel = 0;

		if (interface == PHY_INTERFACE_MODE_SGMII)
			std_sel = XLNX_MII_STD_SELECT_SGMII;

		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG, std_sel);
		if (ret < 0) {
			netdev_warn(ndev,
				    "Failed to switch PHY interface: %d\n",
				    ret);
			return ret;
		}
	}

	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
					 neg_mode);
	if (ret < 0)
		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);

	return ret;
}
2589
/* phylink PCS callbacks for the internal SGMII/1000BASE-X PCS */
static const struct phylink_pcs_ops axienet_pcs_ops = {
	.pcs_get_state = axienet_pcs_get_state,
	.pcs_config = axienet_pcs_config,
	.pcs_an_restart = axienet_pcs_an_restart,
};
2595
static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
						  phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	/* The internal PCS only handles the serial interface modes */
	switch (interface) {
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_SGMII:
		return &lp->pcs;
	default:
		return NULL;
	}
}
2608
/* phylink .mac_config callback; all MAC programming happens in
 * axienet_mac_link_up(), so there is nothing to do here.
 */
static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* nothing meaningful to do */
}
2614
/* phylink .mac_link_down callback; the MAC needs no teardown on link loss */
static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}
2621
static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	/* Program the negotiated link speed into the MAC */
	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET) & ~XAE_EMMC_LINKSPEED_MASK;

	if (speed == SPEED_1000)
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
	else if (speed == SPEED_100)
		emmc_reg |= XAE_EMMC_LINKSPD_100;
	else if (speed == SPEED_10)
		emmc_reg |= XAE_EMMC_LINKSPD_10;
	else
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	/* Mirror the resolved pause configuration into the flow-control
	 * register.
	 */
	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	fcc_reg &= ~(XAE_FCC_FCTX_MASK | XAE_FCC_FCRX_MASK);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}
2664
/* phylink MAC callbacks */
static const struct phylink_mac_ops axienet_phylink_ops = {
	.mac_select_pcs = axienet_mac_select_pcs,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};
2671
2672 /**
2673 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2674 * @work: pointer to work_struct
2675 *
2676 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2677 * Tx/Rx BDs.
2678 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 i;
	u32 axienet_status;
	struct axidma_bd *cur_p;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;

	/* Don't bother if we are going to stop anyway */
	if (READ_ONCE(lp->stopping))
		return;

	/* Quiesce NAPI and disable the MAC before touching the rings */
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);
	netdev_reset_queue(ndev);

	/* Release any in-flight TX buffers and clear the TX descriptors */
	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(lp->dev, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	/* Clear RX descriptor status; buffer addresses stay mapped */
	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	/* Restart both rings from the beginning */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	axienet_dma_start(lp);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	/* Acknowledge a pending RX-reject interrupt, if any */
	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	/* Re-enable NAPI before turning the MAC back on */
	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);
	axienet_setoptions(ndev, lp->options);
}
2763
2764 /**
2765 * axienet_probe - Axi Ethernet probe function.
2766 * @pdev: Pointer to platform device structure.
2767 *
2768 * Return: 0, on success
2769 * Non-zero error value on failure.
2770 *
2771 * This is the probe routine for Axi Ethernet driver. This is called before
2772 * any other driver routines are invoked. It allocates and sets up the Ethernet
2773 * device. Parses through device tree and populates fields of
2774 * axienet_local. It registers the Ethernet device.
2775 */
axienet_probe(struct platform_device * pdev)2776 static int axienet_probe(struct platform_device *pdev)
2777 {
2778 int ret;
2779 struct device_node *np;
2780 struct axienet_local *lp;
2781 struct net_device *ndev;
2782 struct resource *ethres;
2783 u8 mac_addr[ETH_ALEN];
2784 int addr_width = 32;
2785 u32 value;
2786
2787 ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
2788 if (!ndev)
2789 return -ENOMEM;
2790
2791 platform_set_drvdata(pdev, ndev);
2792
2793 SET_NETDEV_DEV(ndev, &pdev->dev);
2794 ndev->features = NETIF_F_SG;
2795 ndev->ethtool_ops = &axienet_ethtool_ops;
2796
2797 /* MTU range: 64 - 9000 */
2798 ndev->min_mtu = 64;
2799 ndev->max_mtu = XAE_JUMBO_MTU;
2800
2801 lp = netdev_priv(ndev);
2802 lp->ndev = ndev;
2803 lp->dev = &pdev->dev;
2804 lp->options = XAE_OPTION_DEFAULTS;
2805 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2806 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2807
2808 u64_stats_init(&lp->rx_stat_sync);
2809 u64_stats_init(&lp->tx_stat_sync);
2810
2811 mutex_init(&lp->stats_lock);
2812 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2813 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2814
2815 lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev,
2816 "s_axi_lite_clk");
2817 if (!lp->axi_clk) {
2818 /* For backward compatibility, if named AXI clock is not present,
2819 * treat the first clock specified as the AXI clock.
2820 */
2821 lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
2822 }
2823 if (IS_ERR(lp->axi_clk))
2824 return dev_err_probe(&pdev->dev, PTR_ERR(lp->axi_clk),
2825 "could not get AXI clock\n");
2826
2827 lp->misc_clks[0].id = "axis_clk";
2828 lp->misc_clks[1].id = "ref_clk";
2829 lp->misc_clks[2].id = "mgt_clk";
2830
2831 ret = devm_clk_bulk_get_optional_enable(&pdev->dev, XAE_NUM_MISC_CLOCKS,
2832 lp->misc_clks);
2833 if (ret)
2834 return dev_err_probe(&pdev->dev, ret,
2835 "could not get/enable misc. clocks\n");
2836
2837 /* Map device registers */
2838 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ðres);
2839 if (IS_ERR(lp->regs))
2840 return PTR_ERR(lp->regs);
2841 lp->regs_start = ethres->start;
2842
2843 /* Setup checksum offload, but default to off if not specified */
2844 lp->features = 0;
2845
2846 if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2847 lp->features |= XAE_FEATURE_STATS;
2848
2849 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2850 if (!ret) {
2851 switch (value) {
2852 case 1:
2853 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2854 /* Can checksum any contiguous range */
2855 ndev->features |= NETIF_F_HW_CSUM;
2856 break;
2857 case 2:
2858 lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2859 /* Can checksum TCP/UDP over IPv4. */
2860 ndev->features |= NETIF_F_IP_CSUM;
2861 break;
2862 }
2863 }
2864 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2865 if (!ret) {
2866 switch (value) {
2867 case 1:
2868 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2869 ndev->features |= NETIF_F_RXCSUM;
2870 break;
2871 case 2:
2872 lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2873 ndev->features |= NETIF_F_RXCSUM;
2874 break;
2875 }
2876 }
2877 /* For supporting jumbo frames, the Axi Ethernet hardware must have
2878 * a larger Rx/Tx Memory. Typically, the size must be large so that
2879 * we can enable jumbo option and start supporting jumbo frames.
2880 * Here we check for memory allocated for Rx/Tx in the hardware from
2881 * the device-tree and accordingly set flags.
2882 */
2883 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2884
2885 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2886 "xlnx,switch-x-sgmii");
2887
2888 /* Start with the proprietary, and broken phy_type */
2889 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2890 if (!ret) {
2891 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2892 switch (value) {
2893 case XAE_PHY_TYPE_MII:
2894 lp->phy_mode = PHY_INTERFACE_MODE_MII;
2895 break;
2896 case XAE_PHY_TYPE_GMII:
2897 lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2898 break;
2899 case XAE_PHY_TYPE_RGMII_2_0:
2900 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2901 break;
2902 case XAE_PHY_TYPE_SGMII:
2903 lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2904 break;
2905 case XAE_PHY_TYPE_1000BASE_X:
2906 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2907 break;
2908 default:
2909 return -EINVAL;
2910 }
2911 } else {
2912 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2913 if (ret)
2914 return ret;
2915 }
2916 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2917 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2918 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2919 return -EINVAL;
2920 }
2921
2922 if (!of_property_present(pdev->dev.of_node, "dmas")) {
2923 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2924 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2925
2926 if (np) {
2927 struct resource dmares;
2928
2929 ret = of_address_to_resource(np, 0, &dmares);
2930 if (ret) {
2931 dev_err(&pdev->dev,
2932 "unable to get DMA resource\n");
2933 of_node_put(np);
2934 return ret;
2935 }
2936 lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2937 &dmares);
2938 lp->rx_irq = irq_of_parse_and_map(np, 1);
2939 lp->tx_irq = irq_of_parse_and_map(np, 0);
2940 of_node_put(np);
2941 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2942 } else {
2943 /* Check for these resources directly on the Ethernet node. */
2944 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2945 lp->rx_irq = platform_get_irq(pdev, 1);
2946 lp->tx_irq = platform_get_irq(pdev, 0);
2947 lp->eth_irq = platform_get_irq_optional(pdev, 2);
2948 }
2949 if (IS_ERR(lp->dma_regs)) {
2950 dev_err(&pdev->dev, "could not map DMA regs\n");
2951 return PTR_ERR(lp->dma_regs);
2952 }
2953 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2954 dev_err(&pdev->dev, "could not determine irqs\n");
2955 return -ENOMEM;
2956 }
2957
2958 /* Reset core now that clocks are enabled, prior to accessing MDIO */
2959 ret = __axienet_device_reset(lp);
2960 if (ret)
2961 return ret;
2962
2963 /* Autodetect the need for 64-bit DMA pointers.
2964 * When the IP is configured for a bus width bigger than 32 bits,
2965 * writing the MSB registers is mandatory, even if they are all 0.
2966 * We can detect this case by writing all 1's to one such register
2967 * and see if that sticks: when the IP is configured for 32 bits
2968 * only, those registers are RES0.
2969 * Those MSB registers were introduced in IP v7.1, which we check first.
2970 */
2971 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2972 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2973
2974 iowrite32(0x0, desc);
2975 if (ioread32(desc) == 0) { /* sanity check */
2976 iowrite32(0xffffffff, desc);
2977 if (ioread32(desc) > 0) {
2978 lp->features |= XAE_FEATURE_DMA_64BIT;
2979 addr_width = 64;
2980 dev_info(&pdev->dev,
2981 "autodetected 64-bit DMA range\n");
2982 }
2983 iowrite32(0x0, desc);
2984 }
2985 }
2986 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2987 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2988 return -EINVAL;
2989 }
2990
2991 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2992 if (ret) {
2993 dev_err(&pdev->dev, "No suitable DMA available\n");
2994 return ret;
2995 }
2996 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2997 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2998 } else {
2999 struct xilinx_vdma_config cfg;
3000 struct dma_chan *tx_chan;
3001
3002 lp->eth_irq = platform_get_irq_optional(pdev, 0);
3003 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
3004 return lp->eth_irq;
3005 }
3006 tx_chan = dma_request_chan(lp->dev, "tx_chan0");
3007 if (IS_ERR(tx_chan))
3008 return dev_err_probe(lp->dev, PTR_ERR(tx_chan),
3009 "No Ethernet DMA (TX) channel found\n");
3010
3011 cfg.reset = 1;
3012 /* As name says VDMA but it has support for DMA channel reset */
3013 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
3014 if (ret < 0) {
3015 dev_err(&pdev->dev, "Reset channel failed\n");
3016 dma_release_channel(tx_chan);
3017 return ret;
3018 }
3019
3020 dma_release_channel(tx_chan);
3021 lp->use_dmaengine = 1;
3022 }
3023
3024 if (lp->use_dmaengine)
3025 ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
3026 else
3027 ndev->netdev_ops = &axienet_netdev_ops;
3028 /* Check for Ethernet core IRQ (optional) */
3029 if (lp->eth_irq <= 0)
3030 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
3031
3032 /* Retrieve the MAC address */
3033 ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
3034 if (!ret) {
3035 axienet_set_mac_address(ndev, mac_addr);
3036 } else {
3037 dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
3038 ret);
3039 axienet_set_mac_address(ndev, NULL);
3040 }
3041
3042 spin_lock_init(&lp->rx_cr_lock);
3043 spin_lock_init(&lp->tx_cr_lock);
3044 INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
3045 lp->rx_dim_enabled = true;
3046 lp->rx_dim.profile_ix = 1;
3047 lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
3048 XAXIDMA_DFT_RX_USEC);
3049 lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
3050 XAXIDMA_DFT_TX_USEC);
3051
3052 ret = axienet_mdio_setup(lp);
3053 if (ret)
3054 dev_warn(&pdev->dev,
3055 "error registering MDIO bus: %d\n", ret);
3056
3057 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
3058 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
3059 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
3060 if (!np) {
3061 /* Deprecated: Always use "pcs-handle" for pcs_phy.
3062 * Falling back to "phy-handle" here is only for
3063 * backward compatibility with old device trees.
3064 */
3065 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
3066 }
3067 if (!np) {
3068 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
3069 ret = -EINVAL;
3070 goto cleanup_mdio;
3071 }
3072 lp->pcs_phy = of_mdio_find_device(np);
3073 if (!lp->pcs_phy) {
3074 ret = -EPROBE_DEFER;
3075 of_node_put(np);
3076 goto cleanup_mdio;
3077 }
3078 of_node_put(np);
3079 lp->pcs.ops = &axienet_pcs_ops;
3080 lp->pcs.poll = true;
3081 }
3082
3083 lp->phylink_config.dev = &ndev->dev;
3084 lp->phylink_config.type = PHYLINK_NETDEV;
3085 lp->phylink_config.mac_managed_pm = true;
3086 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
3087 MAC_10FD | MAC_100FD | MAC_1000FD;
3088
3089 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
3090 if (lp->switch_x_sgmii) {
3091 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
3092 lp->phylink_config.supported_interfaces);
3093 __set_bit(PHY_INTERFACE_MODE_SGMII,
3094 lp->phylink_config.supported_interfaces);
3095 }
3096
3097 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
3098 lp->phy_mode,
3099 &axienet_phylink_ops);
3100 if (IS_ERR(lp->phylink)) {
3101 ret = PTR_ERR(lp->phylink);
3102 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
3103 goto cleanup_mdio;
3104 }
3105
3106 ret = register_netdev(lp->ndev);
3107 if (ret) {
3108 dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
3109 goto cleanup_phylink;
3110 }
3111
3112 return 0;
3113
3114 cleanup_phylink:
3115 phylink_destroy(lp->phylink);
3116
3117 cleanup_mdio:
3118 if (lp->pcs_phy)
3119 put_device(&lp->pcs_phy->dev);
3120 if (lp->mii_bus)
3121 axienet_mdio_teardown(lp);
3122 return ret;
3123 }
3124
/**
 * axienet_remove - Axi Ethernet remove function.
 * @pdev:	Pointer to platform device structure.
 *
 * Undoes axienet_probe() in reverse order: unregister the net device
 * first so the interface is closed and no new opens can race the
 * teardown of phylink, the PCS device reference, and the MDIO bus.
 * devm-managed resources (regs, clocks, netdev) are released by the core.
 */
static void axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	/* pcs_phy is only acquired for SGMII/1000BaseX links; drop the
	 * reference taken via of_mdio_find_device() in probe.
	 */
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);
}
3140
/**
 * axienet_shutdown - Axi Ethernet shutdown hook.
 * @pdev:	Pointer to platform device structure.
 *
 * Quiesce the interface for system shutdown/reboot: mark the device
 * detached so the networking core stops touching it, then close it if
 * it was up. Both steps require the RTNL lock.
 */
static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		dev_close(netdev);
	rtnl_unlock();
}
3153
/**
 * axienet_suspend - Device suspend (PM) callback.
 * @dev: generic device pointer; drvdata is our net_device.
 *
 * If the interface is up, detach it from the stack and stop it under
 * the RTNL lock. An interface that is down needs no work.
 *
 * Return: always 0.
 */
static int axienet_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (netif_running(ndev)) {
		netif_device_detach(ndev);

		rtnl_lock();
		axienet_stop(ndev);
		rtnl_unlock();
	}

	return 0;
}
3169
/**
 * axienet_resume - Device resume (PM) callback.
 * @dev: generic device pointer; drvdata is our net_device.
 *
 * Counterpart of axienet_suspend(): if the interface was running when
 * we suspended, reopen it and reattach it to the stack.
 *
 * Return: 0 on success, or the error from axienet_open() if reopening
 * the interface fails (the device is then left detached).
 */
static int axienet_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	rtnl_lock();
	/* Previously the return value was ignored, silently attaching a
	 * dead interface on open failure; propagate the error instead.
	 */
	ret = axienet_open(ndev);
	rtnl_unlock();
	if (ret) {
		dev_err(dev, "failed to reopen device on resume: %d\n", ret);
		return ret;
	}

	netif_device_attach(ndev);

	return 0;
}
3185
/* System sleep PM ops: no-ops automatically when CONFIG_PM_SLEEP is off */
static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
				axienet_suspend, axienet_resume);

/* Platform driver glue; matched via the OF table (or name "xilinx_axienet") */
static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		 .name = "xilinx_axienet",
		 .pm = &axienet_pm_ops,
		 .of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");