1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Xilinx Axi Ethernet device driver
4 *
5 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
6 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
9 * Copyright (c) 2010 - 2011 PetaLogix
10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
12 *
13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
14 * and Spartan6.
15 *
16 * TODO:
17 * - Add Axi Fifo support.
18 * - Factor out Axi DMA code into separate driver.
19 * - Test and fix basic multicast filtering.
20 * - Add support for extended multicast filtering.
21 * - Test basic VLAN support.
22 * - Add support for extended VLAN support.
23 */
24
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/etherdevice.h>
28 #include <linux/module.h>
29 #include <linux/netdevice.h>
30 #include <linux/of.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_address.h>
35 #include <linux/platform_device.h>
36 #include <linux/skbuff.h>
37 #include <linux/math64.h>
38 #include <linux/phy.h>
39 #include <linux/mii.h>
40 #include <linux/ethtool.h>
41 #include <linux/dmaengine.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/dma/xilinx_dma.h>
44 #include <linux/circ_buf.h>
45 #include <net/netdev_queues.h>
46
47 #include "xilinx_axienet.h"
48
49 /* Descriptors defines for Tx and Rx DMA */
50 #define TX_BD_NUM_DEFAULT 128
51 #define RX_BD_NUM_DEFAULT 1024
52 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
53 #define TX_BD_NUM_MAX 4096
54 #define RX_BD_NUM_MAX 4096
55 #define DMA_NUM_APP_WORDS 5
56 #define LEN_APP 4
57 #define RX_BUF_NUM_DEFAULT 128
58
59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
60 #define DRIVER_NAME "xaxienet"
61 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
62 #define DRIVER_VERSION "1.00a"
63
64 #define AXIENET_REGS_N 40
65
66 static void axienet_rx_submit_desc(struct net_device *ndev);
67
68 /* Match table for of_platform binding */
69 static const struct of_device_id axienet_of_match[] = {
70 { .compatible = "xlnx,axi-ethernet-1.00.a", },
71 { .compatible = "xlnx,axi-ethernet-1.01.a", },
72 { .compatible = "xlnx,axi-ethernet-2.01.a", },
73 {},
74 };
75
76 MODULE_DEVICE_TABLE(of, axienet_of_match);
77
78 /* Option table for setting up Axi Ethernet hardware options */
79 static struct axienet_option axienet_options[] = {
80 /* Turn on jumbo packet support for both Rx and Tx */
81 {
82 .opt = XAE_OPTION_JUMBO,
83 .reg = XAE_TC_OFFSET,
84 .m_or = XAE_TC_JUM_MASK,
85 }, {
86 .opt = XAE_OPTION_JUMBO,
87 .reg = XAE_RCW1_OFFSET,
88 .m_or = XAE_RCW1_JUM_MASK,
89 }, { /* Turn on VLAN packet support for both Rx and Tx */
90 .opt = XAE_OPTION_VLAN,
91 .reg = XAE_TC_OFFSET,
92 .m_or = XAE_TC_VLAN_MASK,
93 }, {
94 .opt = XAE_OPTION_VLAN,
95 .reg = XAE_RCW1_OFFSET,
96 .m_or = XAE_RCW1_VLAN_MASK,
97 }, { /* Turn on FCS stripping on receive packets */
98 .opt = XAE_OPTION_FCS_STRIP,
99 .reg = XAE_RCW1_OFFSET,
100 .m_or = XAE_RCW1_FCS_MASK,
101 }, { /* Turn on FCS insertion on transmit packets */
102 .opt = XAE_OPTION_FCS_INSERT,
103 .reg = XAE_TC_OFFSET,
104 .m_or = XAE_TC_FCS_MASK,
105 }, { /* Turn off length/type field checking on receive packets */
106 .opt = XAE_OPTION_LENTYPE_ERR,
107 .reg = XAE_RCW1_OFFSET,
108 .m_or = XAE_RCW1_LT_DIS_MASK,
109 }, { /* Turn on Rx flow control */
110 .opt = XAE_OPTION_FLOW_CONTROL,
111 .reg = XAE_FCC_OFFSET,
112 .m_or = XAE_FCC_FCRX_MASK,
113 }, { /* Turn on Tx flow control */
114 .opt = XAE_OPTION_FLOW_CONTROL,
115 .reg = XAE_FCC_OFFSET,
116 .m_or = XAE_FCC_FCTX_MASK,
117 }, { /* Turn on promiscuous frame filtering */
118 .opt = XAE_OPTION_PROMISC,
119 .reg = XAE_FMI_OFFSET,
120 .m_or = XAE_FMI_PM_MASK,
121 }, { /* Enable transmitter */
122 .opt = XAE_OPTION_TXEN,
123 .reg = XAE_TC_OFFSET,
124 .m_or = XAE_TC_TX_MASK,
125 }, { /* Enable receiver */
126 .opt = XAE_OPTION_RXEN,
127 .reg = XAE_RCW1_OFFSET,
128 .m_or = XAE_RCW1_RX_MASK,
129 },
130 {}
131 };
132
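/* Look up the Rx skbuf_dma_descriptor for ring slot @i. The index is a
 * free-running counter that is masked down to the ring size here.
 */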
static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
134 {
135 return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
136 }
137
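/* Look up the Tx skbuf_dma_descriptor for ring slot @i; as on the Rx side,
 * the free-running index is masked down to the ring size.
 */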
static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
139 {
140 return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
141 }
142
143 /**
144 * axienet_dma_in32 - Memory mapped Axi DMA register read
145 * @lp: Pointer to axienet local structure
146 * @reg: Address offset from the base address of the Axi DMA core
147 *
148 * Return: The contents of the Axi DMA register
149 *
150 * This function returns the contents of the corresponding Axi DMA register.
151 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
153 {
154 return ioread32(lp->dma_regs + reg);
155 }
156
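/**
 * desc_set_phys_addr - Store a DMA address in a buffer descriptor
 * @lp: Pointer to the axienet_local structure
 * @addr: DMA address to program
 * @desc: Buffer descriptor to update
 *
 * The MSB word is only written when the hardware was configured for 64-bit
 * addressing.
 */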
static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
159 {
160 desc->phys = lower_32_bits(addr);
161 if (lp->features & XAE_FEATURE_DMA_64BIT)
162 desc->phys_msb = upper_32_bits(addr);
163 }
164
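/**
 * desc_get_phys_addr - Read back the DMA address from a buffer descriptor
 * @lp: Pointer to the axienet_local structure
 * @desc: Buffer descriptor to read
 *
 * Return: The DMA address programmed into @desc, including the MSB word on
 * 64-bit capable hardware.
 */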
static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
167 {
168 dma_addr_t ret = desc->phys;
169
170 if (lp->features & XAE_FEATURE_DMA_64BIT)
171 ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
172
173 return ret;
174 }
175
176 /**
177 * axienet_dma_bd_release - Release buffer descriptor rings
178 * @ndev: Pointer to the net_device structure
179 *
180 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver's stop routine is called.
183 */
static void axienet_dma_bd_release(struct net_device *ndev)
185 {
186 int i;
187 struct axienet_local *lp = netdev_priv(ndev);
188
189 /* If we end up here, tx_bd_v must have been DMA allocated. */
190 dma_free_coherent(lp->dev,
191 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
192 lp->tx_bd_v,
193 lp->tx_bd_p);
194
195 if (!lp->rx_bd_v)
196 return;
197
198 for (i = 0; i < lp->rx_bd_num; i++) {
199 dma_addr_t phys;
200
201 /* A NULL skb means this descriptor has not been initialised
202 * at all.
203 */
204 if (!lp->rx_bd_v[i].skb)
205 break;
206
207 dev_kfree_skb(lp->rx_bd_v[i].skb);
208
209 /* For each descriptor, we programmed cntrl with the (non-zero)
210 * descriptor size, after it had been successfully allocated.
211 * So a non-zero value in there means we need to unmap it.
212 */
213 if (lp->rx_bd_v[i].cntrl) {
214 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
215 dma_unmap_single(lp->dev, phys,
216 lp->max_frm_size, DMA_FROM_DEVICE);
217 }
218 }
219
220 dma_free_coherent(lp->dev,
221 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
222 lp->rx_bd_v,
223 lp->rx_bd_p);
224 }
225
226 /**
227 * axienet_usec_to_timer - Calculate IRQ delay timer value
228 * @lp: Pointer to the axienet_local structure
229 * @coalesce_usec: Microseconds to convert into timer value
230 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
232 {
233 u32 result;
234 u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
235
236 if (lp->axi_clk)
237 clk_rate = clk_get_rate(lp->axi_clk);
238
239 /* 1 Timeout Interval = 125 * (clock period of SG clock) */
240 result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
241 XAXIDMA_DELAY_SCALE);
242 return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
243 }
244
245 /**
246 * axienet_dma_start - Set up DMA registers and start DMA operation
247 * @lp: Pointer to the axienet_local structure
248 */
static void axienet_dma_start(struct axienet_local *lp)
250 {
251 /* Start updating the Rx channel control register */
252 lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
253 XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
254 /* Only set interrupt delay timer if not generating an interrupt on
255 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
256 */
257 if (lp->coalesce_count_rx > 1)
258 lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
259 << XAXIDMA_DELAY_SHIFT) |
260 XAXIDMA_IRQ_DELAY_MASK;
261 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
262
263 /* Start updating the Tx channel control register */
264 lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
265 XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
266 /* Only set interrupt delay timer if not generating an interrupt on
267 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
268 */
269 if (lp->coalesce_count_tx > 1)
270 lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
271 << XAXIDMA_DELAY_SHIFT) |
272 XAXIDMA_IRQ_DELAY_MASK;
273 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
274
275 /* Populate the tail pointer and bring the Rx Axi DMA engine out of
276 * halted state. This will make the Rx side ready for reception.
277 */
278 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
279 lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
280 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
281 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
282 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
283
284 /* Write to the RS (Run-stop) bit in the Tx channel control register.
285 * Tx channel is now ready to run. But only after we write to the
286 * tail pointer register that the Tx channel will start transmitting.
287 */
288 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
289 lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
290 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
291 }
292
293 /**
294 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
295 * @ndev: Pointer to the net_device structure
296 *
 * Return: 0 on success; -ENOMEM on failure.
298 *
299 * This function is called to initialize the Rx and Tx DMA descriptor
300 * rings. This initializes the descriptors with required default values
301 * and is called when Axi Ethernet driver reset is called.
302 */
static int axienet_dma_bd_init(struct net_device *ndev)
304 {
305 int i;
306 struct sk_buff *skb;
307 struct axienet_local *lp = netdev_priv(ndev);
308
309 /* Reset the indexes which are used for accessing the BDs */
310 lp->tx_bd_ci = 0;
311 lp->tx_bd_tail = 0;
312 lp->rx_bd_ci = 0;
313
314 /* Allocate the Tx and Rx buffer descriptors. */
315 lp->tx_bd_v = dma_alloc_coherent(lp->dev,
316 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
317 &lp->tx_bd_p, GFP_KERNEL);
318 if (!lp->tx_bd_v)
319 return -ENOMEM;
320
321 lp->rx_bd_v = dma_alloc_coherent(lp->dev,
322 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
323 &lp->rx_bd_p, GFP_KERNEL);
324 if (!lp->rx_bd_v)
325 goto out;
326
327 for (i = 0; i < lp->tx_bd_num; i++) {
328 dma_addr_t addr = lp->tx_bd_p +
329 sizeof(*lp->tx_bd_v) *
330 ((i + 1) % lp->tx_bd_num);
331
332 lp->tx_bd_v[i].next = lower_32_bits(addr);
333 if (lp->features & XAE_FEATURE_DMA_64BIT)
334 lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
335 }
336
337 for (i = 0; i < lp->rx_bd_num; i++) {
338 dma_addr_t addr;
339
340 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
341 ((i + 1) % lp->rx_bd_num);
342 lp->rx_bd_v[i].next = lower_32_bits(addr);
343 if (lp->features & XAE_FEATURE_DMA_64BIT)
344 lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
345
346 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
347 if (!skb)
348 goto out;
349
350 lp->rx_bd_v[i].skb = skb;
351 addr = dma_map_single(lp->dev, skb->data,
352 lp->max_frm_size, DMA_FROM_DEVICE);
353 if (dma_mapping_error(lp->dev, addr)) {
354 netdev_err(ndev, "DMA mapping error\n");
355 goto out;
356 }
357 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
358
359 lp->rx_bd_v[i].cntrl = lp->max_frm_size;
360 }
361
362 axienet_dma_start(lp);
363
364 return 0;
365 out:
366 axienet_dma_bd_release(ndev);
367 return -ENOMEM;
368 }
369
370 /**
371 * axienet_set_mac_address - Write the MAC address
372 * @ndev: Pointer to the net_device structure
373 * @address: 6 byte Address to be written as MAC address
374 *
375 * This function is called to initialize the MAC address of the Axi Ethernet
376 * core. It writes to the UAW0 and UAW1 registers of the core.
377 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
380 {
381 struct axienet_local *lp = netdev_priv(ndev);
382
383 if (address)
384 eth_hw_addr_set(ndev, address);
385 if (!is_valid_ether_addr(ndev->dev_addr))
386 eth_hw_addr_random(ndev);
387
	/* Set up the unicast MAC address filter with the given MAC address */
389 axienet_iow(lp, XAE_UAW0_OFFSET,
390 (ndev->dev_addr[0]) |
391 (ndev->dev_addr[1] << 8) |
392 (ndev->dev_addr[2] << 16) |
393 (ndev->dev_addr[3] << 24));
394 axienet_iow(lp, XAE_UAW1_OFFSET,
395 (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
396 ~XAE_UAW1_UNICASTADDR_MASK) |
397 (ndev->dev_addr[4] |
398 (ndev->dev_addr[5] << 8))));
399 }
400
401 /**
402 * netdev_set_mac_address - Write the MAC address (from outside the driver)
403 * @ndev: Pointer to the net_device structure
404 * @p: 6 byte Address to be written as MAC address
405 *
406 * Return: 0 for all conditions. Presently, there is no failure case.
407 *
408 * This function is called to initialize the MAC address of the Axi Ethernet
409 * core. It calls the core specific axienet_set_mac_address. This is the
410 * function that goes into net_device_ops structure entry ndo_set_mac_address.
411 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
413 {
414 struct sockaddr *addr = p;
415
416 axienet_set_mac_address(ndev, addr->sa_data);
417 return 0;
418 }
419
420 /**
421 * axienet_set_multicast_list - Prepare the multicast table
422 * @ndev: Pointer to the net_device structure
423 *
424 * This function is called to initialize the multicast table during
425 * initialization. The Axi Ethernet basic multicast support has a four-entry
426 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
428 * means whenever the multicast table entries need to be updated this
429 * function gets called.
430 */
static void axienet_set_multicast_list(struct net_device *ndev)
432 {
433 int i = 0;
434 u32 reg, af0reg, af1reg;
435 struct axienet_local *lp = netdev_priv(ndev);
436
437 reg = axienet_ior(lp, XAE_FMI_OFFSET);
438 reg &= ~XAE_FMI_PM_MASK;
439 if (ndev->flags & IFF_PROMISC)
440 reg |= XAE_FMI_PM_MASK;
441 else
442 reg &= ~XAE_FMI_PM_MASK;
443 axienet_iow(lp, XAE_FMI_OFFSET, reg);
444
445 if (ndev->flags & IFF_ALLMULTI ||
446 netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
447 reg &= 0xFFFFFF00;
448 axienet_iow(lp, XAE_FMI_OFFSET, reg);
449 axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
450 axienet_iow(lp, XAE_AF1_OFFSET, 0);
451 axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
452 axienet_iow(lp, XAE_AM1_OFFSET, 0);
453 axienet_iow(lp, XAE_FFE_OFFSET, 1);
454 i = 1;
455 } else if (!netdev_mc_empty(ndev)) {
456 struct netdev_hw_addr *ha;
457
458 netdev_for_each_mc_addr(ha, ndev) {
459 if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
460 break;
461
462 af0reg = (ha->addr[0]);
463 af0reg |= (ha->addr[1] << 8);
464 af0reg |= (ha->addr[2] << 16);
465 af0reg |= (ha->addr[3] << 24);
466
467 af1reg = (ha->addr[4]);
468 af1reg |= (ha->addr[5] << 8);
469
470 reg &= 0xFFFFFF00;
471 reg |= i;
472
473 axienet_iow(lp, XAE_FMI_OFFSET, reg);
474 axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
475 axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
476 axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
477 axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
478 axienet_iow(lp, XAE_FFE_OFFSET, 1);
479 i++;
480 }
481 }
482
483 for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
484 reg &= 0xFFFFFF00;
485 reg |= i;
486 axienet_iow(lp, XAE_FMI_OFFSET, reg);
487 axienet_iow(lp, XAE_FFE_OFFSET, 0);
488 }
489 }
490
491 /**
492 * axienet_setoptions - Set an Axi Ethernet option
493 * @ndev: Pointer to the net_device structure
494 * @options: Option to be enabled/disabled
495 *
496 * The Axi Ethernet core has multiple features which can be selectively turned
497 * on or off. The typical options could be jumbo frame option, basic VLAN
498 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
501 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
503 {
504 int reg;
505 struct axienet_local *lp = netdev_priv(ndev);
506 struct axienet_option *tp = &axienet_options[0];
507
508 while (tp->opt) {
509 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
510 if (options & tp->opt)
511 reg |= tp->m_or;
512 axienet_iow(lp, tp->reg, reg);
513 tp++;
514 }
515
516 lp->options |= options;
517 }
518
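/**
 * axienet_stat - Read the 64-bit running total of a hardware statistic
 * @lp: Pointer to the axienet_local structure
 * @stat: Counter to read
 *
 * The hardware counters are only 32 bits wide, so the driver maintains a
 * 64-bit software base that is folded in here. While a reset is in progress
 * the hardware counters are unreliable, so only the saved base is returned.
 * Readers use lp->hw_stats_seqcount to obtain a consistent snapshot.
 */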
static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
520 {
521 u32 counter;
522
523 if (lp->reset_in_progress)
524 return lp->hw_stat_base[stat];
525
526 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
527 return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
528 }
529
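/**
 * axienet_stats_update - Fold the hardware counters into the 64-bit totals
 * @lp: Pointer to the axienet_local structure
 * @reset: Whether a device reset is about to take place
 *
 * Should be called with lp->stats_lock held; both callers in this file do so.
 */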
static void axienet_stats_update(struct axienet_local *lp, bool reset)
531 {
532 enum temac_stat stat;
533
534 write_seqcount_begin(&lp->hw_stats_seqcount);
535 lp->reset_in_progress = reset;
536 for (stat = 0; stat < STAT_COUNT; stat++) {
537 u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
538
539 lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
540 lp->hw_last_counter[stat] = counter;
541 }
542 write_seqcount_end(&lp->hw_stats_seqcount);
543 }
544
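/**
 * axienet_refresh_stats - Periodic work item that keeps the statistics fresh
 * @work: Pointer to the embedded delayed_work structure
 *
 * Reschedules itself often enough that the 32-bit hardware counters are not
 * expected to wrap between two updates.
 */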
static void axienet_refresh_stats(struct work_struct *work)
546 {
547 struct axienet_local *lp = container_of(work, struct axienet_local,
548 stats_work.work);
549
550 mutex_lock(&lp->stats_lock);
551 axienet_stats_update(lp, false);
552 mutex_unlock(&lp->stats_lock);
553
554 /* Just less than 2^32 bytes at 2.5 GBit/s */
555 schedule_delayed_work(&lp->stats_work, 13 * HZ);
556 }
557
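/**
 * __axienet_device_reset - Reset the Axi DMA core (and with it the MAC)
 * @lp: Pointer to the axienet_local structure
 *
 * Return: 0 on success; a negative error code if the DMA reset or the PHY
 * reset-complete poll times out.
 *
 * Callers hold the MDIO bus lock around this function, since the reset also
 * affects the MDIO interface.
 */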
static int __axienet_device_reset(struct axienet_local *lp)
559 {
560 u32 value;
561 int ret;
562
563 /* Save statistics counters in case they will be reset */
564 mutex_lock(&lp->stats_lock);
565 if (lp->features & XAE_FEATURE_STATS)
566 axienet_stats_update(lp, true);
567
568 /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
569 * process of Axi DMA takes a while to complete as all pending
570 * commands/transfers will be flushed or completed during this
571 * reset process.
572 * Note that even though both TX and RX have their own reset register,
573 * they both reset the entire DMA core, so only one needs to be used.
574 */
575 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
576 ret = read_poll_timeout(axienet_dma_in32, value,
577 !(value & XAXIDMA_CR_RESET_MASK),
578 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
579 XAXIDMA_TX_CR_OFFSET);
580 if (ret) {
581 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
582 goto out;
583 }
584
585 /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
586 ret = read_poll_timeout(axienet_ior, value,
587 value & XAE_INT_PHYRSTCMPLT_MASK,
588 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
589 XAE_IS_OFFSET);
590 if (ret) {
591 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
592 goto out;
593 }
594
595 /* Update statistics counters with new values */
596 if (lp->features & XAE_FEATURE_STATS) {
597 enum temac_stat stat;
598
599 write_seqcount_begin(&lp->hw_stats_seqcount);
600 lp->reset_in_progress = false;
601 for (stat = 0; stat < STAT_COUNT; stat++) {
602 u32 counter =
603 axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
604
605 lp->hw_stat_base[stat] +=
606 lp->hw_last_counter[stat] - counter;
607 lp->hw_last_counter[stat] = counter;
608 }
609 write_seqcount_end(&lp->hw_stats_seqcount);
610 }
611
612 out:
613 mutex_unlock(&lp->stats_lock);
614 return ret;
615 }
616
617 /**
618 * axienet_dma_stop - Stop DMA operation
619 * @lp: Pointer to the axienet_local structure
620 */
static void axienet_dma_stop(struct axienet_local *lp)
622 {
623 int count;
624 u32 cr, sr;
625
626 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
627 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
628 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
629 synchronize_irq(lp->rx_irq);
630
631 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
632 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
633 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
634 synchronize_irq(lp->tx_irq);
635
636 /* Give DMAs a chance to halt gracefully */
637 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
638 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
639 msleep(20);
640 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
641 }
642
643 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
644 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
645 msleep(20);
646 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
647 }
648
649 /* Do a reset to ensure DMA is really stopped */
650 axienet_lock_mii(lp);
651 __axienet_device_reset(lp);
652 axienet_unlock_mii(lp);
653 }
654
655 /**
656 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
657 * @ndev: Pointer to the net_device structure
658 *
659 * This function is called to reset and initialize the Axi Ethernet core. This
660 * is typically called during initialization. It does a reset of the Axi DMA
661 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
662 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
663 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
664 * core.
665 * Returns 0 on success or a negative error number otherwise.
666 */
static int axienet_device_reset(struct net_device *ndev)
668 {
669 u32 axienet_status;
670 struct axienet_local *lp = netdev_priv(ndev);
671 int ret;
672
673 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
674 lp->options |= XAE_OPTION_VLAN;
675 lp->options &= (~XAE_OPTION_JUMBO);
676
677 if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
678 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
679 XAE_TRL_SIZE;
680
681 if (lp->max_frm_size <= lp->rxmem)
682 lp->options |= XAE_OPTION_JUMBO;
683 }
684
685 if (!lp->use_dmaengine) {
686 ret = __axienet_device_reset(lp);
687 if (ret)
688 return ret;
689
690 ret = axienet_dma_bd_init(ndev);
691 if (ret) {
692 netdev_err(ndev, "%s: descriptor allocation failed\n",
693 __func__);
694 return ret;
695 }
696 }
697
698 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
699 axienet_status &= ~XAE_RCW1_RX_MASK;
700 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
701
702 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
703 if (axienet_status & XAE_INT_RXRJECT_MASK)
704 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
705 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
706 XAE_INT_RECV_ERROR_MASK : 0);
707
708 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
709
710 /* Sync default options with HW but leave receiver and
711 * transmitter disabled.
712 */
713 axienet_setoptions(ndev, lp->options &
714 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
715 axienet_set_mac_address(ndev, NULL);
716 axienet_set_multicast_list(ndev);
717 axienet_setoptions(ndev, lp->options);
718
719 netif_trans_update(ndev);
720
721 return 0;
722 }
723
724 /**
725 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
726 * @lp: Pointer to the axienet_local structure
727 * @first_bd: Index of first descriptor to clean up
728 * @nr_bds: Max number of descriptors to clean up
729 * @force: Whether to clean descriptors even if not complete
730 * @sizep: Pointer to a u32 filled with the total sum of all bytes
731 * in all cleaned-up descriptors. Ignored if NULL.
732 * @budget: NAPI budget (use 0 when not called from NAPI poll)
733 *
734 * Would either be called after a successful transmit operation, or after
735 * there was an error when setting up the chain.
736 * Returns the number of packets handled.
737 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
740 {
741 struct axidma_bd *cur_p;
742 unsigned int status;
743 int i, packets = 0;
744 dma_addr_t phys;
745
746 for (i = 0; i < nr_bds; i++) {
747 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
748 status = cur_p->status;
749
750 /* If force is not specified, clean up only descriptors
751 * that have been completed by the MAC.
752 */
753 if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
754 break;
755
756 /* Ensure we see complete descriptor update */
757 dma_rmb();
758 phys = desc_get_phys_addr(lp, cur_p);
759 dma_unmap_single(lp->dev, phys,
760 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
761 DMA_TO_DEVICE);
762
763 if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
764 napi_consume_skb(cur_p->skb, budget);
765 packets++;
766 }
767
768 cur_p->app0 = 0;
769 cur_p->app1 = 0;
770 cur_p->app2 = 0;
771 cur_p->app4 = 0;
772 cur_p->skb = NULL;
773 /* ensure our transmit path and device don't prematurely see status cleared */
774 wmb();
775 cur_p->cntrl = 0;
776 cur_p->status = 0;
777
778 if (sizep)
779 *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
780 }
781
782 if (!force) {
783 lp->tx_bd_ci += i;
784 if (lp->tx_bd_ci >= lp->tx_bd_num)
785 lp->tx_bd_ci %= lp->tx_bd_num;
786 }
787
788 return packets;
789 }
790
791 /**
792 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
793 * @lp: Pointer to the axienet_local structure
794 * @num_frag: The number of BDs to check for
795 *
796 * Return: 0, on success
797 * NETDEV_TX_BUSY, if any of the descriptors are not free
798 *
799 * This function is invoked before BDs are allocated and transmission starts.
800 * This function returns 0 if a BD or group of BDs can be allocated for
801 * transmission. If the BD or any of the BDs are not free the function
802 * returns a busy status.
803 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
806 {
807 struct axidma_bd *cur_p;
808
809 /* Ensure we see all descriptor updates from device or TX polling */
810 rmb();
811 cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
812 lp->tx_bd_num];
813 if (cur_p->cntrl)
814 return NETDEV_TX_BUSY;
815 return 0;
816 }
817
818 /**
819 * axienet_dma_tx_cb - DMA engine callback for TX channel.
820 * @data: Pointer to the axienet_local structure.
821 * @result: error reporting through dmaengine_result.
822 * This function is called by dmaengine driver for TX channel to notify
823 * that the transmit is done.
824 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
826 {
827 struct skbuf_dma_descriptor *skbuf_dma;
828 struct axienet_local *lp = data;
829 struct netdev_queue *txq;
830 int len;
831
832 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
833 len = skbuf_dma->skb->len;
834 txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
835 u64_stats_update_begin(&lp->tx_stat_sync);
836 u64_stats_add(&lp->tx_bytes, len);
837 u64_stats_add(&lp->tx_packets, 1);
838 u64_stats_update_end(&lp->tx_stat_sync);
839 dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
840 dev_consume_skb_any(skbuf_dma->skb);
841 netif_txq_completed_wake(txq, 1, len,
842 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
843 2 * MAX_SKB_FRAGS);
844 }
845
846 /**
847 * axienet_start_xmit_dmaengine - Starts the transmission.
848 * @skb: sk_buff pointer that contains data to be Txed.
849 * @ndev: Pointer to net_device structure.
850 *
 * Return: NETDEV_TX_OK on success, or when the skb is dropped due to a
 *	   non-space error.
 *	   NETDEV_TX_BUSY when no free element is available in the TX skb
 *	   ring buffer.
 *
 * This function is invoked to initiate transmission. It maps the skb,
 * registers the DMA callback and submits the DMA transaction.
858 * Additionally if checksum offloading is supported,
859 * it populates AXI Stream Control fields with appropriate values.
860 */
861 static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
863 {
864 struct dma_async_tx_descriptor *dma_tx_desc = NULL;
865 struct axienet_local *lp = netdev_priv(ndev);
866 u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
867 struct skbuf_dma_descriptor *skbuf_dma;
868 struct dma_device *dma_dev;
869 struct netdev_queue *txq;
870 u32 csum_start_off;
871 u32 csum_index_off;
872 int sg_len;
873 int ret;
874
875 dma_dev = lp->tx_chan->device;
876 sg_len = skb_shinfo(skb)->nr_frags + 1;
877 if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
878 netif_stop_queue(ndev);
879 if (net_ratelimit())
880 netdev_warn(ndev, "TX ring unexpectedly full\n");
881 return NETDEV_TX_BUSY;
882 }
883
884 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
885 if (!skbuf_dma)
886 goto xmit_error_drop_skb;
887
888 lp->tx_ring_head++;
889 sg_init_table(skbuf_dma->sgl, sg_len);
890 ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
891 if (ret < 0)
892 goto xmit_error_drop_skb;
893
894 ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
895 if (!ret)
896 goto xmit_error_drop_skb;
897
898 /* Fill up app fields for checksum */
899 if (skb->ip_summed == CHECKSUM_PARTIAL) {
900 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
901 /* Tx Full Checksum Offload Enabled */
902 app_metadata[0] |= 2;
903 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
904 csum_start_off = skb_transport_offset(skb);
905 csum_index_off = csum_start_off + skb->csum_offset;
906 /* Tx Partial Checksum Offload Enabled */
907 app_metadata[0] |= 1;
908 app_metadata[1] = (csum_start_off << 16) | csum_index_off;
909 }
910 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
911 app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
912 }
913
914 dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
915 sg_len, DMA_MEM_TO_DEV,
916 DMA_PREP_INTERRUPT, (void *)app_metadata);
917 if (!dma_tx_desc)
918 goto xmit_error_unmap_sg;
919
920 skbuf_dma->skb = skb;
921 skbuf_dma->sg_len = sg_len;
922 dma_tx_desc->callback_param = lp;
923 dma_tx_desc->callback_result = axienet_dma_tx_cb;
924 txq = skb_get_tx_queue(lp->ndev, skb);
925 netdev_tx_sent_queue(txq, skb->len);
926 netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
927 MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
928
929 dmaengine_submit(dma_tx_desc);
930 dma_async_issue_pending(lp->tx_chan);
931 return NETDEV_TX_OK;
932
933 xmit_error_unmap_sg:
934 dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
935 xmit_error_drop_skb:
936 dev_kfree_skb_any(skb);
937 return NETDEV_TX_OK;
938 }
939
940 /**
941 * axienet_tx_poll - Invoked once a transmit is completed by the
942 * Axi DMA Tx channel.
943 * @napi: Pointer to NAPI structure.
944 * @budget: Max number of TX packets to process.
945 *
946 * Return: Number of TX packets processed.
947 *
948 * This function is invoked from the NAPI processing to notify the completion
949 * of transmit operation. It clears fields in the corresponding Tx BDs and
950 * unmaps the corresponding buffer so that CPU can regain ownership of the
951 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
952 * required.
953 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
955 {
956 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
957 struct net_device *ndev = lp->ndev;
958 u32 size = 0;
959 int packets;
960
961 packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
962 &size, budget);
963
964 if (packets) {
965 u64_stats_update_begin(&lp->tx_stat_sync);
966 u64_stats_add(&lp->tx_packets, packets);
967 u64_stats_add(&lp->tx_bytes, size);
968 u64_stats_update_end(&lp->tx_stat_sync);
969
970 /* Matches barrier in axienet_start_xmit */
971 smp_mb();
972
973 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
974 netif_wake_queue(ndev);
975 }
976
977 if (packets < budget && napi_complete_done(napi, packets)) {
978 /* Re-enable TX completion interrupts. This should
979 * cause an immediate interrupt if any TX packets are
980 * already pending.
981 */
982 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
983 }
984 return packets;
985 }
986
987 /**
988 * axienet_start_xmit - Starts the transmission.
989 * @skb: sk_buff pointer that contains data to be Txed.
990 * @ndev: Pointer to net_device structure.
991 *
992 * Return: NETDEV_TX_OK, on success
993 * NETDEV_TX_BUSY, if any of the descriptors are not free
994 *
995 * This function is invoked from upper layers to initiate transmission. The
996 * function uses the next available free BDs and populates their fields to
997 * start the transmission. Additionally if checksum offloading is supported,
998 * it populates AXI Stream Control fields with appropriate values.
999 */
1000 static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1002 {
1003 u32 ii;
1004 u32 num_frag;
1005 u32 csum_start_off;
1006 u32 csum_index_off;
1007 skb_frag_t *frag;
1008 dma_addr_t tail_p, phys;
1009 u32 orig_tail_ptr, new_tail_ptr;
1010 struct axienet_local *lp = netdev_priv(ndev);
1011 struct axidma_bd *cur_p;
1012
1013 orig_tail_ptr = lp->tx_bd_tail;
1014 new_tail_ptr = orig_tail_ptr;
1015
1016 num_frag = skb_shinfo(skb)->nr_frags;
1017 cur_p = &lp->tx_bd_v[orig_tail_ptr];
1018
1019 if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1020 /* Should not happen as last start_xmit call should have
1021 * checked for sufficient space and queue should only be
1022 * woken when sufficient space is available.
1023 */
1024 netif_stop_queue(ndev);
1025 if (net_ratelimit())
1026 netdev_warn(ndev, "TX ring unexpectedly full\n");
1027 return NETDEV_TX_BUSY;
1028 }
1029
1030 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1031 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1032 /* Tx Full Checksum Offload Enabled */
1033 cur_p->app0 |= 2;
1034 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1035 csum_start_off = skb_transport_offset(skb);
1036 csum_index_off = csum_start_off + skb->csum_offset;
1037 /* Tx Partial Checksum Offload Enabled */
1038 cur_p->app0 |= 1;
1039 cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1040 }
1041 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1042 cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1043 }
1044
1045 phys = dma_map_single(lp->dev, skb->data,
1046 skb_headlen(skb), DMA_TO_DEVICE);
1047 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1048 if (net_ratelimit())
1049 netdev_err(ndev, "TX DMA mapping error\n");
1050 ndev->stats.tx_dropped++;
1051 dev_kfree_skb_any(skb);
1052 return NETDEV_TX_OK;
1053 }
1054 desc_set_phys_addr(lp, phys, cur_p);
1055 cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1056
1057 for (ii = 0; ii < num_frag; ii++) {
1058 if (++new_tail_ptr >= lp->tx_bd_num)
1059 new_tail_ptr = 0;
1060 cur_p = &lp->tx_bd_v[new_tail_ptr];
1061 frag = &skb_shinfo(skb)->frags[ii];
1062 phys = dma_map_single(lp->dev,
1063 skb_frag_address(frag),
1064 skb_frag_size(frag),
1065 DMA_TO_DEVICE);
1066 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1067 if (net_ratelimit())
1068 netdev_err(ndev, "TX DMA mapping error\n");
1069 ndev->stats.tx_dropped++;
1070 axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1071 true, NULL, 0);
1072 dev_kfree_skb_any(skb);
1073 return NETDEV_TX_OK;
1074 }
1075 desc_set_phys_addr(lp, phys, cur_p);
1076 cur_p->cntrl = skb_frag_size(frag);
1077 }
1078
1079 cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1080 cur_p->skb = skb;
1081
1082 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1083 if (++new_tail_ptr >= lp->tx_bd_num)
1084 new_tail_ptr = 0;
1085 WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1086
1087 /* Start the transfer */
1088 axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1089
1090 /* Stop queue if next transmit may not have space */
1091 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1092 netif_stop_queue(ndev);
1093
1094 /* Matches barrier in axienet_tx_poll */
1095 smp_mb();
1096
1097 /* Space might have just been freed - check again */
1098 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1099 netif_wake_queue(ndev);
1100 }
1101
1102 return NETDEV_TX_OK;
1103 }
1104
1105 /**
1106 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1107 * @data: Pointer to the skbuf_dma_descriptor structure.
1108 * @result: error reporting through dmaengine_result.
1109 * This function is called by dmaengine driver for RX channel to notify
1110 * that the packet is received.
1111 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1113 {
1114 struct skbuf_dma_descriptor *skbuf_dma;
1115 size_t meta_len, meta_max_len, rx_len;
1116 struct axienet_local *lp = data;
1117 struct sk_buff *skb;
1118 u32 *app_metadata;
1119
1120 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1121 skb = skbuf_dma->skb;
1122 app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1123 &meta_max_len);
1124 dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1125 DMA_FROM_DEVICE);
1126 /* TODO: Derive app word index programmatically */
1127 rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1128 skb_put(skb, rx_len);
1129 skb->protocol = eth_type_trans(skb, lp->ndev);
1130 skb->ip_summed = CHECKSUM_NONE;
1131
1132 __netif_rx(skb);
1133 u64_stats_update_begin(&lp->rx_stat_sync);
1134 u64_stats_add(&lp->rx_packets, 1);
1135 u64_stats_add(&lp->rx_bytes, rx_len);
1136 u64_stats_update_end(&lp->rx_stat_sync);
1137 axienet_rx_submit_desc(lp->ndev);
1138 dma_async_issue_pending(lp->rx_chan);
1139 }
1140
1141 /**
1142 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1143 * @napi: Pointer to NAPI structure.
1144 * @budget: Max number of RX packets to process.
1145 *
1146 * Return: Number of RX packets processed.
1147 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
1149 {
1150 u32 length;
1151 u32 csumstatus;
1152 u32 size = 0;
1153 int packets = 0;
1154 dma_addr_t tail_p = 0;
1155 struct axidma_bd *cur_p;
1156 struct sk_buff *skb, *new_skb;
1157 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1158
1159 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1160
1161 while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1162 dma_addr_t phys;
1163
1164 /* Ensure we see complete descriptor update */
1165 dma_rmb();
1166
1167 skb = cur_p->skb;
1168 cur_p->skb = NULL;
1169
1170 /* skb could be NULL if a previous pass already received the
1171 * packet for this slot in the ring, but failed to refill it
1172 * with a newly allocated buffer. In this case, don't try to
1173 * receive it again.
1174 */
1175 if (likely(skb)) {
1176 length = cur_p->app4 & 0x0000FFFF;
1177
1178 phys = desc_get_phys_addr(lp, cur_p);
1179 dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1180 DMA_FROM_DEVICE);
1181
1182 skb_put(skb, length);
1183 skb->protocol = eth_type_trans(skb, lp->ndev);
1184 /*skb_checksum_none_assert(skb);*/
1185 skb->ip_summed = CHECKSUM_NONE;
1186
1187 /* if we're doing Rx csum offload, set it up */
1188 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1189 csumstatus = (cur_p->app2 &
1190 XAE_FULL_CSUM_STATUS_MASK) >> 3;
1191 if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1192 csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1193 skb->ip_summed = CHECKSUM_UNNECESSARY;
1194 }
1195 } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1196 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1197 skb->ip_summed = CHECKSUM_COMPLETE;
1198 }
1199
1200 napi_gro_receive(napi, skb);
1201
1202 size += length;
1203 packets++;
1204 }
1205
1206 new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1207 if (!new_skb)
1208 break;
1209
1210 phys = dma_map_single(lp->dev, new_skb->data,
1211 lp->max_frm_size,
1212 DMA_FROM_DEVICE);
1213 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1214 if (net_ratelimit())
1215 netdev_err(lp->ndev, "RX DMA mapping error\n");
1216 dev_kfree_skb(new_skb);
1217 break;
1218 }
1219 desc_set_phys_addr(lp, phys, cur_p);
1220
1221 cur_p->cntrl = lp->max_frm_size;
1222 cur_p->status = 0;
1223 cur_p->skb = new_skb;
1224
1225 /* Only update tail_p to mark this slot as usable after it has
1226 * been successfully refilled.
1227 */
1228 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1229
1230 if (++lp->rx_bd_ci >= lp->rx_bd_num)
1231 lp->rx_bd_ci = 0;
1232 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1233 }
1234
1235 u64_stats_update_begin(&lp->rx_stat_sync);
1236 u64_stats_add(&lp->rx_packets, packets);
1237 u64_stats_add(&lp->rx_bytes, size);
1238 u64_stats_update_end(&lp->rx_stat_sync);
1239
1240 if (tail_p)
1241 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1242
1243 if (packets < budget && napi_complete_done(napi, packets)) {
1244 /* Re-enable RX completion interrupts. This should
1245 * cause an immediate interrupt if any RX packets are
1246 * already pending.
1247 */
1248 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1249 }
1250 return packets;
1251 }
1252
1253 /**
1254 * axienet_tx_irq - Tx Done Isr.
1255 * @irq: irq number
1256 * @_ndev: net_device pointer
1257 *
1258 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1259 *
1260 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1261 * TX BD processing.
1262 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1264 {
1265 unsigned int status;
1266 struct net_device *ndev = _ndev;
1267 struct axienet_local *lp = netdev_priv(ndev);
1268
1269 status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1270
1271 if (!(status & XAXIDMA_IRQ_ALL_MASK))
1272 return IRQ_NONE;
1273
1274 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1275
1276 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1277 netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1278 netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1279 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1280 (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1281 schedule_work(&lp->dma_err_task);
1282 } else {
1283 /* Disable further TX completion interrupts and schedule
1284 * NAPI to handle the completions.
1285 */
1286 u32 cr = lp->tx_dma_cr;
1287
1288 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1289 if (napi_schedule_prep(&lp->napi_tx)) {
1290 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1291 __napi_schedule(&lp->napi_tx);
1292 }
1293 }
1294
1295 return IRQ_HANDLED;
1296 }
1297
1298 /**
1299 * axienet_rx_irq - Rx Isr.
1300 * @irq: irq number
1301 * @_ndev: net_device pointer
1302 *
1303 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1304 *
1305 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1306 * processing.
1307 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1309 {
1310 unsigned int status;
1311 struct net_device *ndev = _ndev;
1312 struct axienet_local *lp = netdev_priv(ndev);
1313
1314 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1315
1316 if (!(status & XAXIDMA_IRQ_ALL_MASK))
1317 return IRQ_NONE;
1318
1319 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1320
1321 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1322 netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1323 netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1324 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1325 (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1326 schedule_work(&lp->dma_err_task);
1327 } else {
1328 /* Disable further RX completion interrupts and schedule
1329 * NAPI receive.
1330 */
1331 u32 cr = lp->rx_dma_cr;
1332
1333 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1334 if (napi_schedule_prep(&lp->napi_rx)) {
1335 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1336 __napi_schedule(&lp->napi_rx);
1337 }
1338 }
1339
1340 return IRQ_HANDLED;
1341 }
1342
1343 /**
1344 * axienet_eth_irq - Ethernet core Isr.
1345 * @irq: irq number
1346 * @_ndev: net_device pointer
1347 *
1348 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1349 *
1350 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1351 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1353 {
1354 struct net_device *ndev = _ndev;
1355 struct axienet_local *lp = netdev_priv(ndev);
1356 unsigned int pending;
1357
1358 pending = axienet_ior(lp, XAE_IP_OFFSET);
1359 if (!pending)
1360 return IRQ_NONE;
1361
1362 if (pending & XAE_INT_RXFIFOOVR_MASK)
1363 ndev->stats.rx_missed_errors++;
1364
1365 if (pending & XAE_INT_RXRJECT_MASK)
1366 ndev->stats.rx_dropped++;
1367
1368 axienet_iow(lp, XAE_IS_OFFSET, pending);
1369 return IRQ_HANDLED;
1370 }
1371
1372 static void axienet_dma_err_handler(struct work_struct *work);
1373
1374 /**
 * axienet_rx_submit_desc - Submit an Rx descriptor to the dmaengine.
 * @ndev: net_device pointer
 *
 * Allocate an skbuff, map it for DMA, obtain a dmaengine descriptor,
 * attach the callback information and submit the descriptor.
1381 */
static void axienet_rx_submit_desc(struct net_device *ndev)
1383 {
1384 struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1385 struct axienet_local *lp = netdev_priv(ndev);
1386 struct skbuf_dma_descriptor *skbuf_dma;
1387 struct sk_buff *skb;
1388 dma_addr_t addr;
1389
1390 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1391 if (!skbuf_dma)
1392 return;
1393
1394 lp->rx_ring_head++;
1395 skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1396 if (!skb)
1397 return;
1398
1399 sg_init_table(skbuf_dma->sgl, 1);
1400 addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1401 if (unlikely(dma_mapping_error(lp->dev, addr))) {
1402 if (net_ratelimit())
1403 netdev_err(ndev, "DMA mapping error\n");
1404 goto rx_submit_err_free_skb;
1405 }
1406 sg_dma_address(skbuf_dma->sgl) = addr;
1407 sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1408 dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1409 1, DMA_DEV_TO_MEM,
1410 DMA_PREP_INTERRUPT);
1411 if (!dma_rx_desc)
1412 goto rx_submit_err_unmap_skb;
1413
1414 skbuf_dma->skb = skb;
1415 skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1416 skbuf_dma->desc = dma_rx_desc;
1417 dma_rx_desc->callback_param = lp;
1418 dma_rx_desc->callback_result = axienet_dma_rx_cb;
1419 dmaengine_submit(dma_rx_desc);
1420
1421 return;
1422
1423 rx_submit_err_unmap_skb:
1424 dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1425 rx_submit_err_free_skb:
1426 dev_kfree_skb(skb);
1427 }
1428
1429 /**
1430 * axienet_init_dmaengine - init the dmaengine code.
1431 * @ndev: Pointer to net_device structure
1432 *
1433 * Return: 0, on success.
1434 * non-zero error value on failure
1435 *
1436 * This is the dmaengine initialization code.
1437 */
static int axienet_init_dmaengine(struct net_device *ndev)
1439 {
1440 struct axienet_local *lp = netdev_priv(ndev);
1441 struct skbuf_dma_descriptor *skbuf_dma;
1442 int i, ret;
1443
1444 lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1445 if (IS_ERR(lp->tx_chan)) {
1446 dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1447 return PTR_ERR(lp->tx_chan);
1448 }
1449
1450 lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1451 if (IS_ERR(lp->rx_chan)) {
1452 ret = PTR_ERR(lp->rx_chan);
1453 dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1454 goto err_dma_release_tx;
1455 }
1456
1457 lp->tx_ring_tail = 0;
1458 lp->tx_ring_head = 0;
1459 lp->rx_ring_tail = 0;
1460 lp->rx_ring_head = 0;
1461 lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1462 GFP_KERNEL);
1463 if (!lp->tx_skb_ring) {
1464 ret = -ENOMEM;
1465 goto err_dma_release_rx;
1466 }
1467 for (i = 0; i < TX_BD_NUM_MAX; i++) {
1468 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1469 if (!skbuf_dma) {
1470 ret = -ENOMEM;
1471 goto err_free_tx_skb_ring;
1472 }
1473 lp->tx_skb_ring[i] = skbuf_dma;
1474 }
1475
1476 lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1477 GFP_KERNEL);
1478 if (!lp->rx_skb_ring) {
1479 ret = -ENOMEM;
1480 goto err_free_tx_skb_ring;
1481 }
1482 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1483 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1484 if (!skbuf_dma) {
1485 ret = -ENOMEM;
1486 goto err_free_rx_skb_ring;
1487 }
1488 lp->rx_skb_ring[i] = skbuf_dma;
1489 }
1490 /* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1491 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1492 axienet_rx_submit_desc(ndev);
1493 dma_async_issue_pending(lp->rx_chan);
1494
1495 return 0;
1496
1497 err_free_rx_skb_ring:
1498 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1499 kfree(lp->rx_skb_ring[i]);
1500 kfree(lp->rx_skb_ring);
1501 err_free_tx_skb_ring:
1502 for (i = 0; i < TX_BD_NUM_MAX; i++)
1503 kfree(lp->tx_skb_ring[i]);
1504 kfree(lp->tx_skb_ring);
1505 err_dma_release_rx:
1506 dma_release_channel(lp->rx_chan);
1507 err_dma_release_tx:
1508 dma_release_channel(lp->tx_chan);
1509 return ret;
1510 }
1511
1512 /**
1513 * axienet_init_legacy_dma - init the dma legacy code.
1514 * @ndev: Pointer to net_device structure
1515 *
1516 * Return: 0, on success.
1517 * non-zero error value on failure
1518 *
 * This is the legacy DMA initialization code. It also requests the interrupt
 * lines and sets up ISR handling.
1521 *
1522 */
static int axienet_init_legacy_dma(struct net_device *ndev)
1524 {
1525 int ret;
1526 struct axienet_local *lp = netdev_priv(ndev);
1527
1528 /* Enable worker thread for Axi DMA error handling */
1529 lp->stopping = false;
1530 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1531
1532 napi_enable(&lp->napi_rx);
1533 napi_enable(&lp->napi_tx);
1534
1535 /* Enable interrupts for Axi DMA Tx */
1536 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1537 ndev->name, ndev);
1538 if (ret)
1539 goto err_tx_irq;
1540 /* Enable interrupts for Axi DMA Rx */
1541 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1542 ndev->name, ndev);
1543 if (ret)
1544 goto err_rx_irq;
1545 /* Enable interrupts for Axi Ethernet core (if defined) */
1546 if (lp->eth_irq > 0) {
1547 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1548 ndev->name, ndev);
1549 if (ret)
1550 goto err_eth_irq;
1551 }
1552
1553 return 0;
1554
1555 err_eth_irq:
1556 free_irq(lp->rx_irq, ndev);
1557 err_rx_irq:
1558 free_irq(lp->tx_irq, ndev);
1559 err_tx_irq:
1560 napi_disable(&lp->napi_tx);
1561 napi_disable(&lp->napi_rx);
1562 cancel_work_sync(&lp->dma_err_task);
1563 dev_err(lp->dev, "request_irq() failed\n");
1564 return ret;
1565 }
1566
1567 /**
1568 * axienet_open - Driver open routine.
1569 * @ndev: Pointer to net_device structure
1570 *
1571 * Return: 0, on success.
1572 * non-zero error value on failure
1573 *
1574 * This is the driver open routine. It calls phylink_start to start the
1575 * PHY device.
1576 * It also allocates interrupt service routines, enables the interrupt lines
1577 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1578 * descriptors are initialized.
1579 */
static int axienet_open(struct net_device *ndev)
1581 {
1582 int ret;
1583 struct axienet_local *lp = netdev_priv(ndev);
1584
1585 /* When we do an Axi Ethernet reset, it resets the complete core
1586 * including the MDIO. MDIO must be disabled before resetting.
1587 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1588 */
1589 axienet_lock_mii(lp);
1590 ret = axienet_device_reset(ndev);
1591 axienet_unlock_mii(lp);
1592
1593 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1594 if (ret) {
1595 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1596 return ret;
1597 }
1598
1599 phylink_start(lp->phylink);
1600
1601 /* Start the statistics refresh work */
1602 schedule_delayed_work(&lp->stats_work, 0);
1603
1604 if (lp->use_dmaengine) {
1605 /* Enable interrupts for Axi Ethernet core (if defined) */
1606 if (lp->eth_irq > 0) {
1607 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1608 ndev->name, ndev);
1609 if (ret)
1610 goto err_phy;
1611 }
1612
1613 ret = axienet_init_dmaengine(ndev);
1614 if (ret < 0)
1615 goto err_free_eth_irq;
1616 } else {
1617 ret = axienet_init_legacy_dma(ndev);
1618 if (ret)
1619 goto err_phy;
1620 }
1621
1622 return 0;
1623
1624 err_free_eth_irq:
1625 if (lp->eth_irq > 0)
1626 free_irq(lp->eth_irq, ndev);
1627 err_phy:
1628 cancel_delayed_work_sync(&lp->stats_work);
1629 phylink_stop(lp->phylink);
1630 phylink_disconnect_phy(lp->phylink);
1631 return ret;
1632 }
1633
1634 /**
1635 * axienet_stop - Driver stop routine.
1636 * @ndev: Pointer to net_device structure
1637 *
1638 * Return: 0, on success.
1639 *
 * This is the driver stop routine. It calls phylink_stop and
 * phylink_disconnect_phy to stop the PHY device. It also removes the
 * interrupt handlers and disables the interrupts.
1642 * The Axi DMA Tx/Rx BDs are released.
1643 */
static int axienet_stop(struct net_device *ndev)
1645 {
1646 struct axienet_local *lp = netdev_priv(ndev);
1647 int i;
1648
1649 if (!lp->use_dmaengine) {
1650 WRITE_ONCE(lp->stopping, true);
1651 flush_work(&lp->dma_err_task);
1652
1653 napi_disable(&lp->napi_tx);
1654 napi_disable(&lp->napi_rx);
1655 }
1656
1657 cancel_delayed_work_sync(&lp->stats_work);
1658
1659 phylink_stop(lp->phylink);
1660 phylink_disconnect_phy(lp->phylink);
1661
1662 axienet_setoptions(ndev, lp->options &
1663 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1664
1665 if (!lp->use_dmaengine) {
1666 axienet_dma_stop(lp);
1667 cancel_work_sync(&lp->dma_err_task);
1668 free_irq(lp->tx_irq, ndev);
1669 free_irq(lp->rx_irq, ndev);
1670 axienet_dma_bd_release(ndev);
1671 } else {
1672 dmaengine_terminate_sync(lp->tx_chan);
1673 dmaengine_synchronize(lp->tx_chan);
1674 dmaengine_terminate_sync(lp->rx_chan);
1675 dmaengine_synchronize(lp->rx_chan);
1676
1677 for (i = 0; i < TX_BD_NUM_MAX; i++)
1678 kfree(lp->tx_skb_ring[i]);
1679 kfree(lp->tx_skb_ring);
1680 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1681 kfree(lp->rx_skb_ring[i]);
1682 kfree(lp->rx_skb_ring);
1683
1684 dma_release_channel(lp->rx_chan);
1685 dma_release_channel(lp->tx_chan);
1686 }
1687
1688 axienet_iow(lp, XAE_IE_OFFSET, 0);
1689
1690 if (lp->eth_irq > 0)
1691 free_irq(lp->eth_irq, ndev);
1692 return 0;
1693 }
1694
1695 /**
1696 * axienet_change_mtu - Driver change mtu routine.
1697 * @ndev: Pointer to net_device structure
1698 * @new_mtu: New mtu value to be applied
1699 *
 * Return: 0 on success; -EBUSY if the interface is running; -EINVAL if the
 *	   requested MTU does not fit in the configured Rx memory.
1701 *
1702 * This is the change mtu driver routine. It checks if the Axi Ethernet
1703 * hardware supports jumbo frames before changing the mtu. This can be
1704 * called only when the device is not up.
1705 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1707 {
1708 struct axienet_local *lp = netdev_priv(ndev);
1709
1710 if (netif_running(ndev))
1711 return -EBUSY;
1712
1713 if ((new_mtu + VLAN_ETH_HLEN +
1714 XAE_TRL_SIZE) > lp->rxmem)
1715 return -EINVAL;
1716
1717 WRITE_ONCE(ndev->mtu, new_mtu);
1718
1719 return 0;
1720 }
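
/* Worked example (illustrative only, not from the original source): assuming
 * the core was synthesized with xlnx,rxmem = 0x2000 (8192 bytes), the largest
 * MTU accepted above is 8192 - VLAN_ETH_HLEN (18) - XAE_TRL_SIZE (4) = 8170
 * bytes, e.g. "ip link set ethX mtu 8170", and only while the interface is
 * down.
 */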
1721
1722 #ifdef CONFIG_NET_POLL_CONTROLLER
1723 /**
1724 * axienet_poll_controller - Axi Ethernet poll mechanism.
1725 * @ndev: Pointer to net_device structure
1726 *
1727 * This implements the Rx/Tx ISR poll mechanism. The interrupts are disabled
1728 * prior to polling the ISRs and are re-enabled once polling is done.
1729 */
1730 static void axienet_poll_controller(struct net_device *ndev)
1731 {
1732 struct axienet_local *lp = netdev_priv(ndev);
1733
1734 disable_irq(lp->tx_irq);
1735 disable_irq(lp->rx_irq);
1736 axienet_rx_irq(lp->rx_irq, ndev);
1737 axienet_tx_irq(lp->tx_irq, ndev);
1738 enable_irq(lp->tx_irq);
1739 enable_irq(lp->rx_irq);
1740 }
1741 #endif
1742
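/**
 * axienet_ioctl - Forward MII ioctls to phylink.
 * @dev: Pointer to net_device structure
 * @rq: Pointer to the ifreq carrying the MII request
 * @cmd: ioctl command (e.g. SIOCGMIIREG)
 *
 * Return: 0 on success, -EINVAL if the interface is not running, or the
 * error returned by phylink_mii_ioctl().
 */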
1743 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1744 {
1745 struct axienet_local *lp = netdev_priv(dev);
1746
1747 if (!netif_running(dev))
1748 return -EINVAL;
1749
1750 return phylink_mii_ioctl(lp->phylink, rq, cmd);
1751 }
1752
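/**
 * axienet_get_stats64 - Fill in the 64-bit device statistics.
 * @dev: Pointer to net_device structure
 * @stats: Pointer to rtnl_link_stats64 structure to be filled in
 *
 * Copies the software Rx/Tx packet and byte counters under their u64_stats
 * sync points and, when the core exposes hardware statistics
 * (XAE_FEATURE_STATS), fills in the error counters from the MAC statistics
 * registers under hw_stats_seqcount.
 */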
1753 static void
1754 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1755 {
1756 struct axienet_local *lp = netdev_priv(dev);
1757 unsigned int start;
1758
1759 netdev_stats_to_stats64(stats, &dev->stats);
1760
1761 do {
1762 start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1763 stats->rx_packets = u64_stats_read(&lp->rx_packets);
1764 stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1765 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1766
1767 do {
1768 start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1769 stats->tx_packets = u64_stats_read(&lp->tx_packets);
1770 stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1771 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1772
1773 if (!(lp->features & XAE_FEATURE_STATS))
1774 return;
1775
1776 do {
1777 start = read_seqcount_begin(&lp->hw_stats_seqcount);
1778 stats->rx_length_errors =
1779 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1780 stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1781 stats->rx_frame_errors =
1782 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1783 stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1784 axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1785 stats->rx_length_errors +
1786 stats->rx_crc_errors +
1787 stats->rx_frame_errors;
1788 stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1789
1790 stats->tx_aborted_errors =
1791 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1792 stats->tx_fifo_errors =
1793 axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1794 stats->tx_window_errors =
1795 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1796 stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1797 stats->tx_aborted_errors +
1798 stats->tx_fifo_errors +
1799 stats->tx_window_errors;
1800 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1801 }
1802
1803 static const struct net_device_ops axienet_netdev_ops = {
1804 .ndo_open = axienet_open,
1805 .ndo_stop = axienet_stop,
1806 .ndo_start_xmit = axienet_start_xmit,
1807 .ndo_get_stats64 = axienet_get_stats64,
1808 .ndo_change_mtu = axienet_change_mtu,
1809 .ndo_set_mac_address = netdev_set_mac_address,
1810 .ndo_validate_addr = eth_validate_addr,
1811 .ndo_eth_ioctl = axienet_ioctl,
1812 .ndo_set_rx_mode = axienet_set_multicast_list,
1813 #ifdef CONFIG_NET_POLL_CONTROLLER
1814 .ndo_poll_controller = axienet_poll_controller,
1815 #endif
1816 };
1817
1818 static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1819 .ndo_open = axienet_open,
1820 .ndo_stop = axienet_stop,
1821 .ndo_start_xmit = axienet_start_xmit_dmaengine,
1822 .ndo_get_stats64 = axienet_get_stats64,
1823 .ndo_change_mtu = axienet_change_mtu,
1824 .ndo_set_mac_address = netdev_set_mac_address,
1825 .ndo_validate_addr = eth_validate_addr,
1826 .ndo_eth_ioctl = axienet_ioctl,
1827 .ndo_set_rx_mode = axienet_set_multicast_list,
1828 };
1829
1830 /**
1831 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1832 * @ndev: Pointer to net_device structure
1833 * @ed: Pointer to ethtool_drvinfo structure
1834 *
1835 * This implements ethtool command for getting the driver information.
1836 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1837 */
1838 static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1839 struct ethtool_drvinfo *ed)
1840 {
1841 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1842 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1843 }
1844
1845 /**
1846 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1847 * AxiEthernet core.
1848 * @ndev: Pointer to net_device structure
1849 *
1850 * This implements ethtool command for getting the total register length
1851 * information.
1852 *
1853 * Return: the total regs length
1854 */
1855 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1856 {
1857 return sizeof(u32) * AXIENET_REGS_N;
1858 }
1859
1860 /**
1861 * axienet_ethtools_get_regs - Dump the contents of all registers present
1862 * in AxiEthernet core.
1863 * @ndev: Pointer to net_device structure
1864 * @regs: Pointer to ethtool_regs structure
1865 * @ret: Void pointer used to return the contents of the registers.
1866 *
1867 * This implements ethtool command for getting the Axi Ethernet register dump.
1868 * Issue "ethtool -d ethX" to execute this function.
1869 */
1870 static void axienet_ethtools_get_regs(struct net_device *ndev,
1871 struct ethtool_regs *regs, void *ret)
1872 {
1873 u32 *data = (u32 *)ret;
1874 size_t len = sizeof(u32) * AXIENET_REGS_N;
1875 struct axienet_local *lp = netdev_priv(ndev);
1876
1877 regs->version = 0;
1878 regs->len = len;
1879
1880 memset(data, 0, len);
1881 data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1882 data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1883 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1884 data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1885 data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1886 data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1887 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1888 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1889 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1890 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1891 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1892 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1893 data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1894 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1895 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1896 data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1897 data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1898 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1899 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1900 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1901 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1902 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1903 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1904 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1905 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1906 data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1907 data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1908 data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1909 if (!lp->use_dmaengine) {
1910 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1911 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1912 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1913 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1914 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1915 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1916 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1917 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1918 }
1919 }
1920
1921 static void
1922 axienet_ethtools_get_ringparam(struct net_device *ndev,
1923 struct ethtool_ringparam *ering,
1924 struct kernel_ethtool_ringparam *kernel_ering,
1925 struct netlink_ext_ack *extack)
1926 {
1927 struct axienet_local *lp = netdev_priv(ndev);
1928
1929 ering->rx_max_pending = RX_BD_NUM_MAX;
1930 ering->rx_mini_max_pending = 0;
1931 ering->rx_jumbo_max_pending = 0;
1932 ering->tx_max_pending = TX_BD_NUM_MAX;
1933 ering->rx_pending = lp->rx_bd_num;
1934 ering->rx_mini_pending = 0;
1935 ering->rx_jumbo_pending = 0;
1936 ering->tx_pending = lp->tx_bd_num;
1937 }
1938
1939 static int
1940 axienet_ethtools_set_ringparam(struct net_device *ndev,
1941 struct ethtool_ringparam *ering,
1942 struct kernel_ethtool_ringparam *kernel_ering,
1943 struct netlink_ext_ack *extack)
1944 {
1945 struct axienet_local *lp = netdev_priv(ndev);
1946
1947 if (ering->rx_pending > RX_BD_NUM_MAX ||
1948 ering->rx_mini_pending ||
1949 ering->rx_jumbo_pending ||
1950 ering->tx_pending < TX_BD_NUM_MIN ||
1951 ering->tx_pending > TX_BD_NUM_MAX)
1952 return -EINVAL;
1953
1954 if (netif_running(ndev))
1955 return -EBUSY;
1956
1957 lp->rx_bd_num = ering->rx_pending;
1958 lp->tx_bd_num = ering->tx_pending;
1959 return 0;
1960 }
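
/* Usage sketch (illustrative): with the interface down,
 * "ethtool -G ethX rx 512 tx 256" resizes the BD rings within the
 * [TX_BD_NUM_MIN, TX_BD_NUM_MAX] / RX_BD_NUM_MAX limits checked above;
 * the new sizes take effect the next time the interface is opened.
 */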
1961
1962 /**
1963 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1964 * Tx and Rx paths.
1965 * @ndev: Pointer to net_device structure
1966 * @epauseparm: Pointer to ethtool_pauseparam structure.
1967 *
1968 * This implements ethtool command for getting axi ethernet pause frame
1969 * setting. Issue "ethtool -a ethX" to execute this function.
1970 */
1971 static void
1972 axienet_ethtools_get_pauseparam(struct net_device *ndev,
1973 struct ethtool_pauseparam *epauseparm)
1974 {
1975 struct axienet_local *lp = netdev_priv(ndev);
1976
1977 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1978 }
1979
1980 /**
1981 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1982 * settings.
1983 * @ndev: Pointer to net_device structure
1984 * @epauseparm: Pointer to ethtool_pauseparam structure
1985 *
1986 * This implements ethtool command for enabling flow control on Rx and Tx
1987 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1988 * function.
1989 *
1990 * Return: 0 on success, or a negative error code returned by phylink.
1991 */
1992 static int
1993 axienet_ethtools_set_pauseparam(struct net_device *ndev,
1994 struct ethtool_pauseparam *epauseparm)
1995 {
1996 struct axienet_local *lp = netdev_priv(ndev);
1997
1998 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
1999 }
2000
2001 /**
2002 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2003 * @ndev: Pointer to net_device structure
2004 * @ecoalesce: Pointer to ethtool_coalesce structure
2005 * @kernel_coal: ethtool CQE mode setting structure
2006 * @extack: extack for reporting error messages
2007 *
2008 * This implements ethtool command for getting the DMA interrupt coalescing
2009 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2010 * execute this function.
2011 *
2012 * Return: 0 always
2013 */
2014 static int
2015 axienet_ethtools_get_coalesce(struct net_device *ndev,
2016 struct ethtool_coalesce *ecoalesce,
2017 struct kernel_ethtool_coalesce *kernel_coal,
2018 struct netlink_ext_ack *extack)
2019 {
2020 struct axienet_local *lp = netdev_priv(ndev);
2021
2022 ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2023 ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2024 ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2025 ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
2026 return 0;
2027 }
2028
2029 /**
2030 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2031 * @ndev: Pointer to net_device structure
2032 * @ecoalesce: Pointer to ethtool_coalesce structure
2033 * @kernel_coal: ethtool CQE mode setting structure
2034 * @extack: extack for reporting error messages
2035 *
2036 * This implements ethtool command for setting the DMA interrupt coalescing
2037 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2038 * prompt to execute this function.
2039 *
2040 * Return: 0 on success, or a negative error value on failure.
2041 */
2042 static int
2043 axienet_ethtools_set_coalesce(struct net_device *ndev,
2044 struct ethtool_coalesce *ecoalesce,
2045 struct kernel_ethtool_coalesce *kernel_coal,
2046 struct netlink_ext_ack *extack)
2047 {
2048 struct axienet_local *lp = netdev_priv(ndev);
2049
2050 if (netif_running(ndev)) {
2051 NL_SET_ERR_MSG(extack,
2052 "Please stop netif before applying configuration");
2053 return -EBUSY;
2054 }
2055
2056 if (ecoalesce->rx_max_coalesced_frames > 255 ||
2057 ecoalesce->tx_max_coalesced_frames > 255) {
2058 NL_SET_ERR_MSG(extack, "frames must be less than 256");
2059 return -EINVAL;
2060 }
2061
2062 if (!ecoalesce->rx_max_coalesced_frames ||
2063 !ecoalesce->tx_max_coalesced_frames) {
2064 NL_SET_ERR_MSG(extack, "frames must be non-zero");
2065 return -EINVAL;
2066 }
2067
2068 if ((ecoalesce->rx_max_coalesced_frames > 1 &&
2069 !ecoalesce->rx_coalesce_usecs) ||
2070 (ecoalesce->tx_max_coalesced_frames > 1 &&
2071 !ecoalesce->tx_coalesce_usecs)) {
2072 NL_SET_ERR_MSG(extack,
2073 "usecs must be non-zero when frames is greater than one");
2074 return -EINVAL;
2075 }
2076
2077 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2078 lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2079 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2080 lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2081
2082 return 0;
2083 }
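
/* Usage sketch (illustrative): with the interface down,
 * "ethtool -C ethX rx-frames 16 rx-usecs 50 tx-frames 16 tx-usecs 50"
 * satisfies the checks above (frames in 1..255, and non-zero usecs whenever
 * more than one frame is coalesced).
 */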
2084
2085 static int
2086 axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2087 struct ethtool_link_ksettings *cmd)
2088 {
2089 struct axienet_local *lp = netdev_priv(ndev);
2090
2091 return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2092 }
2093
2094 static int
2095 axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2096 const struct ethtool_link_ksettings *cmd)
2097 {
2098 struct axienet_local *lp = netdev_priv(ndev);
2099
2100 return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2101 }
2102
2103 static int axienet_ethtools_nway_reset(struct net_device *dev)
2104 {
2105 struct axienet_local *lp = netdev_priv(dev);
2106
2107 return phylink_ethtool_nway_reset(lp->phylink);
2108 }
2109
2110 static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2111 struct ethtool_stats *stats,
2112 u64 *data)
2113 {
2114 struct axienet_local *lp = netdev_priv(dev);
2115 unsigned int start;
2116
2117 do {
2118 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2119 data[0] = axienet_stat(lp, STAT_RX_BYTES);
2120 data[1] = axienet_stat(lp, STAT_TX_BYTES);
2121 data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2122 data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2123 data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2124 data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2125 data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
2126 data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
2127 data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
2128 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2129 }
2130
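/* Note: the order of these strings must match the data[] indices filled in
 * by axienet_ethtools_get_ethtool_stats() above.
 */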
2131 static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2132 "Received bytes",
2133 "Transmitted bytes",
2134 "RX Good VLAN Tagged Frames",
2135 "TX Good VLAN Tagged Frames",
2136 "TX Good PFC Frames",
2137 "RX Good PFC Frames",
2138 "User Defined Counter 0",
2139 "User Defined Counter 1",
2140 "User Defined Counter 2",
2141 };
2142
2143 static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2144 {
2145 switch (stringset) {
2146 case ETH_SS_STATS:
2147 memcpy(data, axienet_ethtool_stats_strings,
2148 sizeof(axienet_ethtool_stats_strings));
2149 break;
2150 }
2151 }
2152
2153 static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2154 {
2155 struct axienet_local *lp = netdev_priv(dev);
2156
2157 switch (sset) {
2158 case ETH_SS_STATS:
2159 if (lp->features & XAE_FEATURE_STATS)
2160 return ARRAY_SIZE(axienet_ethtool_stats_strings);
2161 fallthrough;
2162 default:
2163 return -EOPNOTSUPP;
2164 }
2165 }
2166
2167 static void
2168 axienet_ethtools_get_pause_stats(struct net_device *dev,
2169 struct ethtool_pause_stats *pause_stats)
2170 {
2171 struct axienet_local *lp = netdev_priv(dev);
2172 unsigned int start;
2173
2174 if (!(lp->features & XAE_FEATURE_STATS))
2175 return;
2176
2177 do {
2178 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2179 pause_stats->tx_pause_frames =
2180 axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2181 pause_stats->rx_pause_frames =
2182 axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2183 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2184 }
2185
2186 static void
2187 axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2188 struct ethtool_eth_mac_stats *mac_stats)
2189 {
2190 struct axienet_local *lp = netdev_priv(dev);
2191 unsigned int start;
2192
2193 if (!(lp->features & XAE_FEATURE_STATS))
2194 return;
2195
2196 do {
2197 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2198 mac_stats->FramesTransmittedOK =
2199 axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2200 mac_stats->SingleCollisionFrames =
2201 axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2202 mac_stats->MultipleCollisionFrames =
2203 axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2204 mac_stats->FramesReceivedOK =
2205 axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2206 mac_stats->FrameCheckSequenceErrors =
2207 axienet_stat(lp, STAT_RX_FCS_ERRORS);
2208 mac_stats->AlignmentErrors =
2209 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2210 mac_stats->FramesWithDeferredXmissions =
2211 axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2212 mac_stats->LateCollisions =
2213 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2214 mac_stats->FramesAbortedDueToXSColls =
2215 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2216 mac_stats->MulticastFramesXmittedOK =
2217 axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2218 mac_stats->BroadcastFramesXmittedOK =
2219 axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2220 mac_stats->FramesWithExcessiveDeferral =
2221 axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2222 mac_stats->MulticastFramesReceivedOK =
2223 axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2224 mac_stats->BroadcastFramesReceivedOK =
2225 axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2226 mac_stats->InRangeLengthErrors =
2227 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2228 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2229 }
2230
2231 static void
2232 axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2233 struct ethtool_eth_ctrl_stats *ctrl_stats)
2234 {
2235 struct axienet_local *lp = netdev_priv(dev);
2236 unsigned int start;
2237
2238 if (!(lp->features & XAE_FEATURE_STATS))
2239 return;
2240
2241 do {
2242 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2243 ctrl_stats->MACControlFramesTransmitted =
2244 axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2245 ctrl_stats->MACControlFramesReceived =
2246 axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2247 ctrl_stats->UnsupportedOpcodesReceived =
2248 axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2249 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2250 }
2251
2252 static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2253 { 64, 64 },
2254 { 65, 127 },
2255 { 128, 255 },
2256 { 256, 511 },
2257 { 512, 1023 },
2258 { 1024, 1518 },
2259 { 1519, 16384 },
2260 { },
2261 };
2262
2263 static void
2264 axienet_ethtool_get_rmon_stats(struct net_device *dev,
2265 struct ethtool_rmon_stats *rmon_stats,
2266 const struct ethtool_rmon_hist_range **ranges)
2267 {
2268 struct axienet_local *lp = netdev_priv(dev);
2269 unsigned int start;
2270
2271 if (!(lp->features & XAE_FEATURE_STATS))
2272 return;
2273
2274 do {
2275 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2276 rmon_stats->undersize_pkts =
2277 axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2278 rmon_stats->oversize_pkts =
2279 axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2280 rmon_stats->fragments =
2281 axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2282
2283 rmon_stats->hist[0] =
2284 axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2285 rmon_stats->hist[1] =
2286 axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2287 rmon_stats->hist[2] =
2288 axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2289 rmon_stats->hist[3] =
2290 axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2291 rmon_stats->hist[4] =
2292 axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2293 rmon_stats->hist[5] =
2294 axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2295 rmon_stats->hist[6] =
2296 rmon_stats->oversize_pkts;
2297
2298 rmon_stats->hist_tx[0] =
2299 axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2300 rmon_stats->hist_tx[1] =
2301 axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2302 rmon_stats->hist_tx[2] =
2303 axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2304 rmon_stats->hist_tx[3] =
2305 axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2306 rmon_stats->hist_tx[4] =
2307 axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2308 rmon_stats->hist_tx[5] =
2309 axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2310 rmon_stats->hist_tx[6] =
2311 axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2312 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2313
2314 *ranges = axienet_rmon_ranges;
2315 }
2316
2317 static const struct ethtool_ops axienet_ethtool_ops = {
2318 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2319 ETHTOOL_COALESCE_USECS,
2320 .get_drvinfo = axienet_ethtools_get_drvinfo,
2321 .get_regs_len = axienet_ethtools_get_regs_len,
2322 .get_regs = axienet_ethtools_get_regs,
2323 .get_link = ethtool_op_get_link,
2324 .get_ringparam = axienet_ethtools_get_ringparam,
2325 .set_ringparam = axienet_ethtools_set_ringparam,
2326 .get_pauseparam = axienet_ethtools_get_pauseparam,
2327 .set_pauseparam = axienet_ethtools_set_pauseparam,
2328 .get_coalesce = axienet_ethtools_get_coalesce,
2329 .set_coalesce = axienet_ethtools_set_coalesce,
2330 .get_link_ksettings = axienet_ethtools_get_link_ksettings,
2331 .set_link_ksettings = axienet_ethtools_set_link_ksettings,
2332 .nway_reset = axienet_ethtools_nway_reset,
2333 .get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2334 .get_strings = axienet_ethtools_get_strings,
2335 .get_sset_count = axienet_ethtools_get_sset_count,
2336 .get_pause_stats = axienet_ethtools_get_pause_stats,
2337 .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2338 .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2339 .get_rmon_stats = axienet_ethtool_get_rmon_stats,
2340 };
2341
2342 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2343 {
2344 return container_of(pcs, struct axienet_local, pcs);
2345 }
2346
2347 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2348 unsigned int neg_mode,
2349 struct phylink_link_state *state)
2350 {
2351 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2352
2353 phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
2354 }
2355
2356 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2357 {
2358 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2359
2360 phylink_mii_c22_pcs_an_restart(pcs_phy);
2361 }
2362
2363 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2364 phy_interface_t interface,
2365 const unsigned long *advertising,
2366 bool permit_pause_to_mac)
2367 {
2368 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2369 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2370 struct axienet_local *lp = netdev_priv(ndev);
2371 int ret;
2372
2373 if (lp->switch_x_sgmii) {
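/* The PCS/PMA core was built with dynamic SGMII/1000BASE-X switching
 * enabled; select the standard that matches the requested interface.
 */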
2374 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2375 interface == PHY_INTERFACE_MODE_SGMII ?
2376 XLNX_MII_STD_SELECT_SGMII : 0);
2377 if (ret < 0) {
2378 netdev_warn(ndev,
2379 "Failed to switch PHY interface: %d\n",
2380 ret);
2381 return ret;
2382 }
2383 }
2384
2385 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2386 neg_mode);
2387 if (ret < 0)
2388 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2389
2390 return ret;
2391 }
2392
2393 static const struct phylink_pcs_ops axienet_pcs_ops = {
2394 .pcs_get_state = axienet_pcs_get_state,
2395 .pcs_config = axienet_pcs_config,
2396 .pcs_an_restart = axienet_pcs_an_restart,
2397 };
2398
2399 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2400 phy_interface_t interface)
2401 {
2402 struct net_device *ndev = to_net_dev(config->dev);
2403 struct axienet_local *lp = netdev_priv(ndev);
2404
2405 if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2406 interface == PHY_INTERFACE_MODE_SGMII)
2407 return &lp->pcs;
2408
2409 return NULL;
2410 }
2411
2412 static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2413 const struct phylink_link_state *state)
2414 {
2415 /* nothing meaningful to do */
2416 }
2417
2418 static void axienet_mac_link_down(struct phylink_config *config,
2419 unsigned int mode,
2420 phy_interface_t interface)
2421 {
2422 /* nothing meaningful to do */
2423 }
2424
2425 static void axienet_mac_link_up(struct phylink_config *config,
2426 struct phy_device *phy,
2427 unsigned int mode, phy_interface_t interface,
2428 int speed, int duplex,
2429 bool tx_pause, bool rx_pause)
2430 {
2431 struct net_device *ndev = to_net_dev(config->dev);
2432 struct axienet_local *lp = netdev_priv(ndev);
2433 u32 emmc_reg, fcc_reg;
2434
2435 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2436 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2437
2438 switch (speed) {
2439 case SPEED_1000:
2440 emmc_reg |= XAE_EMMC_LINKSPD_1000;
2441 break;
2442 case SPEED_100:
2443 emmc_reg |= XAE_EMMC_LINKSPD_100;
2444 break;
2445 case SPEED_10:
2446 emmc_reg |= XAE_EMMC_LINKSPD_10;
2447 break;
2448 default:
2449 dev_err(&ndev->dev,
2450 "Speed other than 10, 100 or 1Gbps is not supported\n");
2451 break;
2452 }
2453
2454 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2455
2456 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2457 if (tx_pause)
2458 fcc_reg |= XAE_FCC_FCTX_MASK;
2459 else
2460 fcc_reg &= ~XAE_FCC_FCTX_MASK;
2461 if (rx_pause)
2462 fcc_reg |= XAE_FCC_FCRX_MASK;
2463 else
2464 fcc_reg &= ~XAE_FCC_FCRX_MASK;
2465 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2466 }
2467
2468 static const struct phylink_mac_ops axienet_phylink_ops = {
2469 .mac_select_pcs = axienet_mac_select_pcs,
2470 .mac_config = axienet_mac_config,
2471 .mac_link_down = axienet_mac_link_down,
2472 .mac_link_up = axienet_mac_link_up,
2473 };
2474
2475 /**
2476 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2477 * @work: pointer to work_struct
2478 *
2479 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2480 * Tx/Rx BDs.
2481 */
2482 static void axienet_dma_err_handler(struct work_struct *work)
2483 {
2484 u32 i;
2485 u32 axienet_status;
2486 struct axidma_bd *cur_p;
2487 struct axienet_local *lp = container_of(work, struct axienet_local,
2488 dma_err_task);
2489 struct net_device *ndev = lp->ndev;
2490
2491 /* Don't bother if we are going to stop anyway */
2492 if (READ_ONCE(lp->stopping))
2493 return;
2494
2495 napi_disable(&lp->napi_tx);
2496 napi_disable(&lp->napi_rx);
2497
2498 axienet_setoptions(ndev, lp->options &
2499 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2500
2501 axienet_dma_stop(lp);
2502
2503 for (i = 0; i < lp->tx_bd_num; i++) {
2504 cur_p = &lp->tx_bd_v[i];
2505 if (cur_p->cntrl) {
2506 dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2507
2508 dma_unmap_single(lp->dev, addr,
2509 (cur_p->cntrl &
2510 XAXIDMA_BD_CTRL_LENGTH_MASK),
2511 DMA_TO_DEVICE);
2512 }
2513 if (cur_p->skb)
2514 dev_kfree_skb_irq(cur_p->skb);
2515 cur_p->phys = 0;
2516 cur_p->phys_msb = 0;
2517 cur_p->cntrl = 0;
2518 cur_p->status = 0;
2519 cur_p->app0 = 0;
2520 cur_p->app1 = 0;
2521 cur_p->app2 = 0;
2522 cur_p->app3 = 0;
2523 cur_p->app4 = 0;
2524 cur_p->skb = NULL;
2525 }
2526
2527 for (i = 0; i < lp->rx_bd_num; i++) {
2528 cur_p = &lp->rx_bd_v[i];
2529 cur_p->status = 0;
2530 cur_p->app0 = 0;
2531 cur_p->app1 = 0;
2532 cur_p->app2 = 0;
2533 cur_p->app3 = 0;
2534 cur_p->app4 = 0;
2535 }
2536
2537 lp->tx_bd_ci = 0;
2538 lp->tx_bd_tail = 0;
2539 lp->rx_bd_ci = 0;
2540
2541 axienet_dma_start(lp);
2542
2543 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2544 axienet_status &= ~XAE_RCW1_RX_MASK;
2545 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2546
2547 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2548 if (axienet_status & XAE_INT_RXRJECT_MASK)
2549 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2550 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2551 XAE_INT_RECV_ERROR_MASK : 0);
2552 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2553
2554 /* Sync default options with HW but leave receiver and
2555 * transmitter disabled.
2556 */
2557 axienet_setoptions(ndev, lp->options &
2558 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2559 axienet_set_mac_address(ndev, NULL);
2560 axienet_set_multicast_list(ndev);
2561 napi_enable(&lp->napi_rx);
2562 napi_enable(&lp->napi_tx);
2563 axienet_setoptions(ndev, lp->options);
2564 }
2565
2566 /**
2567 * axienet_probe - Axi Ethernet probe function.
2568 * @pdev: Pointer to platform device structure.
2569 *
2570 * Return: 0, on success
2571 * Non-zero error value on failure.
2572 *
2573 * This is the probe routine for Axi Ethernet driver. This is called before
2574 * any other driver routines are invoked. It allocates and sets up the Ethernet
2575 * device. Parses through device tree and populates fields of
2576 * axienet_local. It registers the Ethernet device.
2577 */
2578 static int axienet_probe(struct platform_device *pdev)
2579 {
2580 int ret;
2581 struct device_node *np;
2582 struct axienet_local *lp;
2583 struct net_device *ndev;
2584 struct resource *ethres;
2585 u8 mac_addr[ETH_ALEN];
2586 int addr_width = 32;
2587 u32 value;
2588
2589 ndev = alloc_etherdev(sizeof(*lp));
2590 if (!ndev)
2591 return -ENOMEM;
2592
2593 platform_set_drvdata(pdev, ndev);
2594
2595 SET_NETDEV_DEV(ndev, &pdev->dev);
2596 ndev->features = NETIF_F_SG;
2597 ndev->ethtool_ops = &axienet_ethtool_ops;
2598
2599 /* MTU range: 64 - 9000 */
2600 ndev->min_mtu = 64;
2601 ndev->max_mtu = XAE_JUMBO_MTU;
2602
2603 lp = netdev_priv(ndev);
2604 lp->ndev = ndev;
2605 lp->dev = &pdev->dev;
2606 lp->options = XAE_OPTION_DEFAULTS;
2607 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2608 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2609
2610 u64_stats_init(&lp->rx_stat_sync);
2611 u64_stats_init(&lp->tx_stat_sync);
2612
2613 mutex_init(&lp->stats_lock);
2614 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2615 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2616
2617 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2618 if (!lp->axi_clk) {
2619 /* For backward compatibility, if named AXI clock is not present,
2620 * treat the first clock specified as the AXI clock.
2621 */
2622 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2623 }
2624 if (IS_ERR(lp->axi_clk)) {
2625 ret = PTR_ERR(lp->axi_clk);
2626 goto free_netdev;
2627 }
2628 ret = clk_prepare_enable(lp->axi_clk);
2629 if (ret) {
2630 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2631 goto free_netdev;
2632 }
2633
2634 lp->misc_clks[0].id = "axis_clk";
2635 lp->misc_clks[1].id = "ref_clk";
2636 lp->misc_clks[2].id = "mgt_clk";
2637
2638 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2639 if (ret)
2640 goto cleanup_clk;
2641
2642 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2643 if (ret)
2644 goto cleanup_clk;
2645
2646 /* Map device registers */
2647 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2648 if (IS_ERR(lp->regs)) {
2649 ret = PTR_ERR(lp->regs);
2650 goto cleanup_clk;
2651 }
2652 lp->regs_start = ethres->start;
2653
2654 /* Setup checksum offload, but default to off if not specified */
2655 lp->features = 0;
2656
2657 if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2658 lp->features |= XAE_FEATURE_STATS;
2659
2660 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2661 if (!ret) {
2662 switch (value) {
2663 case 1:
2664 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2665 /* Can checksum any contiguous range */
2666 ndev->features |= NETIF_F_HW_CSUM;
2667 break;
2668 case 2:
2669 lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2670 /* Can checksum TCP/UDP over IPv4. */
2671 ndev->features |= NETIF_F_IP_CSUM;
2672 break;
2673 }
2674 }
2675 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2676 if (!ret) {
2677 switch (value) {
2678 case 1:
2679 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2680 ndev->features |= NETIF_F_RXCSUM;
2681 break;
2682 case 2:
2683 lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2684 ndev->features |= NETIF_F_RXCSUM;
2685 break;
2686 }
2687 }
2688 /* To support jumbo frames, the Axi Ethernet hardware must be configured
2689 * with enough Rx/Tx buffer memory to hold a full jumbo frame; only then
2690 * can the jumbo option be enabled. Read the amount of Rx memory allocated
2691 * in hardware from the device tree (xlnx,rxmem) so that the MTU checks
2692 * reflect the actual hardware configuration.
2693 */
2694 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2695
2696 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2697 "xlnx,switch-x-sgmii");
2698
2699 /* Start with the proprietary, and broken phy_type */
2700 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2701 if (!ret) {
2702 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2703 switch (value) {
2704 case XAE_PHY_TYPE_MII:
2705 lp->phy_mode = PHY_INTERFACE_MODE_MII;
2706 break;
2707 case XAE_PHY_TYPE_GMII:
2708 lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2709 break;
2710 case XAE_PHY_TYPE_RGMII_2_0:
2711 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2712 break;
2713 case XAE_PHY_TYPE_SGMII:
2714 lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2715 break;
2716 case XAE_PHY_TYPE_1000BASE_X:
2717 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2718 break;
2719 default:
2720 ret = -EINVAL;
2721 goto cleanup_clk;
2722 }
2723 } else {
2724 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2725 if (ret)
2726 goto cleanup_clk;
2727 }
2728 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2729 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2730 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2731 ret = -EINVAL;
2732 goto cleanup_clk;
2733 }
2734
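/* Illustrative device tree sketch (node and label names are hypothetical):
 * the legacy binding points at the AXI DMA block via
 * "axistream-connected = <&axi_dma>;", while the dmaengine-based binding
 * instead provides "dmas"/"dma-names" entries such as
 * dma-names = "tx_chan0", "rx_chan0". The presence of a "dmas" property
 * below selects the dmaengine path.
 */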
2735 if (!of_property_present(pdev->dev.of_node, "dmas")) {
2736 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2737 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2738
2739 if (np) {
2740 struct resource dmares;
2741
2742 ret = of_address_to_resource(np, 0, &dmares);
2743 if (ret) {
2744 dev_err(&pdev->dev,
2745 "unable to get DMA resource\n");
2746 of_node_put(np);
2747 goto cleanup_clk;
2748 }
2749 lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2750 &dmares);
2751 lp->rx_irq = irq_of_parse_and_map(np, 1);
2752 lp->tx_irq = irq_of_parse_and_map(np, 0);
2753 of_node_put(np);
2754 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2755 } else {
2756 /* Check for these resources directly on the Ethernet node. */
2757 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2758 lp->rx_irq = platform_get_irq(pdev, 1);
2759 lp->tx_irq = platform_get_irq(pdev, 0);
2760 lp->eth_irq = platform_get_irq_optional(pdev, 2);
2761 }
2762 if (IS_ERR(lp->dma_regs)) {
2763 dev_err(&pdev->dev, "could not map DMA regs\n");
2764 ret = PTR_ERR(lp->dma_regs);
2765 goto cleanup_clk;
2766 }
2767 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2768 dev_err(&pdev->dev, "could not determine irqs\n");
2769 ret = -ENOMEM;
2770 goto cleanup_clk;
2771 }
2772
2773 /* Reset core now that clocks are enabled, prior to accessing MDIO */
2774 ret = __axienet_device_reset(lp);
2775 if (ret)
2776 goto cleanup_clk;
2777
2778 /* Autodetect the need for 64-bit DMA pointers.
2779 * When the IP is configured for a bus width bigger than 32 bits,
2780 * writing the MSB registers is mandatory, even if they are all 0.
2781 * We can detect this case by writing all 1's to one such register
2782 * and see if that sticks: when the IP is configured for 32 bits
2783 * only, those registers are RES0.
2784 * Those MSB registers were introduced in IP v7.1, which we check first.
2785 */
2786 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2787 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2788
2789 iowrite32(0x0, desc);
2790 if (ioread32(desc) == 0) { /* sanity check */
2791 iowrite32(0xffffffff, desc);
2792 if (ioread32(desc) > 0) {
2793 lp->features |= XAE_FEATURE_DMA_64BIT;
2794 addr_width = 64;
2795 dev_info(&pdev->dev,
2796 "autodetected 64-bit DMA range\n");
2797 }
2798 iowrite32(0x0, desc);
2799 }
2800 }
2801 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2802 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2803 ret = -EINVAL;
2804 goto cleanup_clk;
2805 }
2806
2807 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2808 if (ret) {
2809 dev_err(&pdev->dev, "No suitable DMA available\n");
2810 goto cleanup_clk;
2811 }
2812 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2813 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2814 } else {
2815 struct xilinx_vdma_config cfg;
2816 struct dma_chan *tx_chan;
2817
2818 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2819 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2820 ret = lp->eth_irq;
2821 goto cleanup_clk;
2822 }
2823 tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2824 if (IS_ERR(tx_chan)) {
2825 ret = PTR_ERR(tx_chan);
2826 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2827 goto cleanup_clk;
2828 }
2829
2830 cfg.reset = 1;
2831 /* Despite the "VDMA" name, this config interface also supports resetting AXI DMA channels */
2832 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2833 if (ret < 0) {
2834 dev_err(&pdev->dev, "Reset channel failed\n");
2835 dma_release_channel(tx_chan);
2836 goto cleanup_clk;
2837 }
2838
2839 dma_release_channel(tx_chan);
2840 lp->use_dmaengine = 1;
2841 }
2842
2843 if (lp->use_dmaengine)
2844 ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2845 else
2846 ndev->netdev_ops = &axienet_netdev_ops;
2847 /* Check for Ethernet core IRQ (optional) */
2848 if (lp->eth_irq <= 0)
2849 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2850
2851 /* Retrieve the MAC address */
2852 ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2853 if (!ret) {
2854 axienet_set_mac_address(ndev, mac_addr);
2855 } else {
2856 dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2857 ret);
2858 axienet_set_mac_address(ndev, NULL);
2859 }
2860
2861 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2862 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2863 lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2864 lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2865
2866 ret = axienet_mdio_setup(lp);
2867 if (ret)
2868 dev_warn(&pdev->dev,
2869 "error registering MDIO bus: %d\n", ret);
2870
2871 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2872 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2873 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2874 if (!np) {
2875 /* Deprecated: Always use "pcs-handle" for pcs_phy.
2876 * Falling back to "phy-handle" here is only for
2877 * backward compatibility with old device trees.
2878 */
2879 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2880 }
2881 if (!np) {
2882 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2883 ret = -EINVAL;
2884 goto cleanup_mdio;
2885 }
2886 lp->pcs_phy = of_mdio_find_device(np);
2887 if (!lp->pcs_phy) {
2888 ret = -EPROBE_DEFER;
2889 of_node_put(np);
2890 goto cleanup_mdio;
2891 }
2892 of_node_put(np);
2893 lp->pcs.ops = &axienet_pcs_ops;
2894 lp->pcs.neg_mode = true;
2895 lp->pcs.poll = true;
2896 }
2897
2898 lp->phylink_config.dev = &ndev->dev;
2899 lp->phylink_config.type = PHYLINK_NETDEV;
2900 lp->phylink_config.mac_managed_pm = true;
2901 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2902 MAC_10FD | MAC_100FD | MAC_1000FD;
2903
2904 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2905 if (lp->switch_x_sgmii) {
2906 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
2907 lp->phylink_config.supported_interfaces);
2908 __set_bit(PHY_INTERFACE_MODE_SGMII,
2909 lp->phylink_config.supported_interfaces);
2910 }
2911
2912 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2913 lp->phy_mode,
2914 &axienet_phylink_ops);
2915 if (IS_ERR(lp->phylink)) {
2916 ret = PTR_ERR(lp->phylink);
2917 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2918 goto cleanup_mdio;
2919 }
2920
2921 ret = register_netdev(lp->ndev);
2922 if (ret) {
2923 dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2924 goto cleanup_phylink;
2925 }
2926
2927 return 0;
2928
2929 cleanup_phylink:
2930 phylink_destroy(lp->phylink);
2931
2932 cleanup_mdio:
2933 if (lp->pcs_phy)
2934 put_device(&lp->pcs_phy->dev);
2935 if (lp->mii_bus)
2936 axienet_mdio_teardown(lp);
2937 cleanup_clk:
2938 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2939 clk_disable_unprepare(lp->axi_clk);
2940
2941 free_netdev:
2942 free_netdev(ndev);
2943
2944 return ret;
2945 }
2946
2947 static void axienet_remove(struct platform_device *pdev)
2948 {
2949 struct net_device *ndev = platform_get_drvdata(pdev);
2950 struct axienet_local *lp = netdev_priv(ndev);
2951
2952 unregister_netdev(ndev);
2953
2954 if (lp->phylink)
2955 phylink_destroy(lp->phylink);
2956
2957 if (lp->pcs_phy)
2958 put_device(&lp->pcs_phy->dev);
2959
2960 axienet_mdio_teardown(lp);
2961
2962 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2963 clk_disable_unprepare(lp->axi_clk);
2964
2965 free_netdev(ndev);
2966 }
2967
2968 static void axienet_shutdown(struct platform_device *pdev)
2969 {
2970 struct net_device *ndev = platform_get_drvdata(pdev);
2971
2972 rtnl_lock();
2973 netif_device_detach(ndev);
2974
2975 if (netif_running(ndev))
2976 dev_close(ndev);
2977
2978 rtnl_unlock();
2979 }
2980
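/**
 * axienet_suspend - Suspend callback.
 * @dev: Device being suspended
 *
 * If the interface is running, detach it from the stack and shut it down
 * under the RTNL lock so that it can be brought back up in axienet_resume().
 *
 * Return: Always 0.
 */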
2981 static int axienet_suspend(struct device *dev)
2982 {
2983 struct net_device *ndev = dev_get_drvdata(dev);
2984
2985 if (!netif_running(ndev))
2986 return 0;
2987
2988 netif_device_detach(ndev);
2989
2990 rtnl_lock();
2991 axienet_stop(ndev);
2992 rtnl_unlock();
2993
2994 return 0;
2995 }
2996
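/**
 * axienet_resume - Resume callback.
 * @dev: Device being resumed
 *
 * If the interface was running before suspend, reopen it under the RTNL
 * lock and re-attach it to the stack.
 *
 * Return: Always 0.
 */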
2997 static int axienet_resume(struct device *dev)
2998 {
2999 struct net_device *ndev = dev_get_drvdata(dev);
3000
3001 if (!netif_running(ndev))
3002 return 0;
3003
3004 rtnl_lock();
3005 axienet_open(ndev);
3006 rtnl_unlock();
3007
3008 netif_device_attach(ndev);
3009
3010 return 0;
3011 }
3012
3013 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
3014 axienet_suspend, axienet_resume);
3015
3016 static struct platform_driver axienet_driver = {
3017 .probe = axienet_probe,
3018 .remove = axienet_remove,
3019 .shutdown = axienet_shutdown,
3020 .driver = {
3021 .name = "xilinx_axienet",
3022 .pm = &axienet_pm_ops,
3023 .of_match_table = axienet_of_match,
3024 },
3025 };
3026
3027 module_platform_driver(axienet_driver);
3028
3029 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3030 MODULE_AUTHOR("Xilinx");
3031 MODULE_LICENSE("GPL");
3032