// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
59
60 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61
62 #include <linux/kernel.h>
63 #include <linux/platform_device.h>
64 #include <linux/string.h>
65 #include <linux/errno.h>
66 #include <linux/unistd.h>
67 #include <linux/slab.h>
68 #include <linux/interrupt.h>
69 #include <linux/delay.h>
70 #include <linux/netdevice.h>
71 #include <linux/etherdevice.h>
72 #include <linux/skbuff.h>
73 #include <linux/if_vlan.h>
74 #include <linux/spinlock.h>
75 #include <linux/mm.h>
76 #include <linux/of_address.h>
77 #include <linux/of_irq.h>
78 #include <linux/of_mdio.h>
79 #include <linux/ip.h>
80 #include <linux/tcp.h>
81 #include <linux/udp.h>
82 #include <linux/in.h>
83 #include <linux/net_tstamp.h>
84
85 #include <asm/io.h>
86 #ifdef CONFIG_PPC
87 #include <asm/reg.h>
88 #include <asm/mpc85xx.h>
89 #endif
90 #include <asm/irq.h>
91 #include <linux/uaccess.h>
92 #include <linux/module.h>
93 #include <linux/dma-mapping.h>
94 #include <linux/crc32.h>
95 #include <linux/mii.h>
96 #include <linux/phy.h>
97 #include <linux/phy_fixed.h>
98 #include <linux/of.h>
99 #include <linux/of_net.h>
100 #include <linux/property.h>
101
102 #include "gianfar.h"
103
104 #define TX_TIMEOUT (5*HZ)
105
106 MODULE_AUTHOR("Freescale Semiconductor, Inc");
107 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
108 MODULE_LICENSE("GPL");
109
/* Initialize one Rx buffer descriptor: point it at @buf and hand it to
 * the controller (EMPTY) with Rx interrupts requested.  The final
 * descriptor of the ring additionally carries the WRAP flag.
 */
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);

	bdp->bufPtr = cpu_to_be32(buf);

	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	/* Ensure the buffer pointer is visible before marking the BD EMPTY */
	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}
125
gfar_init_tx_rx_base(struct gfar_private * priv)126 static void gfar_init_tx_rx_base(struct gfar_private *priv)
127 {
128 struct gfar __iomem *regs = priv->gfargrp[0].regs;
129 u32 __iomem *baddr;
130 int i;
131
132 baddr = ®s->tbase0;
133 for (i = 0; i < priv->num_tx_queues; i++) {
134 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
135 baddr += 2;
136 }
137
138 baddr = ®s->rbase0;
139 for (i = 0; i < priv->num_rx_queues; i++) {
140 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
141 baddr += 2;
142 }
143 }
144
gfar_init_rqprm(struct gfar_private * priv)145 static void gfar_init_rqprm(struct gfar_private *priv)
146 {
147 struct gfar __iomem *regs = priv->gfargrp[0].regs;
148 u32 __iomem *baddr;
149 int i;
150
151 baddr = ®s->rqprm0;
152 for (i = 0; i < priv->num_rx_queues; i++) {
153 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
154 (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
155 baddr++;
156 }
157 }
158
gfar_rx_offload_en(struct gfar_private * priv)159 static void gfar_rx_offload_en(struct gfar_private *priv)
160 {
161 /* set this when rx hw offload (TOE) functions are being used */
162 priv->uses_rxfcb = 0;
163
164 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
165 priv->uses_rxfcb = 1;
166
167 if (priv->hwts_rx_en || priv->rx_filer_enable)
168 priv->uses_rxfcb = 1;
169 }
170
gfar_mac_rx_config(struct gfar_private * priv)171 static void gfar_mac_rx_config(struct gfar_private *priv)
172 {
173 struct gfar __iomem *regs = priv->gfargrp[0].regs;
174 u32 rctrl = 0;
175
176 if (priv->rx_filer_enable) {
177 rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
178 /* Program the RIR0 reg with the required distribution */
179 gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0);
180 }
181
182 /* Restore PROMISC mode */
183 if (priv->ndev->flags & IFF_PROMISC)
184 rctrl |= RCTRL_PROM;
185
186 if (priv->ndev->features & NETIF_F_RXCSUM)
187 rctrl |= RCTRL_CHECKSUMMING;
188
189 if (priv->extended_hash)
190 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
191
192 if (priv->padding) {
193 rctrl &= ~RCTRL_PAL_MASK;
194 rctrl |= RCTRL_PADDING(priv->padding);
195 }
196
197 /* Enable HW time stamping if requested from user space */
198 if (priv->hwts_rx_en)
199 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
200
201 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
202 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
203
204 /* Clear the LFC bit */
205 gfar_write(®s->rctrl, rctrl);
206 /* Init flow control threshold values */
207 gfar_init_rqprm(priv);
208 gfar_write(®s->ptv, DEFAULT_LFC_PTVVAL);
209 rctrl |= RCTRL_LFC;
210
211 /* Init rctrl based on our settings */
212 gfar_write(®s->rctrl, rctrl);
213 }
214
gfar_mac_tx_config(struct gfar_private * priv)215 static void gfar_mac_tx_config(struct gfar_private *priv)
216 {
217 struct gfar __iomem *regs = priv->gfargrp[0].regs;
218 u32 tctrl = 0;
219
220 if (priv->ndev->features & NETIF_F_IP_CSUM)
221 tctrl |= TCTRL_INIT_CSUM;
222
223 if (priv->prio_sched_en)
224 tctrl |= TCTRL_TXSCHED_PRIO;
225 else {
226 tctrl |= TCTRL_TXSCHED_WRRS;
227 gfar_write(®s->tr03wt, DEFAULT_WRRS_WEIGHT);
228 gfar_write(®s->tr47wt, DEFAULT_WRRS_WEIGHT);
229 }
230
231 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
232 tctrl |= TCTRL_VLINS;
233
234 gfar_write(®s->tctrl, tctrl);
235 }
236
gfar_configure_coalescing(struct gfar_private * priv,unsigned long tx_mask,unsigned long rx_mask)237 static void gfar_configure_coalescing(struct gfar_private *priv,
238 unsigned long tx_mask, unsigned long rx_mask)
239 {
240 struct gfar __iomem *regs = priv->gfargrp[0].regs;
241 u32 __iomem *baddr;
242
243 if (priv->mode == MQ_MG_MODE) {
244 int i = 0;
245
246 baddr = ®s->txic0;
247 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
248 gfar_write(baddr + i, 0);
249 if (likely(priv->tx_queue[i]->txcoalescing))
250 gfar_write(baddr + i, priv->tx_queue[i]->txic);
251 }
252
253 baddr = ®s->rxic0;
254 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
255 gfar_write(baddr + i, 0);
256 if (likely(priv->rx_queue[i]->rxcoalescing))
257 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
258 }
259 } else {
260 /* Backward compatible case -- even if we enable
261 * multiple queues, there's only single reg to program
262 */
263 gfar_write(®s->txic, 0);
264 if (likely(priv->tx_queue[0]->txcoalescing))
265 gfar_write(®s->txic, priv->tx_queue[0]->txic);
266
267 gfar_write(®s->rxic, 0);
268 if (unlikely(priv->rx_queue[0]->rxcoalescing))
269 gfar_write(®s->rxic, priv->rx_queue[0]->rxic);
270 }
271 }
272
/* Apply the coalescing configuration to every Tx and Rx queue */
static void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
277
gfar_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * stats)278 static void gfar_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
279 {
280 struct gfar_private *priv = netdev_priv(dev);
281 int i;
282
283 for (i = 0; i < priv->num_rx_queues; i++) {
284 stats->rx_packets += priv->rx_queue[i]->stats.rx_packets;
285 stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
286 stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
287 }
288
289 for (i = 0; i < priv->num_tx_queues; i++) {
290 stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
291 stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
292 }
293
294 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
295 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
296 unsigned long flags;
297 u32 rdrp, car, car_before;
298 u64 rdrp_offset;
299
300 spin_lock_irqsave(&priv->rmon_overflow.lock, flags);
301 car = gfar_read(&rmon->car1) & CAR1_C1RDR;
302 do {
303 car_before = car;
304 rdrp = gfar_read(&rmon->rdrp);
305 car = gfar_read(&rmon->car1) & CAR1_C1RDR;
306 } while (car != car_before);
307 if (car) {
308 priv->rmon_overflow.rdrp++;
309 gfar_write(&rmon->car1, car);
310 }
311 rdrp_offset = priv->rmon_overflow.rdrp;
312 spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags);
313
314 stats->rx_missed_errors = rdrp + (rdrp_offset << 16);
315 }
316 }
317
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table. The table is controlled through 8 32-bit registers:
 *    gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255. This means that the 3 most significant bits of the
 *    hash index determine which gaddr register to use, and the 5 other
 *    bits indicate which bit (assuming an IBM numbering scheme, which
 *    for PowerPC (tm) is usually the case) in the register holds
 *    the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 crc = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	/* Low 5 bits of the hash select the bit (IBM numbering), the
	 * remaining high bits select the register.
	 */
	u8 whichbit = (crc >> (32 - width)) & 0x1f;
	u8 whichreg = crc >> (32 - width + 5);
	u32 tempval;

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= 1U << (31 - whichbit);
	gfar_write(priv->hash_regs[whichreg], tempval);
}
346
/* There are multiple MAC Address register pairs on some controllers.
 * This function sets the numth pair to a given address.
 */
gfar_set_mac_for_addr(struct net_device * dev,int num,const u8 * addr)350 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
351 const u8 *addr)
352 {
353 struct gfar_private *priv = netdev_priv(dev);
354 struct gfar __iomem *regs = priv->gfargrp[0].regs;
355 u32 tempval;
356 u32 __iomem *macptr = ®s->macstnaddr1;
357
358 macptr += num*2;
359
360 /* For a station address of 0x12345678ABCD in transmission
361 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
362 * MACnADDR2 is set to 0x34120000.
363 */
364 tempval = (addr[5] << 24) | (addr[4] << 16) |
365 (addr[3] << 8) | addr[2];
366
367 gfar_write(macptr, tempval);
368
369 tempval = (addr[1] << 24) | (addr[0] << 16);
370
371 gfar_write(macptr+1, tempval);
372 }
373
gfar_set_mac_addr(struct net_device * dev,void * p)374 static int gfar_set_mac_addr(struct net_device *dev, void *p)
375 {
376 int ret;
377
378 ret = eth_mac_addr(dev, p);
379 if (ret)
380 return ret;
381
382 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
383
384 return 0;
385 }
386
gfar_ints_disable(struct gfar_private * priv)387 static void gfar_ints_disable(struct gfar_private *priv)
388 {
389 int i;
390 for (i = 0; i < priv->num_grps; i++) {
391 struct gfar __iomem *regs = priv->gfargrp[i].regs;
392 /* Clear IEVENT */
393 gfar_write(®s->ievent, IEVENT_INIT_CLEAR);
394
395 /* Initialize IMASK */
396 gfar_write(®s->imask, IMASK_INIT_CLEAR);
397 }
398 }
399
gfar_ints_enable(struct gfar_private * priv)400 static void gfar_ints_enable(struct gfar_private *priv)
401 {
402 int i;
403 for (i = 0; i < priv->num_grps; i++) {
404 struct gfar __iomem *regs = priv->gfargrp[i].regs;
405 /* Unmask the interrupts we look for */
406 gfar_write(®s->imask,
407 IMASK_DEFAULT | priv->rmon_overflow.imask);
408 }
409 }
410
gfar_alloc_tx_queues(struct gfar_private * priv)411 static int gfar_alloc_tx_queues(struct gfar_private *priv)
412 {
413 int i;
414
415 for (i = 0; i < priv->num_tx_queues; i++) {
416 priv->tx_queue[i] = kzalloc_obj(struct gfar_priv_tx_q);
417 if (!priv->tx_queue[i])
418 return -ENOMEM;
419
420 priv->tx_queue[i]->tx_skbuff = NULL;
421 priv->tx_queue[i]->qindex = i;
422 priv->tx_queue[i]->dev = priv->ndev;
423 spin_lock_init(&(priv->tx_queue[i]->txlock));
424 }
425 return 0;
426 }
427
gfar_alloc_rx_queues(struct gfar_private * priv)428 static int gfar_alloc_rx_queues(struct gfar_private *priv)
429 {
430 int i;
431
432 for (i = 0; i < priv->num_rx_queues; i++) {
433 priv->rx_queue[i] = kzalloc_obj(struct gfar_priv_rx_q);
434 if (!priv->rx_queue[i])
435 return -ENOMEM;
436
437 priv->rx_queue[i]->qindex = i;
438 priv->rx_queue[i]->ndev = priv->ndev;
439 }
440 return 0;
441 }
442
gfar_free_tx_queues(struct gfar_private * priv)443 static void gfar_free_tx_queues(struct gfar_private *priv)
444 {
445 int i;
446
447 for (i = 0; i < priv->num_tx_queues; i++)
448 kfree(priv->tx_queue[i]);
449 }
450
gfar_free_rx_queues(struct gfar_private * priv)451 static void gfar_free_rx_queues(struct gfar_private *priv)
452 {
453 int i;
454
455 for (i = 0; i < priv->num_rx_queues; i++)
456 kfree(priv->rx_queue[i]);
457 }
458
unmap_group_regs(struct gfar_private * priv)459 static void unmap_group_regs(struct gfar_private *priv)
460 {
461 int i;
462
463 for (i = 0; i < MAXGROUPS; i++)
464 if (priv->gfargrp[i].regs)
465 iounmap(priv->gfargrp[i].regs);
466 }
467
free_gfar_dev(struct gfar_private * priv)468 static void free_gfar_dev(struct gfar_private *priv)
469 {
470 int i, j;
471
472 for (i = 0; i < priv->num_grps; i++)
473 for (j = 0; j < GFAR_NUM_IRQS; j++) {
474 kfree(priv->gfargrp[i].irqinfo[j]);
475 priv->gfargrp[i].irqinfo[j] = NULL;
476 }
477
478 free_netdev(priv->ndev);
479 }
480
disable_napi(struct gfar_private * priv)481 static void disable_napi(struct gfar_private *priv)
482 {
483 int i;
484
485 for (i = 0; i < priv->num_grps; i++) {
486 napi_disable(&priv->gfargrp[i].napi_rx);
487 napi_disable(&priv->gfargrp[i].napi_tx);
488 }
489 }
490
enable_napi(struct gfar_private * priv)491 static void enable_napi(struct gfar_private *priv)
492 {
493 int i;
494
495 for (i = 0; i < priv->num_grps; i++) {
496 napi_enable(&priv->gfargrp[i].napi_rx);
497 napi_enable(&priv->gfargrp[i].napi_tx);
498 }
499 }
500
gfar_parse_group(struct device_node * np,struct gfar_private * priv,const char * model)501 static int gfar_parse_group(struct device_node *np,
502 struct gfar_private *priv, const char *model)
503 {
504 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
505 int i;
506
507 for (i = 0; i < GFAR_NUM_IRQS; i++) {
508 grp->irqinfo[i] = kzalloc_obj(struct gfar_irqinfo);
509 if (!grp->irqinfo[i])
510 return -ENOMEM;
511 }
512
513 grp->regs = of_iomap(np, 0);
514 if (!grp->regs)
515 return -ENOMEM;
516
517 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
518
519 /* If we aren't the FEC we have multiple interrupts */
520 if (model && strcasecmp(model, "FEC")) {
521 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
522 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
523 if (!gfar_irq(grp, TX)->irq ||
524 !gfar_irq(grp, RX)->irq ||
525 !gfar_irq(grp, ER)->irq)
526 return -EINVAL;
527 }
528
529 grp->priv = priv;
530 spin_lock_init(&grp->grplock);
531 if (priv->mode == MQ_MG_MODE) {
532 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
533 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
534 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
535 } else {
536 grp->rx_bit_map = 0xFF;
537 grp->tx_bit_map = 0xFF;
538 }
539
540 /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
541 * right to left, so we need to revert the 8 bits to get the q index
542 */
543 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
544 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
545
546 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
547 * also assign queues to groups
548 */
549 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
550 if (!grp->rx_queue)
551 grp->rx_queue = priv->rx_queue[i];
552 grp->num_rx_queues++;
553 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
554 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
555 priv->rx_queue[i]->grp = grp;
556 }
557
558 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
559 if (!grp->tx_queue)
560 grp->tx_queue = priv->tx_queue[i];
561 grp->num_tx_queues++;
562 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
563 priv->tqueue |= (TQUEUE_EN0 >> i);
564 priv->tx_queue[i]->grp = grp;
565 }
566
567 priv->num_grps++;
568
569 return 0;
570 }
571
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
gfar_get_interface(struct net_device * dev)575 static phy_interface_t gfar_get_interface(struct net_device *dev)
576 {
577 struct gfar_private *priv = netdev_priv(dev);
578 struct gfar __iomem *regs = priv->gfargrp[0].regs;
579 u32 ecntrl;
580
581 ecntrl = gfar_read(®s->ecntrl);
582
583 if (ecntrl & ECNTRL_SGMII_MODE)
584 return PHY_INTERFACE_MODE_SGMII;
585
586 if (ecntrl & ECNTRL_TBI_MODE) {
587 if (ecntrl & ECNTRL_REDUCED_MODE)
588 return PHY_INTERFACE_MODE_RTBI;
589 else
590 return PHY_INTERFACE_MODE_TBI;
591 }
592
593 if (ecntrl & ECNTRL_REDUCED_MODE) {
594 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
595 return PHY_INTERFACE_MODE_RMII;
596 }
597 else {
598 phy_interface_t interface = priv->interface;
599
600 /* This isn't autodetected right now, so it must
601 * be set by the device tree or platform code.
602 */
603 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
604 return PHY_INTERFACE_MODE_RGMII_ID;
605
606 return PHY_INTERFACE_MODE_RGMII;
607 }
608 }
609
610 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
611 return PHY_INTERFACE_MODE_GMII;
612
613 return PHY_INTERFACE_MODE_MII;
614 }
615
gfar_of_init(struct platform_device * ofdev,struct net_device ** pdev)616 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
617 {
618 const char *model;
619 int err = 0, i;
620 phy_interface_t interface;
621 struct net_device *dev = NULL;
622 struct gfar_private *priv = NULL;
623 struct device_node *np = ofdev->dev.of_node;
624 struct device_node *child = NULL;
625 u32 stash_len = 0;
626 u32 stash_idx = 0;
627 unsigned int num_tx_qs, num_rx_qs;
628 unsigned short mode;
629
630 if (!np)
631 return -ENODEV;
632
633 if (of_device_is_compatible(np, "fsl,etsec2"))
634 mode = MQ_MG_MODE;
635 else
636 mode = SQ_SG_MODE;
637
638 if (mode == SQ_SG_MODE) {
639 num_tx_qs = 1;
640 num_rx_qs = 1;
641 } else { /* MQ_MG_MODE */
642 /* get the actual number of supported groups */
643 unsigned int num_grps;
644
645 num_grps = device_get_named_child_node_count(&ofdev->dev,
646 "queue-group");
647 if (num_grps == 0 || num_grps > MAXGROUPS) {
648 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
649 num_grps);
650 pr_err("Cannot do alloc_etherdev, aborting\n");
651 return -EINVAL;
652 }
653
654 num_tx_qs = num_grps; /* one txq per int group */
655 num_rx_qs = num_grps; /* one rxq per int group */
656 }
657
658 if (num_tx_qs > MAX_TX_QS) {
659 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
660 num_tx_qs, MAX_TX_QS);
661 pr_err("Cannot do alloc_etherdev, aborting\n");
662 return -EINVAL;
663 }
664
665 if (num_rx_qs > MAX_RX_QS) {
666 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
667 num_rx_qs, MAX_RX_QS);
668 pr_err("Cannot do alloc_etherdev, aborting\n");
669 return -EINVAL;
670 }
671
672 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
673 dev = *pdev;
674 if (NULL == dev)
675 return -ENOMEM;
676
677 priv = netdev_priv(dev);
678 priv->ndev = dev;
679
680 priv->mode = mode;
681
682 priv->num_tx_queues = num_tx_qs;
683 netif_set_real_num_rx_queues(dev, num_rx_qs);
684 priv->num_rx_queues = num_rx_qs;
685
686 err = gfar_alloc_tx_queues(priv);
687 if (err)
688 goto tx_alloc_failed;
689
690 err = gfar_alloc_rx_queues(priv);
691 if (err)
692 goto rx_alloc_failed;
693
694 err = of_property_read_string(np, "model", &model);
695 if (err) {
696 pr_err("Device model property missing, aborting\n");
697 goto rx_alloc_failed;
698 }
699
700 /* Init Rx queue filer rule set linked list */
701 INIT_LIST_HEAD(&priv->rx_list.list);
702 priv->rx_list.count = 0;
703 mutex_init(&priv->rx_queue_access);
704
705 for (i = 0; i < MAXGROUPS; i++)
706 priv->gfargrp[i].regs = NULL;
707
708 /* Parse and initialize group specific information */
709 if (priv->mode == MQ_MG_MODE) {
710 for_each_available_child_of_node(np, child) {
711 if (!of_node_name_eq(child, "queue-group"))
712 continue;
713
714 err = gfar_parse_group(child, priv, model);
715 if (err) {
716 of_node_put(child);
717 goto err_grp_init;
718 }
719 }
720 } else { /* SQ_SG_MODE */
721 err = gfar_parse_group(np, priv, model);
722 if (err)
723 goto err_grp_init;
724 }
725
726 if (of_property_read_bool(np, "bd-stash")) {
727 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
728 priv->bd_stash_en = 1;
729 }
730
731 err = of_property_read_u32(np, "rx-stash-len", &stash_len);
732
733 if (err == 0)
734 priv->rx_stash_size = stash_len;
735
736 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
737
738 if (err == 0)
739 priv->rx_stash_index = stash_idx;
740
741 if (stash_len || stash_idx)
742 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
743
744 err = of_get_ethdev_address(np, dev);
745 if (err == -EPROBE_DEFER)
746 goto err_grp_init;
747 if (err) {
748 eth_hw_addr_random(dev);
749 dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
750 }
751
752 if (model && !strcasecmp(model, "TSEC"))
753 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
754 FSL_GIANFAR_DEV_HAS_COALESCE |
755 FSL_GIANFAR_DEV_HAS_RMON |
756 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
757
758 if (model && !strcasecmp(model, "eTSEC"))
759 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
760 FSL_GIANFAR_DEV_HAS_COALESCE |
761 FSL_GIANFAR_DEV_HAS_RMON |
762 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
763 FSL_GIANFAR_DEV_HAS_CSUM |
764 FSL_GIANFAR_DEV_HAS_VLAN |
765 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
766 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
767 FSL_GIANFAR_DEV_HAS_TIMER |
768 FSL_GIANFAR_DEV_HAS_RX_FILER;
769
770 /* Use PHY connection type from the DT node if one is specified there.
771 * rgmii-id really needs to be specified. Other types can be
772 * detected by hardware
773 */
774 err = of_get_phy_mode(np, &interface);
775 if (!err)
776 priv->interface = interface;
777 else
778 priv->interface = gfar_get_interface(dev);
779
780 if (of_property_read_bool(np, "fsl,magic-packet"))
781 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
782
783 if (of_property_read_bool(np, "fsl,wake-on-filer"))
784 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
785
786 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
787
788 /* In the case of a fixed PHY, the DT node associated
789 * to the PHY is the Ethernet MAC DT node.
790 */
791 if (!priv->phy_node && of_phy_is_fixed_link(np)) {
792 err = of_phy_register_fixed_link(np);
793 if (err)
794 goto err_grp_init;
795
796 priv->phy_node = of_node_get(np);
797 }
798
799 /* Find the TBI PHY. If it's not there, we don't support SGMII */
800 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
801
802 return 0;
803
804 err_grp_init:
805 unmap_group_regs(priv);
806 rx_alloc_failed:
807 gfar_free_rx_queues(priv);
808 tx_alloc_failed:
809 gfar_free_tx_queues(priv);
810 free_gfar_dev(priv);
811 return err;
812 }
813
/* Install the four filer entries that form one protocol-class cluster
 * (e.g. IPv4|UDP), writing downward from @rqfar.  Each entry is
 * mirrored into the ftp_rqfcr/ftp_rqfpr shadow tables before being
 * written to hardware.  Returns the lowest index used, which the
 * caller passes in for the next cluster.
 */
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr;

	/* Cluster-closing entry */
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	rqfar--;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	/* No-match filler entry inside the cluster */
	rqfcr = RQFCR_CMP_NOMATCH;
	rqfar--;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	/* Cluster-opening entry matching the parser result for @class */
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	rqfar--;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	/* Exact @class match ANDed with the following rule */
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	rqfar--;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
848
gfar_init_filer_table(struct gfar_private * priv)849 static void gfar_init_filer_table(struct gfar_private *priv)
850 {
851 int i = 0x0;
852 u32 rqfar = MAX_FILER_IDX;
853 u32 rqfcr = 0x0;
854 u32 rqfpr = FPR_FILER_MASK;
855
856 /* Default rule */
857 rqfcr = RQFCR_CMP_MATCH;
858 priv->ftp_rqfcr[rqfar] = rqfcr;
859 priv->ftp_rqfpr[rqfar] = rqfpr;
860 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
861
862 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
863 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
864 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
865 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
866 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
867 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
868
869 /* cur_filer_idx indicated the first non-masked rule */
870 priv->cur_filer_idx = rqfar;
871
872 /* Rest are masked rules */
873 rqfcr = RQFCR_CMP_NOMATCH;
874 for (i = 0; i < rqfar; i++) {
875 priv->ftp_rqfcr[i] = rqfcr;
876 priv->ftp_rqfpr[i] = rqfpr;
877 gfar_write_filer(priv, i, rqfcr, rqfpr);
878 }
879 }
880
881 #ifdef CONFIG_PPC
__gfar_detect_errata_83xx(struct gfar_private * priv)882 static void __gfar_detect_errata_83xx(struct gfar_private *priv)
883 {
884 unsigned int pvr = mfspr(SPRN_PVR);
885 unsigned int svr = mfspr(SPRN_SVR);
886 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
887 unsigned int rev = svr & 0xffff;
888
889 /* MPC8313 Rev 2.0 and higher; All MPC837x */
890 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
891 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
892 priv->errata |= GFAR_ERRATA_74;
893
894 /* MPC8313 and MPC837x all rev */
895 if ((pvr == 0x80850010 && mod == 0x80b0) ||
896 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
897 priv->errata |= GFAR_ERRATA_76;
898
899 /* MPC8313 Rev < 2.0 */
900 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
901 priv->errata |= GFAR_ERRATA_12;
902 }
903
__gfar_detect_errata_85xx(struct gfar_private * priv)904 static void __gfar_detect_errata_85xx(struct gfar_private *priv)
905 {
906 unsigned int svr = mfspr(SPRN_SVR);
907
908 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
909 priv->errata |= GFAR_ERRATA_12;
910 /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
911 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
912 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
913 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
914 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
915 }
916 #endif
917
gfar_detect_errata(struct gfar_private * priv)918 static void gfar_detect_errata(struct gfar_private *priv)
919 {
920 struct device *dev = &priv->ofdev->dev;
921
922 /* no plans to fix */
923 priv->errata |= GFAR_ERRATA_A002;
924
925 #ifdef CONFIG_PPC
926 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
927 __gfar_detect_errata_85xx(priv);
928 else /* non-mpc85xx parts, i.e. e300 core based */
929 __gfar_detect_errata_83xx(priv);
930 #endif
931
932 if (priv->errata)
933 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
934 priv->errata);
935 }
936
gfar_init_addr_hash_table(struct gfar_private * priv)937 static void gfar_init_addr_hash_table(struct gfar_private *priv)
938 {
939 struct gfar __iomem *regs = priv->gfargrp[0].regs;
940
941 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
942 priv->extended_hash = 1;
943 priv->hash_width = 9;
944
945 priv->hash_regs[0] = ®s->igaddr0;
946 priv->hash_regs[1] = ®s->igaddr1;
947 priv->hash_regs[2] = ®s->igaddr2;
948 priv->hash_regs[3] = ®s->igaddr3;
949 priv->hash_regs[4] = ®s->igaddr4;
950 priv->hash_regs[5] = ®s->igaddr5;
951 priv->hash_regs[6] = ®s->igaddr6;
952 priv->hash_regs[7] = ®s->igaddr7;
953 priv->hash_regs[8] = ®s->gaddr0;
954 priv->hash_regs[9] = ®s->gaddr1;
955 priv->hash_regs[10] = ®s->gaddr2;
956 priv->hash_regs[11] = ®s->gaddr3;
957 priv->hash_regs[12] = ®s->gaddr4;
958 priv->hash_regs[13] = ®s->gaddr5;
959 priv->hash_regs[14] = ®s->gaddr6;
960 priv->hash_regs[15] = ®s->gaddr7;
961
962 } else {
963 priv->extended_hash = 0;
964 priv->hash_width = 8;
965
966 priv->hash_regs[0] = ®s->gaddr0;
967 priv->hash_regs[1] = ®s->gaddr1;
968 priv->hash_regs[2] = ®s->gaddr2;
969 priv->hash_regs[3] = ®s->gaddr3;
970 priv->hash_regs[4] = ®s->gaddr4;
971 priv->hash_regs[5] = ®s->gaddr5;
972 priv->hash_regs[6] = ®s->gaddr6;
973 priv->hash_regs[7] = ®s->gaddr7;
974 }
975 }
976
__gfar_is_rx_idle(struct gfar_private * priv)977 static int __gfar_is_rx_idle(struct gfar_private *priv)
978 {
979 u32 res;
980
981 /* Normaly TSEC should not hang on GRS commands, so we should
982 * actually wait for IEVENT_GRSC flag.
983 */
984 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
985 return 0;
986
987 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
988 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
989 * and the Rx can be safely reset.
990 */
991 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
992 res &= 0x7f807f80;
993 if ((res & 0xffff) == (res >> 16))
994 return 1;
995
996 return 0;
997 }
998
999 /* Halt the receive and transmit queues */
gfar_halt_nodisable(struct gfar_private * priv)1000 static void gfar_halt_nodisable(struct gfar_private *priv)
1001 {
1002 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1003 u32 tempval;
1004 unsigned int timeout;
1005 int stopped;
1006
1007 gfar_ints_disable(priv);
1008
1009 if (gfar_is_dma_stopped(priv))
1010 return;
1011
1012 /* Stop the DMA, and wait for it to stop */
1013 tempval = gfar_read(®s->dmactrl);
1014 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1015 gfar_write(®s->dmactrl, tempval);
1016
1017 retry:
1018 timeout = 1000;
1019 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1020 cpu_relax();
1021 timeout--;
1022 }
1023
1024 if (!timeout)
1025 stopped = gfar_is_dma_stopped(priv);
1026
1027 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1028 !__gfar_is_rx_idle(priv))
1029 goto retry;
1030 }
1031
1032 /* Halt the receive and transmit queues */
gfar_halt(struct gfar_private * priv)1033 static void gfar_halt(struct gfar_private *priv)
1034 {
1035 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1036 u32 tempval;
1037
1038 /* Dissable the Rx/Tx hw queues */
1039 gfar_write(®s->rqueue, 0);
1040 gfar_write(®s->tqueue, 0);
1041
1042 mdelay(10);
1043
1044 gfar_halt_nodisable(priv);
1045
1046 /* Disable Rx/Tx DMA */
1047 tempval = gfar_read(®s->maccfg1);
1048 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1049 gfar_write(®s->maccfg1, tempval);
1050 }
1051
/* Unmap and free every in-flight Tx skb on this queue, then release
 * the tx_skbuff bookkeeping array itself.  Called with DMA halted.
 */
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	/* NOTE(review): txbdp only advances for slots that hold an skb
	 * (head BD + one BD per fragment); this assumes the occupied skb
	 * slots map onto consecutive BDs from the ring base — confirm.
	 */
	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		/* head BD: linear part was mapped with dma_map_single() */
		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		/* one additional BD per page fragment */
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
				       be16_to_cpu(txbdp->length),
				       DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}
1081
free_skb_rx_queue(struct gfar_priv_rx_q * rx_queue)1082 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1083 {
1084 int i;
1085
1086 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1087
1088 dev_kfree_skb(rx_queue->skb);
1089
1090 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1091 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
1092
1093 rxbdp->lstatus = 0;
1094 rxbdp->bufPtr = 0;
1095 rxbdp++;
1096
1097 if (!rxb->page)
1098 continue;
1099
1100 dma_unmap_page(rx_queue->dev, rxb->dma,
1101 PAGE_SIZE, DMA_FROM_DEVICE);
1102 __free_page(rxb->page);
1103
1104 rxb->page = NULL;
1105 }
1106
1107 kfree(rx_queue->rx_buff);
1108 rx_queue->rx_buff = NULL;
1109 }
1110
1111 /* If there are any tx skbs or rx skbs still around, free them.
1112 * Then free tx_skbuff and rx_skbuff
1113 */
free_skb_resources(struct gfar_private * priv)1114 static void free_skb_resources(struct gfar_private *priv)
1115 {
1116 struct gfar_priv_tx_q *tx_queue = NULL;
1117 struct gfar_priv_rx_q *rx_queue = NULL;
1118 int i;
1119
1120 /* Go through all the buffer descriptors and free their data buffers */
1121 for (i = 0; i < priv->num_tx_queues; i++) {
1122 struct netdev_queue *txq;
1123
1124 tx_queue = priv->tx_queue[i];
1125 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1126 if (tx_queue->tx_skbuff)
1127 free_skb_tx_queue(tx_queue);
1128 netdev_tx_reset_queue(txq);
1129 }
1130
1131 for (i = 0; i < priv->num_rx_queues; i++) {
1132 rx_queue = priv->rx_queue[i];
1133 if (rx_queue->rx_buff)
1134 free_skb_rx_queue(rx_queue);
1135 }
1136
1137 dma_free_coherent(priv->dev,
1138 sizeof(struct txbd8) * priv->total_tx_ring_size +
1139 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1140 priv->tx_queue[0]->tx_bd_base,
1141 priv->tx_queue[0]->tx_bd_dma_base);
1142 }
1143
/* Take the interface fully down: stop the stack, NAPI, DMA and PHY,
 * then release all ring/buffer resources.
 */
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	/* publish GFAR_DOWN; barriers pair with lock-free readers of
	 * the state bit in the irq/NAPI paths
	 */
	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(dev->phydev);

	free_skb_resources(priv);
}
1163
gfar_start(struct gfar_private * priv)1164 static void gfar_start(struct gfar_private *priv)
1165 {
1166 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1167 u32 tempval;
1168 int i = 0;
1169
1170 /* Enable Rx/Tx hw queues */
1171 gfar_write(®s->rqueue, priv->rqueue);
1172 gfar_write(®s->tqueue, priv->tqueue);
1173
1174 /* Initialize DMACTRL to have WWR and WOP */
1175 tempval = gfar_read(®s->dmactrl);
1176 tempval |= DMACTRL_INIT_SETTINGS;
1177 gfar_write(®s->dmactrl, tempval);
1178
1179 /* Make sure we aren't stopped */
1180 tempval = gfar_read(®s->dmactrl);
1181 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1182 gfar_write(®s->dmactrl, tempval);
1183
1184 for (i = 0; i < priv->num_grps; i++) {
1185 regs = priv->gfargrp[i].regs;
1186 /* Clear THLT/RHLT, so that the DMA starts polling now */
1187 gfar_write(®s->tstat, priv->gfargrp[i].tstat);
1188 gfar_write(®s->rstat, priv->gfargrp[i].rstat);
1189 }
1190
1191 /* Enable Rx/Tx DMA */
1192 tempval = gfar_read(®s->maccfg1);
1193 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1194 gfar_write(®s->maccfg1, tempval);
1195
1196 gfar_ints_enable(priv);
1197
1198 netif_trans_update(priv->ndev); /* prevent tx timeout */
1199 }
1200
gfar_new_page(struct gfar_priv_rx_q * rxq,struct gfar_rx_buff * rxb)1201 static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
1202 {
1203 struct page *page;
1204 dma_addr_t addr;
1205
1206 page = dev_alloc_page();
1207 if (unlikely(!page))
1208 return false;
1209
1210 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
1211 if (unlikely(dma_mapping_error(rxq->dev, addr))) {
1212 __free_page(page);
1213
1214 return false;
1215 }
1216
1217 rxb->dma = addr;
1218 rxb->page = page;
1219 rxb->page_offset = 0;
1220
1221 return true;
1222 }
1223
gfar_rx_alloc_err(struct gfar_priv_rx_q * rx_queue)1224 static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1225 {
1226 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
1227 struct gfar_extra_stats *estats = &priv->extra_stats;
1228
1229 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
1230 atomic64_inc(&estats->rx_alloc_err);
1231 }
1232
gfar_alloc_rx_buffs(struct gfar_priv_rx_q * rx_queue,int alloc_cnt)1233 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
1234 int alloc_cnt)
1235 {
1236 struct rxbd8 *bdp;
1237 struct gfar_rx_buff *rxb;
1238 int i;
1239
1240 i = rx_queue->next_to_use;
1241 bdp = &rx_queue->rx_bd_base[i];
1242 rxb = &rx_queue->rx_buff[i];
1243
1244 while (alloc_cnt--) {
1245 /* try reuse page */
1246 if (unlikely(!rxb->page)) {
1247 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
1248 gfar_rx_alloc_err(rx_queue);
1249 break;
1250 }
1251 }
1252
1253 /* Setup the new RxBD */
1254 gfar_init_rxbdp(rx_queue, bdp,
1255 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
1256
1257 /* Update to the next pointer */
1258 bdp++;
1259 rxb++;
1260
1261 if (unlikely(++i == rx_queue->rx_ring_size)) {
1262 i = 0;
1263 bdp = rx_queue->rx_bd_base;
1264 rxb = rx_queue->rx_buff;
1265 }
1266 }
1267
1268 rx_queue->next_to_use = i;
1269 rx_queue->next_to_alloc = i;
1270 }
1271
gfar_init_bds(struct net_device * ndev)1272 static void gfar_init_bds(struct net_device *ndev)
1273 {
1274 struct gfar_private *priv = netdev_priv(ndev);
1275 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1276 struct gfar_priv_tx_q *tx_queue = NULL;
1277 struct gfar_priv_rx_q *rx_queue = NULL;
1278 struct txbd8 *txbdp;
1279 u32 __iomem *rfbptr;
1280 int i, j;
1281
1282 for (i = 0; i < priv->num_tx_queues; i++) {
1283 tx_queue = priv->tx_queue[i];
1284 /* Initialize some variables in our dev structure */
1285 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
1286 tx_queue->dirty_tx = tx_queue->tx_bd_base;
1287 tx_queue->cur_tx = tx_queue->tx_bd_base;
1288 tx_queue->skb_curtx = 0;
1289 tx_queue->skb_dirtytx = 0;
1290
1291 /* Initialize Transmit Descriptor Ring */
1292 txbdp = tx_queue->tx_bd_base;
1293 for (j = 0; j < tx_queue->tx_ring_size; j++) {
1294 txbdp->lstatus = 0;
1295 txbdp->bufPtr = 0;
1296 txbdp++;
1297 }
1298
1299 /* Set the last descriptor in the ring to indicate wrap */
1300 txbdp--;
1301 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
1302 TXBD_WRAP);
1303 }
1304
1305 rfbptr = ®s->rfbptr0;
1306 for (i = 0; i < priv->num_rx_queues; i++) {
1307 rx_queue = priv->rx_queue[i];
1308
1309 rx_queue->next_to_clean = 0;
1310 rx_queue->next_to_use = 0;
1311 rx_queue->next_to_alloc = 0;
1312
1313 /* make sure next_to_clean != next_to_use after this
1314 * by leaving at least 1 unused descriptor
1315 */
1316 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
1317
1318 rx_queue->rfbptr = rfbptr;
1319 rfbptr += 2;
1320 }
1321 }
1322
/* Allocate the DMA-coherent BD rings (all Tx rings followed by all Rx
 * rings in ONE allocation), plus the per-queue skb/page bookkeeping
 * arrays, then initialize the rings.  Returns 0 or -ENOMEM; on failure
 * everything allocated so far is released via free_skb_resources().
 */
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* carve the coherent block into per-queue Tx rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	/* NOTE(review): kmalloc_objs/kzalloc_objs are project alloc
	 * helpers — assumed count*objsize with GFP_KERNEL; confirm.
	 */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_objs(*tx_queue->tx_skbuff,
				     tx_queue->tx_ring_size);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kzalloc_objs(*rx_queue->rx_buff,
						 rx_queue->rx_ring_size);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
1401
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	int err;

	gfar_mac_reset(priv);

	/* allocate BD rings + Rx pages; self-cleans on failure */
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_tx_rx_base(priv);

	/* clear GFAR_DOWN before enabling NAPI/irq paths; barriers pair
	 * with lock-free readers of the state bit
	 */
	smp_mb__before_atomic();
	clear_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	/* Start Rx/Tx DMA and enable the interrupts */
	gfar_start(priv);

	/* force link state update after mac reset */
	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phy_start(ndev->phydev);

	enable_napi(priv);

	netif_tx_wake_all_queues(ndev);

	return 0;
}
1436
gfar_get_flowctrl_cfg(struct gfar_private * priv)1437 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
1438 {
1439 struct net_device *ndev = priv->ndev;
1440 struct phy_device *phydev = ndev->phydev;
1441 u32 val = 0;
1442
1443 if (!phydev->duplex)
1444 return val;
1445
1446 if (!priv->pause_aneg_en) {
1447 if (priv->tx_pause_en)
1448 val |= MACCFG1_TX_FLOW;
1449 if (priv->rx_pause_en)
1450 val |= MACCFG1_RX_FLOW;
1451 } else {
1452 u16 lcl_adv, rmt_adv;
1453 u8 flowctrl;
1454 /* get link partner capabilities */
1455 rmt_adv = 0;
1456 if (phydev->pause)
1457 rmt_adv = LPA_PAUSE_CAP;
1458 if (phydev->asym_pause)
1459 rmt_adv |= LPA_PAUSE_ASYM;
1460
1461 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1462 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1463 if (flowctrl & FLOW_CTRL_TX)
1464 val |= MACCFG1_TX_FLOW;
1465 if (flowctrl & FLOW_CTRL_RX)
1466 val |= MACCFG1_RX_FLOW;
1467 }
1468
1469 return val;
1470 }
1471
gfar_update_link_state(struct gfar_private * priv)1472 static noinline void gfar_update_link_state(struct gfar_private *priv)
1473 {
1474 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1475 struct net_device *ndev = priv->ndev;
1476 struct phy_device *phydev = ndev->phydev;
1477 struct gfar_priv_rx_q *rx_queue = NULL;
1478 int i;
1479
1480 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
1481 return;
1482
1483 if (phydev->link) {
1484 u32 tempval1 = gfar_read(®s->maccfg1);
1485 u32 tempval = gfar_read(®s->maccfg2);
1486 u32 ecntrl = gfar_read(®s->ecntrl);
1487 u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
1488
1489 if (phydev->duplex != priv->oldduplex) {
1490 if (!(phydev->duplex))
1491 tempval &= ~(MACCFG2_FULL_DUPLEX);
1492 else
1493 tempval |= MACCFG2_FULL_DUPLEX;
1494
1495 priv->oldduplex = phydev->duplex;
1496 }
1497
1498 if (phydev->speed != priv->oldspeed) {
1499 switch (phydev->speed) {
1500 case 1000:
1501 tempval =
1502 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1503
1504 ecntrl &= ~(ECNTRL_R100);
1505 break;
1506 case 100:
1507 case 10:
1508 tempval =
1509 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1510
1511 /* Reduced mode distinguishes
1512 * between 10 and 100
1513 */
1514 if (phydev->speed == SPEED_100)
1515 ecntrl |= ECNTRL_R100;
1516 else
1517 ecntrl &= ~(ECNTRL_R100);
1518 break;
1519 default:
1520 netif_warn(priv, link, priv->ndev,
1521 "Ack! Speed (%d) is not 10/100/1000!\n",
1522 phydev->speed);
1523 break;
1524 }
1525
1526 priv->oldspeed = phydev->speed;
1527 }
1528
1529 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
1530 tempval1 |= gfar_get_flowctrl_cfg(priv);
1531
1532 /* Turn last free buffer recording on */
1533 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
1534 for (i = 0; i < priv->num_rx_queues; i++) {
1535 u32 bdp_dma;
1536
1537 rx_queue = priv->rx_queue[i];
1538 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
1539 gfar_write(rx_queue->rfbptr, bdp_dma);
1540 }
1541
1542 priv->tx_actual_en = 1;
1543 }
1544
1545 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
1546 priv->tx_actual_en = 0;
1547
1548 gfar_write(®s->maccfg1, tempval1);
1549 gfar_write(®s->maccfg2, tempval);
1550 gfar_write(®s->ecntrl, ecntrl);
1551
1552 if (!priv->oldlink)
1553 priv->oldlink = 1;
1554
1555 } else if (priv->oldlink) {
1556 priv->oldlink = 0;
1557 priv->oldspeed = 0;
1558 priv->oldduplex = -1;
1559 }
1560
1561 if (netif_msg_link(priv))
1562 phy_print_status(phydev);
1563 }
1564
1565 /* Called every time the controller might need to be made
1566 * aware of new link state. The PHY code conveys this
1567 * information through variables in the phydev structure, and this
1568 * function converts those variables into the appropriate
1569 * register values, and can bring down the device if needed.
1570 */
adjust_link(struct net_device * dev)1571 static void adjust_link(struct net_device *dev)
1572 {
1573 struct gfar_private *priv = netdev_priv(dev);
1574 struct phy_device *phydev = dev->phydev;
1575
1576 if (unlikely(phydev->link != priv->oldlink ||
1577 (phydev->link && (phydev->duplex != priv->oldduplex ||
1578 phydev->speed != priv->oldspeed))))
1579 gfar_update_link_state(priv);
1580 }
1581
/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	/* of_phy_find_device() takes a device reference; every exit
	 * path below must drop it with put_device()
	 */
	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->mdio.dev);
		return;
	}

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	/* advertise 1000BASE-X full duplex with both pause modes */
	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	/* restart autoneg at gigabit, full duplex */
	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);

	put_device(&tbiphy->mdio.dev);
}
1630
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	phy_interface_t interface = priv->interface;
	struct phy_device *phydev;
	struct ethtool_keee edata;

	/* forget cached link parameters so the first adjust_link()
	 * callback reprograms the MAC
	 */
	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				interface);
	if (!phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT))
		phy_set_max_speed(phydev, SPEED_100);

	/* Add support for flow control */
	phy_support_asym_pause(phydev);

	/* disable EEE autoneg, EEE not supported by eTSEC */
	memset(&edata, 0, sizeof(struct ethtool_keee));
	phy_ethtool_set_eee(phydev, &edata);

	return 0;
}
1667
gfar_add_fcb(struct sk_buff * skb)1668 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1669 {
1670 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1671
1672 memset(fcb, 0, GMAC_FCB_LEN);
1673
1674 return fcb;
1675 }
1676
gfar_tx_checksum(struct sk_buff * skb,struct txfcb * fcb,int fcb_length)1677 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1678 int fcb_length)
1679 {
1680 /* If we're here, it's a IP packet with a TCP or UDP
1681 * payload. We set it to checksum, using a pseudo-header
1682 * we provide
1683 */
1684 u8 flags = TXFCB_DEFAULT;
1685
1686 /* Tell the controller what the protocol is
1687 * And provide the already calculated phcs
1688 */
1689 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1690 flags |= TXFCB_UDP;
1691 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
1692 } else
1693 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
1694
1695 /* l3os is the distance between the start of the
1696 * frame (skb->data) and the start of the IP hdr.
1697 * l4os is the distance between the start of the
1698 * l3 hdr and the l4 hdr
1699 */
1700 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
1701 fcb->l4os = skb_network_header_len(skb);
1702
1703 fcb->flags = flags;
1704 }
1705
gfar_tx_vlan(struct sk_buff * skb,struct txfcb * fcb)1706 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1707 {
1708 fcb->flags |= TXFCB_VLN;
1709 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
1710 }
1711
skip_txbd(struct txbd8 * bdp,int stride,struct txbd8 * base,int ring_size)1712 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1713 struct txbd8 *base, int ring_size)
1714 {
1715 struct txbd8 *new_bd = bdp + stride;
1716
1717 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1718 }
1719
/* Advance one Tx descriptor, wrapping at the end of the ring. */
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
1725
1726 /* eTSEC12: csum generation not supported for some fcb offsets */
gfar_csum_errata_12(struct gfar_private * priv,unsigned long fcb_addr)1727 static inline bool gfar_csum_errata_12(struct gfar_private *priv,
1728 unsigned long fcb_addr)
1729 {
1730 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
1731 (fcb_addr % 0x20) > 0x18);
1732 }
1733
1734 /* eTSEC76: csum generation for frames larger than 2500 may
1735 * cause excess delays before start of transmission
1736 */
gfar_csum_errata_76(struct gfar_private * priv,unsigned int len)1737 static inline bool gfar_csum_errata_76(struct gfar_private *priv,
1738 unsigned int len)
1739 {
1740 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
1741 (len > 2500));
1742 }
1743
1744 /* This is called by the kernel when a frame is ready for transmission.
1745 * It is pointed to by the dev->hard_start_xmit function pointer
1746 */
gfar_start_xmit(struct sk_buff * skb,struct net_device * dev)1747 static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1748 {
1749 struct gfar_private *priv = netdev_priv(dev);
1750 struct gfar_priv_tx_q *tx_queue = NULL;
1751 struct netdev_queue *txq;
1752 struct gfar __iomem *regs = NULL;
1753 struct txfcb *fcb = NULL;
1754 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1755 u32 lstatus;
1756 skb_frag_t *frag;
1757 int i, rq = 0;
1758 int do_tstamp, do_csum, do_vlan;
1759 u32 bufaddr;
1760 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1761
1762 rq = skb->queue_mapping;
1763 tx_queue = priv->tx_queue[rq];
1764 txq = netdev_get_tx_queue(dev, rq);
1765 base = tx_queue->tx_bd_base;
1766 regs = tx_queue->grp->regs;
1767
1768 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1769 do_vlan = skb_vlan_tag_present(skb);
1770 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1771 priv->hwts_tx_en;
1772
1773 if (do_csum || do_vlan)
1774 fcb_len = GMAC_FCB_LEN;
1775
1776 /* check if time stamp should be generated */
1777 if (unlikely(do_tstamp))
1778 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1779
1780 /* make space for additional header when fcb is needed */
1781 if (fcb_len) {
1782 if (unlikely(skb_cow_head(skb, fcb_len))) {
1783 dev->stats.tx_errors++;
1784 dev_kfree_skb_any(skb);
1785 return NETDEV_TX_OK;
1786 }
1787 }
1788
1789 /* total number of fragments in the SKB */
1790 nr_frags = skb_shinfo(skb)->nr_frags;
1791
1792 /* calculate the required number of TxBDs for this skb */
1793 if (unlikely(do_tstamp))
1794 nr_txbds = nr_frags + 2;
1795 else
1796 nr_txbds = nr_frags + 1;
1797
1798 /* check if there is space to queue this packet */
1799 if (nr_txbds > tx_queue->num_txbdfree) {
1800 /* no space, stop the queue */
1801 netif_tx_stop_queue(txq);
1802 dev->stats.tx_fifo_errors++;
1803 return NETDEV_TX_BUSY;
1804 }
1805
1806 /* Update transmit stats */
1807 bytes_sent = skb->len;
1808 tx_queue->stats.tx_bytes += bytes_sent;
1809 /* keep Tx bytes on wire for BQL accounting */
1810 GFAR_CB(skb)->bytes_sent = bytes_sent;
1811 tx_queue->stats.tx_packets++;
1812
1813 txbdp = txbdp_start = tx_queue->cur_tx;
1814 lstatus = be32_to_cpu(txbdp->lstatus);
1815
1816 /* Add TxPAL between FCB and frame if required */
1817 if (unlikely(do_tstamp)) {
1818 skb_push(skb, GMAC_TXPAL_LEN);
1819 memset(skb->data, 0, GMAC_TXPAL_LEN);
1820 }
1821
1822 /* Add TxFCB if required */
1823 if (fcb_len) {
1824 fcb = gfar_add_fcb(skb);
1825 lstatus |= BD_LFLAG(TXBD_TOE);
1826 }
1827
1828 /* Set up checksumming */
1829 if (do_csum) {
1830 gfar_tx_checksum(skb, fcb, fcb_len);
1831
1832 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
1833 unlikely(gfar_csum_errata_76(priv, skb->len))) {
1834 __skb_pull(skb, GMAC_FCB_LEN);
1835 skb_checksum_help(skb);
1836 if (do_vlan || do_tstamp) {
1837 /* put back a new fcb for vlan/tstamp TOE */
1838 fcb = gfar_add_fcb(skb);
1839 } else {
1840 /* Tx TOE not used */
1841 lstatus &= ~(BD_LFLAG(TXBD_TOE));
1842 fcb = NULL;
1843 }
1844 }
1845 }
1846
1847 if (do_vlan)
1848 gfar_tx_vlan(skb, fcb);
1849
1850 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
1851 DMA_TO_DEVICE);
1852 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1853 goto dma_map_err;
1854
1855 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1856
1857 /* Time stamp insertion requires one additional TxBD */
1858 if (unlikely(do_tstamp))
1859 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1860 tx_queue->tx_ring_size);
1861
1862 if (likely(!nr_frags)) {
1863 if (likely(!do_tstamp))
1864 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1865 } else {
1866 u32 lstatus_start = lstatus;
1867
1868 /* Place the fragment addresses and lengths into the TxBDs */
1869 frag = &skb_shinfo(skb)->frags[0];
1870 for (i = 0; i < nr_frags; i++, frag++) {
1871 unsigned int size;
1872
1873 /* Point at the next BD, wrapping as needed */
1874 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1875
1876 size = skb_frag_size(frag);
1877
1878 lstatus = be32_to_cpu(txbdp->lstatus) | size |
1879 BD_LFLAG(TXBD_READY);
1880
1881 /* Handle the last BD specially */
1882 if (i == nr_frags - 1)
1883 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1884
1885 bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
1886 size, DMA_TO_DEVICE);
1887 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1888 goto dma_map_err;
1889
1890 /* set the TxBD length and buffer pointer */
1891 txbdp->bufPtr = cpu_to_be32(bufaddr);
1892 txbdp->lstatus = cpu_to_be32(lstatus);
1893 }
1894
1895 lstatus = lstatus_start;
1896 }
1897
1898 /* If time stamping is requested one additional TxBD must be set up. The
1899 * first TxBD points to the FCB and must have a data length of
1900 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
1901 * the full frame length.
1902 */
1903 if (unlikely(do_tstamp)) {
1904 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1905
1906 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1907 bufaddr += fcb_len;
1908
1909 lstatus_ts |= BD_LFLAG(TXBD_READY) |
1910 (skb_headlen(skb) - fcb_len);
1911 if (!nr_frags)
1912 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1913
1914 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1915 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1916 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1917
1918 /* Setup tx hardware time stamping */
1919 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1920 fcb->ptp = 1;
1921 } else {
1922 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1923 }
1924
1925 skb_tx_timestamp(skb);
1926 netdev_tx_sent_queue(txq, bytes_sent);
1927
1928 gfar_wmb();
1929
1930 txbdp_start->lstatus = cpu_to_be32(lstatus);
1931
1932 gfar_wmb(); /* force lstatus write before tx_skbuff */
1933
1934 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1935
1936 /* Update the current skb pointer to the next entry we will use
1937 * (wrapping if necessary)
1938 */
1939 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1940 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1941
1942 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1943
1944 /* We can work in parallel with gfar_clean_tx_ring(), except
1945 * when modifying num_txbdfree. Note that we didn't grab the lock
1946 * when we were reading the num_txbdfree and checking for available
1947 * space, that's because outside of this function it can only grow.
1948 */
1949 spin_lock_bh(&tx_queue->txlock);
1950 /* reduce TxBD free count */
1951 tx_queue->num_txbdfree -= (nr_txbds);
1952 spin_unlock_bh(&tx_queue->txlock);
1953
1954 /* If the next BD still needs to be cleaned up, then the bds
1955 * are full. We need to tell the kernel to stop sending us stuff.
1956 */
1957 if (!tx_queue->num_txbdfree) {
1958 netif_tx_stop_queue(txq);
1959
1960 dev->stats.tx_fifo_errors++;
1961 }
1962
1963 /* Tell the DMA to go go go */
1964 gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1965
1966 return NETDEV_TX_OK;
1967
1968 dma_map_err:
1969 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
1970 if (do_tstamp)
1971 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1972 for (i = 0; i < nr_frags; i++) {
1973 lstatus = be32_to_cpu(txbdp->lstatus);
1974 if (!(lstatus & BD_LFLAG(TXBD_READY)))
1975 break;
1976
1977 lstatus &= ~BD_LFLAG(TXBD_READY);
1978 txbdp->lstatus = cpu_to_be32(lstatus);
1979 bufaddr = be32_to_cpu(txbdp->bufPtr);
1980 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
1981 DMA_TO_DEVICE);
1982 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1983 }
1984 gfar_wmb();
1985 dev_kfree_skb_any(skb);
1986 return NETDEV_TX_OK;
1987 }
1988
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	/* program exact-match entry 0 with the current dev_addr */
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
1996
/* Change the device MTU, restarting the interface if it is up.
 * Always returns 0.
 */
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* serialize against concurrent resets via the RESETTING bit */
	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	/* a running interface must be torn down before the MTU changes */
	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	WRITE_ONCE(dev->mtu, new_mtu);

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}
2016
/* Full teardown and re-initialization of the controller,
 * serialized against other resets via the RESETTING state bit.
 */
static void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}
2029
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	/* run the full reset from process (workqueue) context */
	reset_gfar(priv->ndev);
}
2041
/* ndo_tx_timeout handler: count the stall and defer the actual
 * reset to the workqueue (reset_task -> gfar_reset_task).
 */
static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
2049
gfar_hwtstamp_set(struct net_device * netdev,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)2050 static int gfar_hwtstamp_set(struct net_device *netdev,
2051 struct kernel_hwtstamp_config *config,
2052 struct netlink_ext_ack *extack)
2053 {
2054 struct gfar_private *priv = netdev_priv(netdev);
2055
2056 switch (config->tx_type) {
2057 case HWTSTAMP_TX_OFF:
2058 priv->hwts_tx_en = 0;
2059 break;
2060 case HWTSTAMP_TX_ON:
2061 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2062 return -ERANGE;
2063 priv->hwts_tx_en = 1;
2064 break;
2065 default:
2066 return -ERANGE;
2067 }
2068
2069 switch (config->rx_filter) {
2070 case HWTSTAMP_FILTER_NONE:
2071 if (priv->hwts_rx_en) {
2072 priv->hwts_rx_en = 0;
2073 reset_gfar(netdev);
2074 }
2075 break;
2076 default:
2077 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2078 return -ERANGE;
2079 if (!priv->hwts_rx_en) {
2080 priv->hwts_rx_en = 1;
2081 reset_gfar(netdev);
2082 }
2083 config->rx_filter = HWTSTAMP_FILTER_ALL;
2084 break;
2085 }
2086
2087 return 0;
2088 }
2089
gfar_hwtstamp_get(struct net_device * netdev,struct kernel_hwtstamp_config * config)2090 static int gfar_hwtstamp_get(struct net_device *netdev,
2091 struct kernel_hwtstamp_config *config)
2092 {
2093 struct gfar_private *priv = netdev_priv(netdev);
2094
2095 config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2096 config->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL :
2097 HWTSTAMP_FILTER_NONE;
2098
2099 return 0;
2100 }
2101
/* Reclaim completed Tx descriptors for one queue: unmap DMA buffers,
 * deliver hardware Tx timestamps, free the skbs and return the BDs to
 * the free pool.  Called from NAPI context (gfar_poll_tx_sq).
 */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;		/* frames reclaimed this run */
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	/* walk the ring until we hit a slot with no pending skb */
	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		bool do_tstamp;

		do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			    priv->hwts_tx_en;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(do_tstamp))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		/* last BD of this frame: its status says whether the whole
		 * frame has been sent
		 */
		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = be32_to_cpu(lbdp->lstatus);

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(do_tstamp)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = be16_to_cpu(next->length) +
				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = be16_to_cpu(bdp->length);

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 buflen, DMA_TO_DEVICE);

		if (unlikely(do_tstamp)) {
			struct skb_shared_hwtstamps shhwtstamps;
			__be64 *ns;

			/* locate the 8-byte-aligned hw timestamp stored in
			 * the packet headroom (past FCB/TxPAL); layout must
			 * match the xmit path — NOTE(review): verify against
			 * gfar_start_xmit
			 */
			ns = (__be64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
			/* strip FCB+TxPAL before handing the clone upstream */
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			gfar_clear_txbd_status(bdp);
			bdp = next;
		}

		gfar_clear_txbd_status(bdp);
		bdp = next_txbd(bdp, base, tx_ring_size);

		/* unmap and release the fragment descriptors */
		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
				       be16_to_cpu(bdp->length),
				       DMA_TO_DEVICE);
			gfar_clear_txbd_status(bdp);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		/* txlock guards num_txbdfree against the xmit path */
		spin_lock(&tx_queue->txlock);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock(&tx_queue->txlock);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
2213
/* Translate the error bits of an Rx BD status word into netdev and
 * driver-private error counters.
 */
static void count_errors(u32 lstatus, struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct net_device_stats *ndev_stats = &ndev->stats;
	struct gfar_extra_stats *extra = &priv->extra_stats;

	/* A truncated frame overrides every other error indication */
	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
		ndev_stats->rx_length_errors++;
		atomic64_inc(&extra->rx_trunc);
		return;
	}

	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
		ndev_stats->rx_length_errors++;
		if (lstatus & BD_LFLAG(RXBD_LARGE))
			atomic64_inc(&extra->rx_large);
		else
			atomic64_inc(&extra->rx_short);
	}

	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
		ndev_stats->rx_frame_errors++;
		atomic64_inc(&extra->rx_nonoctet);
	}

	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
		ndev_stats->rx_crc_errors++;
		atomic64_inc(&extra->rx_crcerr);
	}

	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
		ndev_stats->rx_over_errors++;
		atomic64_inc(&extra->rx_overrun);
	}
}
2250
gfar_receive(int irq,void * grp_id)2251 static irqreturn_t gfar_receive(int irq, void *grp_id)
2252 {
2253 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2254 unsigned long flags;
2255 u32 imask, ievent;
2256
2257 ievent = gfar_read(&grp->regs->ievent);
2258
2259 if (unlikely(ievent & IEVENT_FGPI)) {
2260 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2261 return IRQ_HANDLED;
2262 }
2263
2264 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2265 spin_lock_irqsave(&grp->grplock, flags);
2266 imask = gfar_read(&grp->regs->imask);
2267 imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask;
2268 gfar_write(&grp->regs->imask, imask);
2269 spin_unlock_irqrestore(&grp->grplock, flags);
2270 __napi_schedule(&grp->napi_rx);
2271 } else {
2272 /* Clear IEVENT, so interrupts aren't called again
2273 * because of the packets that have already arrived.
2274 */
2275 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2276 }
2277
2278 return IRQ_HANDLED;
2279 }
2280
2281 /* Interrupt Handler for Transmit complete */
gfar_transmit(int irq,void * grp_id)2282 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2283 {
2284 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2285 unsigned long flags;
2286 u32 imask;
2287
2288 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2289 spin_lock_irqsave(&grp->grplock, flags);
2290 imask = gfar_read(&grp->regs->imask);
2291 imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask;
2292 gfar_write(&grp->regs->imask, imask);
2293 spin_unlock_irqrestore(&grp->grplock, flags);
2294 __napi_schedule(&grp->napi_tx);
2295 } else {
2296 /* Clear IEVENT, so interrupts aren't called again
2297 * because of the packets that have already arrived.
2298 */
2299 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2300 }
2301
2302 return IRQ_HANDLED;
2303 }
2304
/* Attach one Rx buffer (half a page) to the skb being assembled.
 * For the first buffer the data is already in place (build_skb),
 * so only the length is recorded; subsequent buffers are appended
 * as page fragments.  Returns true if the half-page may be recycled
 * into the ring, false if the caller must unmap the page.
 */
static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
			     struct sk_buff *skb, bool first)
{
	int size = lstatus & BD_LENGTH_MASK;
	struct page *page = rxb->page;

	if (likely(first)) {
		skb_put(skb, size);
	} else {
		/* the last fragments' length contains the full frame length */
		if (lstatus & BD_LFLAG(RXBD_LAST))
			size -= skb->len;

		WARN(size < 0, "gianfar: rx fragment size underflow");
		if (size < 0)
			return false;

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				rxb->page_offset + RXBUF_ALIGNMENT,
				size, GFAR_RXB_TRUESIZE);
	}

	/* try reuse page: only if we hold the sole reference and the page
	 * isn't from the emergency (pfmemalloc) reserve
	 */
	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
		return false;

	/* change offset to the other half */
	rxb->page_offset ^= GFAR_RXB_TRUESIZE;

	/* extra reference for the half handed to the stack */
	page_ref_inc(page);

	return true;
}
2338
gfar_reuse_rx_page(struct gfar_priv_rx_q * rxq,struct gfar_rx_buff * old_rxb)2339 static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2340 struct gfar_rx_buff *old_rxb)
2341 {
2342 struct gfar_rx_buff *new_rxb;
2343 u16 nta = rxq->next_to_alloc;
2344
2345 new_rxb = &rxq->rx_buff[nta];
2346
2347 /* find next buf that can reuse a page */
2348 nta++;
2349 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2350
2351 /* copy page reference */
2352 *new_rxb = *old_rxb;
2353
2354 /* sync for use by the device */
2355 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2356 old_rxb->page_offset,
2357 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2358 }
2359
/* Consume the next-to-clean Rx buffer: start a new skb when this is
 * the first buffer of a frame (@skb == NULL), otherwise append the
 * buffer to the in-progress skb.  Returns the skb, or NULL if skb
 * allocation failed (the buffer is then left in place for a retry).
 */
static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
					    u32 lstatus, struct sk_buff *skb)
{
	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
	struct page *page = rxb->page;
	bool first = false;

	if (likely(!skb)) {
		/* first buffer of a frame: build the skb directly around
		 * the page half the hardware wrote into
		 */
		void *buff_addr = page_address(page) + rxb->page_offset;

		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
		if (unlikely(!skb)) {
			gfar_rx_alloc_err(rx_queue);
			return NULL;
		}
		skb_reserve(skb, RXBUF_ALIGNMENT);
		first = true;
	}

	/* make the DMA'd buffer contents visible to the CPU */
	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);

	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
		/* reuse the free half of the page */
		gfar_reuse_rx_page(rx_queue, rxb);
	} else {
		/* page cannot be reused, unmap it */
		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear rxb content */
	rxb->page = NULL;

	return skb;
}
2396
gfar_rx_checksum(struct sk_buff * skb,struct rxfcb * fcb)2397 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2398 {
2399 /* If valid headers were found, and valid sums
2400 * were verified, then we tell the kernel that no
2401 * checksumming is necessary. Otherwise, it is [FIXME]
2402 */
2403 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2404 (RXFCB_CIP | RXFCB_CTU))
2405 skb->ip_summed = CHECKSUM_UNNECESSARY;
2406 else
2407 skb_checksum_none_assert(skb);
2408 }
2409
/* gfar_process_frame() -- post-process one received frame: strip the
 * FCB and padding, extract the hw Rx timestamp, trim the FCS, and set
 * checksum/VLAN metadata on the skb.
 */
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if exists */
	/* NOTE(review): fcb is dereferenced later even when
	 * priv->uses_rxfcb is false — presumably RXCSUM/VLAN features
	 * imply uses_rxfcb; confirm against gfar_rx_offload_en()
	 */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (priv->uses_rxfcb)
		skb_pull(skb, GMAC_FCB_LEN);

	/* Get receive timestamp from the skb; the hardware prepends the
	 * 8-byte big-endian nanosecond timestamp to the frame data
	 */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__be64 *ns = (__be64 *)skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	/* Trim off the FCS */
	pskb_trim(skb, skb->len - ETH_FCS_LEN);

	if (ndev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(fcb->vlctl));
}
2452
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring until
 * the budget/quota has been reached.  Multi-buffer frames are
 * accumulated across iterations in rx_queue->skb.  Replenishes the
 * ring as it goes.  Returns the number of buffers handled.
 */
static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
			      int rx_work_limit)
{
	struct net_device *ndev = rx_queue->ndev;
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxbd8 *bdp;
	int i, howmany = 0;
	/* in-progress (multi-buffer) frame from the previous poll, if any */
	struct sk_buff *skb = rx_queue->skb;
	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
	unsigned int total_bytes = 0, total_pkts = 0;

	/* Get the first full descriptor */
	i = rx_queue->next_to_clean;

	while (rx_work_limit--) {
		u32 lstatus;

		/* replenish the ring in batches */
		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
			cleaned_cnt = 0;
		}

		bdp = &rx_queue->rx_bd_base[i];
		lstatus = be32_to_cpu(bdp->lstatus);
		if (lstatus & BD_LFLAG(RXBD_EMPTY))
			break;

		/* lost RXBD_LAST descriptor due to overrun */
		if (skb &&
		    (lstatus & BD_LFLAG(RXBD_FIRST))) {
			/* discard faulty buffer */
			dev_kfree_skb(skb);
			skb = NULL;
			rx_queue->stats.rx_dropped++;

			/* can continue normally */
		}

		/* order rx buffer descriptor reads */
		rmb();

		/* fetch next to clean buffer from the ring */
		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
		if (unlikely(!skb))
			break;

		cleaned_cnt++;
		howmany++;

		if (unlikely(++i == rx_queue->rx_ring_size))
			i = 0;

		rx_queue->next_to_clean = i;

		/* fetch next buffer if not the last in frame */
		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
			continue;

		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
			count_errors(lstatus, ndev);

			/* discard faulty buffer */
			dev_kfree_skb(skb);
			skb = NULL;
			rx_queue->stats.rx_dropped++;
			continue;
		}

		/* strip FCB/padding/FCS, set csum and VLAN metadata */
		gfar_process_frame(ndev, skb);

		/* Increment the number of packets */
		total_pkts++;
		total_bytes += skb->len;

		skb_record_rx_queue(skb, rx_queue->qindex);

		skb->protocol = eth_type_trans(skb, ndev);

		/* Send the packet up the stack */
		napi_gro_receive(&rx_queue->grp->napi_rx, skb);

		skb = NULL;
	}

	/* Store incomplete frames for completion */
	rx_queue->skb = skb;

	rx_queue->stats.rx_packets += total_pkts;
	rx_queue->stats.rx_bytes += total_bytes;

	if (cleaned_cnt)
		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);

	/* Update Last Free RxBD pointer for LFC */
	if (unlikely(priv->tx_actual_en)) {
		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);

		gfar_write(rx_queue->rfbptr, bdp_dma);
	}

	return howmany;
}
2559
/* Rx NAPI poll (single-queue mode): clean up to @budget frames from
 * the group's Rx queue and, once the work is done, re-enable Rx
 * interrupts.  Returns the number of frames processed.
 */
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;

		napi_complete_done(napi, work_done);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		/* re-arm Rx interrupts under the group lock */
		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}
2590
/* Tx NAPI poll (single-queue mode): run Tx descriptor cleanup to
 * completion (Tx reclaim is not budgeted), then re-enable Tx
 * interrupts.  Always reports 0 so NAPI completes immediately.
 */
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	/* re-arm Tx interrupts under the group lock */
	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}
2618
/* GFAR error interrupt handler: acknowledge and account all error
 * events; a Tx FIFO underrun additionally schedules a full reset.
 */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT (write-1-to-clear) */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Log the raw event/mask state when rx/tx error debugging is on */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			/* recover via a full device reset in process context */
			schedule_work(&priv->reset_task);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_MSRO) {
		/* RMON statistics counter overflow: fold the overflowed
		 * rx-drop counter into the software accumulator
		 */
		struct rmon_mib __iomem *rmon = &regs->rmon;
		u32 car;

		spin_lock(&priv->rmon_overflow.lock);
		car = gfar_read(&rmon->car1) & CAR1_C1RDR;
		if (car) {
			priv->rmon_overflow.rdrp++;
			gfar_write(&rmon->car1, car);
		}
		spin_unlock(&priv->rmon_overflow.lock);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_over_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}
2700
2701 /* The interrupt handler for devices with one interrupt */
gfar_interrupt(int irq,void * grp_id)2702 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2703 {
2704 struct gfar_priv_grp *gfargrp = grp_id;
2705
2706 /* Save ievent for future reference */
2707 u32 events = gfar_read(&gfargrp->regs->ievent);
2708
2709 /* Check for reception */
2710 if (events & IEVENT_RX_MASK)
2711 gfar_receive(irq, grp_id);
2712
2713 /* Check for transmit completion */
2714 if (events & IEVENT_TX_MASK)
2715 gfar_transmit(irq, grp_id);
2716
2717 /* Check for errors */
2718 if (events & IEVENT_ERR_MASK)
2719 gfar_error(irq, grp_id);
2720
2721 return IRQ_HANDLED;
2722 }
2723
2724 #ifdef CONFIG_NET_POLL_CONTROLLER
2725 /* Polling 'interrupt' - used by things like netconsole to send skbs
2726 * without having to re-enable interrupts. It's not called while
2727 * the interrupt routine is executing.
2728 */
gfar_netpoll(struct net_device * dev)2729 static void gfar_netpoll(struct net_device *dev)
2730 {
2731 struct gfar_private *priv = netdev_priv(dev);
2732 int i;
2733
2734 /* If the device has multiple interrupts, run tx/rx */
2735 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2736 for (i = 0; i < priv->num_grps; i++) {
2737 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2738
2739 disable_irq(gfar_irq(grp, TX)->irq);
2740 disable_irq(gfar_irq(grp, RX)->irq);
2741 disable_irq(gfar_irq(grp, ER)->irq);
2742 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2743 enable_irq(gfar_irq(grp, ER)->irq);
2744 enable_irq(gfar_irq(grp, RX)->irq);
2745 enable_irq(gfar_irq(grp, TX)->irq);
2746 }
2747 } else {
2748 for (i = 0; i < priv->num_grps; i++) {
2749 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2750
2751 disable_irq(gfar_irq(grp, TX)->irq);
2752 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2753 enable_irq(gfar_irq(grp, TX)->irq);
2754 }
2755 }
2756 }
2757 #endif
2758
/* Release the three interrupt lines of one group (multi-interrupt
 * devices only).
 */
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}
2765
/* Request the interrupt line(s) of one group: separate Error/Tx/Rx
 * handlers on multi-interrupt devices, or a single combined handler
 * otherwise.  On failure, already-requested lines are freed via the
 * goto unwind chain.  Returns 0 or a negative errno.
 */
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		/* error line doubles as a wakeup source (magic packet) */
		enable_irq_wake(gfar_irq(grp, ER)->irq);

		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, RX)->irq);

	} else {
		/* single-interrupt device: one combined handler on the
		 * TX line
		 */
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, TX)->irq);
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;

}
2826
gfar_free_irq(struct gfar_private * priv)2827 static void gfar_free_irq(struct gfar_private *priv)
2828 {
2829 int i;
2830
2831 /* Free the IRQs */
2832 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2833 for (i = 0; i < priv->num_grps; i++)
2834 free_grp_irqs(&priv->gfargrp[i]);
2835 } else {
2836 for (i = 0; i < priv->num_grps; i++)
2837 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2838 &priv->gfargrp[i]);
2839 }
2840 }
2841
gfar_request_irq(struct gfar_private * priv)2842 static int gfar_request_irq(struct gfar_private *priv)
2843 {
2844 int err, i, j;
2845
2846 for (i = 0; i < priv->num_grps; i++) {
2847 err = register_grp_irqs(&priv->gfargrp[i]);
2848 if (err) {
2849 for (j = 0; j < i; j++)
2850 free_grp_irqs(&priv->gfargrp[j]);
2851 return err;
2852 }
2853 }
2854
2855 return 0;
2856 }
2857
/* Called when something needs to use the ethernet device (ndo_open).
 * Connects the PHY, requests the interrupt lines and starts the
 * controller.  Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	err = init_phy(dev);
	if (err)
		return err;

	err = gfar_request_irq(priv);
	if (err)
		return err;

	err = startup_gfar(dev);
	if (err) {
		/* don't leak the IRQs requested above (the PHY is
		 * disconnected by the core's teardown via gfar_close
		 * on subsequent open/stop cycles)
		 */
		gfar_free_irq(priv);
		return err;
	}

	return 0;
}
2880
2881 /* Stops the kernel queue, and halts the controller */
gfar_close(struct net_device * dev)2882 static int gfar_close(struct net_device *dev)
2883 {
2884 struct gfar_private *priv = netdev_priv(dev);
2885
2886 cancel_work_sync(&priv->reset_task);
2887 stop_gfar(dev);
2888
2889 /* Disconnect from the PHY */
2890 phy_disconnect(dev->phydev);
2891
2892 gfar_free_irq(priv);
2893
2894 return 0;
2895 }
2896
2897 /* Clears each of the exact match registers to zero, so they
2898 * don't interfere with normal reception
2899 */
gfar_clear_exact_match(struct net_device * dev)2900 static void gfar_clear_exact_match(struct net_device *dev)
2901 {
2902 int idx;
2903 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
2904
2905 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2906 gfar_set_mac_for_addr(dev, idx, zero_arr);
2907 }
2908
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			/* entry 0 holds the station address */
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits:
		 * exact-match entries first, then fall back to the hash
		 */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}
2999
/* Soft-reset the MAC and reprogram it from the driver's current
 * configuration: frame length limits, offloads, address filters,
 * MAC address and interrupt coalescing.
 */
void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	/* re-enable Rx offloads (FCB, checksum, VLAN, timestamping) */
	gfar_rx_offload_en(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
	 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
	 */
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}
3075
/* One-time hardware bring-up at probe: halt the DMA engine, reset
 * the MAC, clear RMON counters, and program stashing, FIFO and
 * interrupt-steering settings.
 */
static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
		/* Clear the CAR registers (w1c style) */
		gfar_write(&regs->rmon.car1, 0xffffffff);
		gfar_write(&regs->rmon.car2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}
3131
/* net_device callbacks: datapath entry points (open/xmit/stop) plus
 * management hooks (MTU, rx mode, MAC address, timestamping, netpoll).
 */
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_eth_ioctl = phy_do_ioctl_running,
	.ndo_get_stats64 = gfar_get_stats64,
	.ndo_change_carrier = fixed_phy_change_carrier,
	.ndo_set_mac_address = gfar_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
	.ndo_hwtstamp_get = gfar_hwtstamp_get,
	.ndo_hwtstamp_set = gfar_hwtstamp_set,
};
3151
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 *
 * Returns 0 on success or a negative errno; on register_netdev()
 * failure, everything acquired by gfar_of_init() is released.
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	/* Parse the device tree; allocates the net_device and queues */
	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	/* MTU range: 50 - 9586 */
	dev->mtu = 1500;
	dev->min_mtu = 50;
	dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++) {
		netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
			       gfar_poll_rx_sq);
		netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx,
					 gfar_poll_tx_sq, 2);
	}

	/* Advertise checksum offload only when the hardware supports it */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes, and
	 * plus 2 bytes padding to ensure the cpu alignment.
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8 + DEFAULT_PADDING;

	/* TOE/timestamping need room for the frame control block
	 * (and TX padding/alignment) in front of every frame
	 */
	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* Always enable rx filer if available */
	priv->rx_filer_enable =
	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	/* Interface starts administratively down until ndo_open runs */
	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;

		spin_lock_init(&priv->rmon_overflow.lock);
		priv->rmon_overflow.imask = IMASK_MSRO;
		/* unmask the drop-counter CAM interrupt for overflow handling */
		gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR);
	}

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
		priv->wol_supported |= GFAR_WOL_MAGIC;

	/* Filer-based WoL additionally requires the rx filer itself */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
	    priv->rx_filer_enable)
		priv->wol_supported |= GFAR_WOL_FILER_UCAST;

	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	/* unwind everything gfar_of_init() set up */
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}
3324
/* Undo gfar_probe(): drop the OF node references, unregister the
 * netdev, tear down a fixed-link PHY if one was registered, then
 * release register mappings, queue structures and the device itself.
 */
static void gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);
	struct device_node *np = ofdev->dev.of_node;

	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
}
3343
3344 #ifdef CONFIG_PM
3345
__gfar_filer_disable(struct gfar_private * priv)3346 static void __gfar_filer_disable(struct gfar_private *priv)
3347 {
3348 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3349 u32 temp;
3350
3351 temp = gfar_read(®s->rctrl);
3352 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
3353 gfar_write(®s->rctrl, temp);
3354 }
3355
__gfar_filer_enable(struct gfar_private * priv)3356 static void __gfar_filer_enable(struct gfar_private *priv)
3357 {
3358 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3359 u32 temp;
3360
3361 temp = gfar_read(®s->rctrl);
3362 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
3363 gfar_write(®s->rctrl, temp);
3364 }
3365
3366 /* Filer rules implementing wol capabilities */
gfar_filer_config_wol(struct gfar_private * priv)3367 static void gfar_filer_config_wol(struct gfar_private *priv)
3368 {
3369 unsigned int i;
3370 u32 rqfcr;
3371
3372 __gfar_filer_disable(priv);
3373
3374 /* clear the filer table, reject any packet by default */
3375 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
3376 for (i = 0; i <= MAX_FILER_IDX; i++)
3377 gfar_write_filer(priv, i, rqfcr, 0);
3378
3379 i = 0;
3380 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
3381 /* unicast packet, accept it */
3382 struct net_device *ndev = priv->ndev;
3383 /* get the default rx queue index */
3384 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
3385 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
3386 (ndev->dev_addr[1] << 8) |
3387 ndev->dev_addr[2];
3388
3389 rqfcr = (qindex << 10) | RQFCR_AND |
3390 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
3391
3392 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3393
3394 dest_mac_addr = (ndev->dev_addr[3] << 16) |
3395 (ndev->dev_addr[4] << 8) |
3396 ndev->dev_addr[5];
3397 rqfcr = (qindex << 10) | RQFCR_GPI |
3398 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
3399 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3400 }
3401
3402 __gfar_filer_enable(priv);
3403 }
3404
gfar_filer_restore_table(struct gfar_private * priv)3405 static void gfar_filer_restore_table(struct gfar_private *priv)
3406 {
3407 u32 rqfcr, rqfpr;
3408 unsigned int i;
3409
3410 __gfar_filer_disable(priv);
3411
3412 for (i = 0; i <= MAX_FILER_IDX; i++) {
3413 rqfcr = priv->ftp_rqfcr[i];
3414 rqfpr = priv->ftp_rqfpr[i];
3415 gfar_write_filer(priv, i, rqfcr, rqfpr);
3416 }
3417
3418 __gfar_filer_enable(priv);
3419 }
3420
3421 /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
gfar_start_wol_filer(struct gfar_private * priv)3422 static void gfar_start_wol_filer(struct gfar_private *priv)
3423 {
3424 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3425 u32 tempval;
3426 int i = 0;
3427
3428 /* Enable Rx hw queues */
3429 gfar_write(®s->rqueue, priv->rqueue);
3430
3431 /* Initialize DMACTRL to have WWR and WOP */
3432 tempval = gfar_read(®s->dmactrl);
3433 tempval |= DMACTRL_INIT_SETTINGS;
3434 gfar_write(®s->dmactrl, tempval);
3435
3436 /* Make sure we aren't stopped */
3437 tempval = gfar_read(®s->dmactrl);
3438 tempval &= ~DMACTRL_GRS;
3439 gfar_write(®s->dmactrl, tempval);
3440
3441 for (i = 0; i < priv->num_grps; i++) {
3442 regs = priv->gfargrp[i].regs;
3443 /* Clear RHLT, so that the DMA starts polling now */
3444 gfar_write(®s->rstat, priv->gfargrp[i].rstat);
3445 /* enable the Filer General Purpose Interrupt */
3446 gfar_write(®s->imask, IMASK_FGPI);
3447 }
3448
3449 /* Enable Rx DMA */
3450 tempval = gfar_read(®s->maccfg1);
3451 tempval |= MACCFG1_RX_EN;
3452 gfar_write(®s->maccfg1, tempval);
3453 }
3454
gfar_suspend(struct device * dev)3455 static int gfar_suspend(struct device *dev)
3456 {
3457 struct gfar_private *priv = dev_get_drvdata(dev);
3458 struct net_device *ndev = priv->ndev;
3459 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3460 u32 tempval;
3461 u16 wol = priv->wol_opts;
3462
3463 if (!netif_running(ndev))
3464 return 0;
3465
3466 disable_napi(priv);
3467 netif_tx_lock(ndev);
3468 netif_device_detach(ndev);
3469 netif_tx_unlock(ndev);
3470
3471 gfar_halt(priv);
3472
3473 if (wol & GFAR_WOL_MAGIC) {
3474 /* Enable interrupt on Magic Packet */
3475 gfar_write(®s->imask, IMASK_MAG);
3476
3477 /* Enable Magic Packet mode */
3478 tempval = gfar_read(®s->maccfg2);
3479 tempval |= MACCFG2_MPEN;
3480 gfar_write(®s->maccfg2, tempval);
3481
3482 /* re-enable the Rx block */
3483 tempval = gfar_read(®s->maccfg1);
3484 tempval |= MACCFG1_RX_EN;
3485 gfar_write(®s->maccfg1, tempval);
3486
3487 } else if (wol & GFAR_WOL_FILER_UCAST) {
3488 gfar_filer_config_wol(priv);
3489 gfar_start_wol_filer(priv);
3490
3491 } else {
3492 phy_stop(ndev->phydev);
3493 }
3494
3495 return 0;
3496 }
3497
gfar_resume(struct device * dev)3498 static int gfar_resume(struct device *dev)
3499 {
3500 struct gfar_private *priv = dev_get_drvdata(dev);
3501 struct net_device *ndev = priv->ndev;
3502 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3503 u32 tempval;
3504 u16 wol = priv->wol_opts;
3505
3506 if (!netif_running(ndev))
3507 return 0;
3508
3509 if (wol & GFAR_WOL_MAGIC) {
3510 /* Disable Magic Packet mode */
3511 tempval = gfar_read(®s->maccfg2);
3512 tempval &= ~MACCFG2_MPEN;
3513 gfar_write(®s->maccfg2, tempval);
3514
3515 } else if (wol & GFAR_WOL_FILER_UCAST) {
3516 /* need to stop rx only, tx is already down */
3517 gfar_halt(priv);
3518 gfar_filer_restore_table(priv);
3519
3520 } else {
3521 phy_start(ndev->phydev);
3522 }
3523
3524 gfar_start(priv);
3525
3526 netif_device_attach(ndev);
3527 enable_napi(priv);
3528
3529 return 0;
3530 }
3531
/* Hibernation restore: rebuild the buffer descriptor rings and reset
 * the MAC from scratch (register state was lost), then restart the
 * controller and PHY and re-attach the interface.
 */
static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	/* Interface was not up; nothing to restore beyond re-attaching */
	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	gfar_init_bds(ndev);

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	/* reset cached link parameters so link state is re-evaluated */
	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}
3563
/* PM callbacks: suspend/resume double as freeze/thaw; hibernation
 * restore needs the full re-init done by gfar_restore().
 */
static const struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};
3571
3572 #define GFAR_PM_OPS (&gfar_pm_ops)
3573
3574 #else
3575
3576 #define GFAR_PM_OPS NULL
3577
3578 #endif
3579
/* Device-tree match table: legacy "gianfar" network nodes and the
 * newer "fsl,etsec2" compatible.
 */
static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);
3592
/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,	/* NULL when !CONFIG_PM */
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

/* registers the driver and generates module init/exit boilerplate */
module_platform_driver(gfar_driver);
3605