1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Network device driver for Cell Processor-Based Blade and Celleb platform
4 *
5 * (C) Copyright IBM Corp. 2005
6 * (C) Copyright 2006 TOSHIBA CORPORATION
7 *
8 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
9 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
10 */
11
12 #include <linux/compiler.h>
13 #include <linux/crc32.h>
14 #include <linux/delay.h>
15 #include <linux/etherdevice.h>
16 #include <linux/ethtool.h>
17 #include <linux/firmware.h>
18 #include <linux/if_vlan.h>
19 #include <linux/in.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/gfp.h>
23 #include <linux/ioport.h>
24 #include <linux/ip.h>
25 #include <linux/kernel.h>
26 #include <linux/mii.h>
27 #include <linux/module.h>
28 #include <linux/netdevice.h>
29 #include <linux/device.h>
30 #include <linux/pci.h>
31 #include <linux/skbuff.h>
32 #include <linux/tcp.h>
33 #include <linux/types.h>
34 #include <linux/vmalloc.h>
35 #include <linux/wait.h>
36 #include <linux/workqueue.h>
37 #include <linux/bitops.h>
38 #include <linux/of.h>
39 #include <net/checksum.h>
40
41 #include "spider_net.h"
42
43 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
44 "<Jens.Osterkamp@de.ibm.com>");
45 MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
46 MODULE_LICENSE("GPL");
47 MODULE_VERSION(VERSION);
48 MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
49
50 static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
51 static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
52
53 module_param(rx_descriptors, int, 0444);
54 module_param(tx_descriptors, int, 0444);
55
56 MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
57 "in rx chains");
58 MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
59 "in tx chain");
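/* Both parameters are read-only at runtime (mode 0444); the defaults can
 * be overridden at module load time, e.g. (assuming the module is built
 * as spidernet):
 *   modprobe spidernet rx_descriptors=256 tx_descriptors=256
 */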
60
61 char spider_net_driver_name[] = "spidernet";
62
63 static const struct pci_device_id spider_net_pci_tbl[] = {
64 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
65 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
66 { 0, }
67 };
68
69 MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
70
71 /**
72 * spider_net_read_reg - reads an SMMIO register of a card
73 * @card: device structure
74 * @reg: register to read from
75 *
76 * returns the content of the specified SMMIO register.
77 */
78 static inline u32
79 spider_net_read_reg(struct spider_net_card *card, u32 reg)
80 {
81 /* We use the powerpc specific variants instead of readl_be() because
82 * we know spidernet is not a real PCI device and we can thus avoid the
83 * performance hit caused by the PCI workarounds.
84 */
85 return in_be32(card->regs + reg);
86 }
87
88 /**
89 * spider_net_write_reg - writes to an SMMIO register of a card
90 * @card: device structure
91 * @reg: register to write to
92 * @value: value to write into the specified SMMIO register
93 */
94 static inline void
95 spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
96 {
97 /* We use the powerpc specific variants instead of writel_be() because
98 * we know spidernet is not a real PCI device and we can thus avoid the
99 * performance hit caused by the PCI workarounds.
100 */
101 out_be32(card->regs + reg, value);
102 }
103
104 /**
105 * spider_net_write_phy - write to phy register
106 * @netdev: adapter to be written to
107 * @mii_id: id of MII
108 * @reg: PHY register
109 * @val: value to be written to phy register
110 *
111 * spider_net_write_phy writes to an arbitrary PHY
112 * register via the spider GPCWOPCMD register. We assume the queue does
113 * not run full (not more than 15 commands outstanding).
114 **/
115 static void
116 spider_net_write_phy(struct net_device *netdev, int mii_id,
117 int reg, int val)
118 {
119 struct spider_net_card *card = netdev_priv(netdev);
120 u32 writevalue;
121
122 writevalue = ((u32)mii_id << 21) |
123 ((u32)reg << 16) | ((u32)val);
124
125 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
126 }
127
128 /**
129 * spider_net_read_phy - read from phy register
130 * @netdev: network device to be read from
131 * @mii_id: id of MII
132 * @reg: PHY register
133 *
134 * Returns value read from PHY register
135 *
136 * spider_net_read_phy reads from an arbitrary PHY
137 * register via the spider GPCROPCMD register
138 **/
139 static int
140 spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
141 {
142 struct spider_net_card *card = netdev_priv(netdev);
143 u32 readvalue;
144
145 readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
146 spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
147
148 /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
149 * interrupt, as we poll for the completion of the read operation
150 * in spider_net_read_phy. Should take about 50 us
151 */
152 do {
153 readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
154 } while (readvalue & SPIDER_NET_GPREXEC);
155
156 readvalue &= SPIDER_NET_GPRDAT_MASK;
157
158 return readvalue;
159 }
160
161 /**
162 * spider_net_setup_aneg - initial auto-negotiation setup
163 * @card: device structure
164 **/
165 static void
166 spider_net_setup_aneg(struct spider_net_card *card)
167 {
168 struct mii_phy *phy = &card->phy;
169 u32 advertise = 0;
170 u16 bmsr, estat;
171
172 bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
173 estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
174
175 if (bmsr & BMSR_10HALF)
176 advertise |= ADVERTISED_10baseT_Half;
177 if (bmsr & BMSR_10FULL)
178 advertise |= ADVERTISED_10baseT_Full;
179 if (bmsr & BMSR_100HALF)
180 advertise |= ADVERTISED_100baseT_Half;
181 if (bmsr & BMSR_100FULL)
182 advertise |= ADVERTISED_100baseT_Full;
183
184 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
185 advertise |= SUPPORTED_1000baseT_Full;
186 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
187 advertise |= SUPPORTED_1000baseT_Half;
188
189 sungem_phy_probe(phy, phy->mii_id);
190 phy->def->ops->setup_aneg(phy, advertise);
191
192 }
193
194 /**
195 * spider_net_rx_irq_off - switch off rx irq on this spider card
196 * @card: device structure
197 *
198 * switches off rx irq by masking them out in the GHIINTnMSK register
199 */
200 static void
201 spider_net_rx_irq_off(struct spider_net_card *card)
202 {
203 u32 regvalue;
204
205 regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
206 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
207 }
208
209 /**
210 * spider_net_rx_irq_on - switch on rx irq on this spider card
211 * @card: device structure
212 *
213 * switches on rx irq by enabling them in the GHIINTnMSK register
214 */
215 static void
216 spider_net_rx_irq_on(struct spider_net_card *card)
217 {
218 u32 regvalue;
219
220 regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
221 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
222 }
223
224 /**
225 * spider_net_set_promisc - sets the unicast address or the promiscuous mode
226 * @card: card structure
227 *
228 * spider_net_set_promisc sets the unicast destination address filter and
229 * thus selects either promiscuous or non-promiscuous mode
230 */
231 static void
232 spider_net_set_promisc(struct spider_net_card *card)
233 {
234 u32 macu, macl;
235 struct net_device *netdev = card->netdev;
236
237 if (netdev->flags & IFF_PROMISC) {
238 /* clear destination entry 0 */
239 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
240 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
241 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
242 SPIDER_NET_PROMISC_VALUE);
243 } else {
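/* Build unicast filter entry 0 from the device address: macu carries
 * the first two address bytes plus the SPIDER_NET_UA_DESCR_VALUE
 * control bits, macl the remaining four bytes.
 */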
244 macu = netdev->dev_addr[0];
245 macu <<= 8;
246 macu |= netdev->dev_addr[1];
247 memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
248
249 macu |= SPIDER_NET_UA_DESCR_VALUE;
250 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
251 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
252 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
253 SPIDER_NET_NONPROMISC_VALUE);
254 }
255 }
256
257 /**
258 * spider_net_get_descr_status -- returns the status of a descriptor
259 * @hwdescr: descriptor to look at
260 *
261 * returns the status as in the dmac_cmd_status field of the descriptor
262 */
263 static inline int
264 spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
265 {
266 return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
267 }
268
269 /**
270 * spider_net_free_chain - free descriptor chain
271 * @card: card structure
272 * @chain: address of chain
273 *
274 */
275 static void
276 spider_net_free_chain(struct spider_net_card *card,
277 struct spider_net_descr_chain *chain)
278 {
279 struct spider_net_descr *descr;
280
281 descr = chain->ring;
282 do {
283 descr->bus_addr = 0;
284 descr->hwdescr->next_descr_addr = 0;
285 descr = descr->next;
286 } while (descr != chain->ring);
287
288 dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
289 chain->hwring, chain->dma_addr);
290 }
291
292 /**
293 * spider_net_init_chain - alloc and link descriptor chain
294 * @card: card structure
295 * @chain: address of chain
296 *
297 * We manage a circular list that mirrors the hardware structure,
298 * except that the hardware uses bus addresses.
299 *
300 * Returns 0 on success, <0 on failure
301 */
302 static int
303 spider_net_init_chain(struct spider_net_card *card,
304 struct spider_net_descr_chain *chain)
305 {
306 int i;
307 struct spider_net_descr *descr;
308 struct spider_net_hw_descr *hwdescr;
309 dma_addr_t buf;
310 size_t alloc_size;
311
312 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
313
314 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
315 &chain->dma_addr, GFP_KERNEL);
316 if (!chain->hwring)
317 return -ENOMEM;
318
319 /* Set up the hardware pointers in each descriptor */
320 descr = chain->ring;
321 hwdescr = chain->hwring;
322 buf = chain->dma_addr;
323 for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
324 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
325 hwdescr->next_descr_addr = 0;
326
327 descr->hwdescr = hwdescr;
328 descr->bus_addr = buf;
329 descr->next = descr + 1;
330 descr->prev = descr - 1;
331
332 buf += sizeof(struct spider_net_hw_descr);
333 }
334 /* do actual circular list */
335 (descr-1)->next = chain->ring;
336 chain->ring->prev = descr-1;
337
338 spin_lock_init(&chain->lock);
339 chain->head = chain->ring;
340 chain->tail = chain->ring;
341 return 0;
342 }
343
344 /**
345 * spider_net_free_rx_chain_contents - frees descr contents in rx chain
346 * @card: card structure
347 *
348 * Unmaps and frees any skbs still attached to the rx descriptors.
349 */
350 static void
351 spider_net_free_rx_chain_contents(struct spider_net_card *card)
352 {
353 struct spider_net_descr *descr;
354
355 descr = card->rx_chain.head;
356 do {
357 if (descr->skb) {
358 dma_unmap_single(&card->pdev->dev,
359 descr->hwdescr->buf_addr,
360 SPIDER_NET_MAX_FRAME,
361 DMA_BIDIRECTIONAL);
362 dev_kfree_skb(descr->skb);
363 descr->skb = NULL;
364 }
365 descr = descr->next;
366 } while (descr != card->rx_chain.head);
367 }
368
369 /**
370 * spider_net_prepare_rx_descr - Reinitialize RX descriptor
371 * @card: card structure
372 * @descr: descriptor to re-init
373 *
374 * Return 0 on success, <0 on failure.
375 *
376 * Allocates a new rx skb, iommu-maps it and attaches it to the
377 * descriptor. Marks the descriptor as activated and ready to use.
378 */
379 static int
380 spider_net_prepare_rx_descr(struct spider_net_card *card,
381 struct spider_net_descr *descr)
382 {
383 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
384 dma_addr_t buf;
385 int offset;
386 int bufsize;
387
388 /* we need to round up the buffer size to a multiple of 128 */
389 bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
390 (~(SPIDER_NET_RXBUF_ALIGN - 1));
391
392 /* and we need to have it 128 byte aligned, therefore we allocate a
393 * bit more
394 */
395 /* allocate an skb */
396 descr->skb = netdev_alloc_skb(card->netdev,
397 bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
398 if (!descr->skb) {
399 if (netif_msg_rx_err(card) && net_ratelimit())
400 dev_err(&card->netdev->dev,
401 "Not enough memory to allocate rx buffer\n");
402 card->spider_stats.alloc_rx_skb_error++;
403 return -ENOMEM;
404 }
405 hwdescr->buf_size = bufsize;
406 hwdescr->result_size = 0;
407 hwdescr->valid_size = 0;
408 hwdescr->data_status = 0;
409 hwdescr->data_error = 0;
410
411 offset = ((unsigned long)descr->skb->data) &
412 (SPIDER_NET_RXBUF_ALIGN - 1);
413 if (offset)
414 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
415 /* iommu-map the skb */
416 buf = dma_map_single(&card->pdev->dev, descr->skb->data,
417 SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE);
418 if (dma_mapping_error(&card->pdev->dev, buf)) {
419 dev_kfree_skb_any(descr->skb);
420 descr->skb = NULL;
421 if (netif_msg_rx_err(card) && net_ratelimit())
422 dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
423 card->spider_stats.rx_iommu_map_error++;
424 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
425 } else {
426 hwdescr->buf_addr = buf;
427 wmb();
428 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
429 SPIDER_NET_DMAC_NOINTR_COMPLETE;
430 }
431
432 return 0;
433 }
434
435 /**
436 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
437 * @card: card structure
438 *
439 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
440 * chip by writing to the appropriate register. DMA is enabled in
441 * spider_net_enable_rxdmac.
442 */
443 static inline void
444 spider_net_enable_rxchtails(struct spider_net_card *card)
445 {
446 /* assume chain is aligned correctly */
447 spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
448 card->rx_chain.tail->bus_addr);
449 }
450
451 /**
452 * spider_net_enable_rxdmac - enables a receive DMA controller
453 * @card: card structure
454 *
455 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
456 * in the GDADMACCNTR register
457 */
458 static inline void
459 spider_net_enable_rxdmac(struct spider_net_card *card)
460 {
461 wmb();
462 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
463 SPIDER_NET_DMA_RX_VALUE);
464 }
465
466 /**
467 * spider_net_disable_rxdmac - disables the receive DMA controller
468 * @card: card structure
469 *
470 * spider_net_disable_rxdmac terminates processing on the DMA controller
471 * by turning off the DMA controller, with the force-end flag set.
472 */
473 static inline void
474 spider_net_disable_rxdmac(struct spider_net_card *card)
475 {
476 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
477 SPIDER_NET_DMA_RX_FEND_VALUE);
478 }
479
480 /**
481 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
482 * @card: card structure
483 *
484 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
485 */
486 static void
487 spider_net_refill_rx_chain(struct spider_net_card *card)
488 {
489 struct spider_net_descr_chain *chain = &card->rx_chain;
490 unsigned long flags;
491
492 /* one context doing the refill (and a second context seeing that
493 * and omitting it) is ok. If called by NAPI, we'll be called again
494 * as spider_net_decode_one_descr is called several times. If some
495 * interrupt calls us, the NAPI is about to clean up anyway.
496 */
497 if (!spin_trylock_irqsave(&chain->lock, flags))
498 return;
499
500 while (spider_net_get_descr_status(chain->head->hwdescr) ==
501 SPIDER_NET_DESCR_NOT_IN_USE) {
502 if (spider_net_prepare_rx_descr(card, chain->head))
503 break;
504 chain->head = chain->head->next;
505 }
506
507 spin_unlock_irqrestore(&chain->lock, flags);
508 }
509
510 /**
511 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
512 * @card: card structure
513 *
514 * Returns 0 on success, <0 on failure.
515 */
516 static int
517 spider_net_alloc_rx_skbs(struct spider_net_card *card)
518 {
519 struct spider_net_descr_chain *chain = &card->rx_chain;
520 struct spider_net_descr *start = chain->tail;
521 struct spider_net_descr *descr = start;
522
523 /* Link up the hardware chain pointers */
524 do {
525 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
526 descr = descr->next;
527 } while (descr != start);
528
529 /* Put at least one buffer into the chain. If this fails,
530 * we've got a problem. If not, spider_net_refill_rx_chain
531 * will do the rest at the end of this function.
532 */
533 if (spider_net_prepare_rx_descr(card, chain->head))
534 goto error;
535 else
536 chain->head = chain->head->next;
537
538 /* This will allocate the rest of the rx buffers;
539 * any that could not be allocated here will be refilled later on.
540 */
541 spider_net_refill_rx_chain(card);
542 spider_net_enable_rxdmac(card);
543 return 0;
544
545 error:
546 spider_net_free_rx_chain_contents(card);
547 return -ENOMEM;
548 }
549
550 /**
551 * spider_net_get_multicast_hash - generates hash for multicast filter table
552 * @netdev: interface device structure
553 * @addr: multicast address
554 *
555 * returns the hash value.
556 *
557 * spider_net_get_multicast_hash calculates a hash value for a given multicast
558 * address that is used to set the multicast filter tables
559 */
560 static u8
561 spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
562 {
563 u32 crc;
564 u8 hash;
565 char addr_for_crc[ETH_ALEN] = { 0, };
566 int i, bit;
567
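/* Reverse both the byte order and the bit order within each byte of the
 * address before computing the CRC, presumably to match the bit order
 * the hardware uses for its hash table.
 */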
568 for (i = 0; i < ETH_ALEN * 8; i++) {
569 bit = (addr[i / 8] >> (i % 8)) & 1;
570 addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
571 }
572
573 crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
574
575 hash = (crc >> 27);
576 hash <<= 3;
577 hash |= crc & 7;
578 hash &= 0xff;
579
580 return hash;
581 }
582
583 /**
584 * spider_net_set_multi - sets multicast addresses and promisc flags
585 * @netdev: interface device structure
586 *
587 * spider_net_set_multi configures multicast addresses as needed for the
588 * netdev interface. It also sets up multicast, allmulti and promisc
589 * flags appropriately
590 */
591 static void
592 spider_net_set_multi(struct net_device *netdev)
593 {
594 struct netdev_hw_addr *ha;
595 u8 hash;
596 int i;
597 u32 reg;
598 struct spider_net_card *card = netdev_priv(netdev);
599 DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
600
601 spider_net_set_promisc(card);
602
603 if (netdev->flags & IFF_ALLMULTI) {
604 bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
605 goto write_hash;
606 }
607
608 bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
609
610 /* we already know what the broadcast hash value is: it's 0xfd
611 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
612 __set_bit(0xfd, bitmask);
613
614 netdev_for_each_mc_addr(ha, netdev) {
615 hash = spider_net_get_multicast_hash(netdev, ha->addr);
616 __set_bit(hash, bitmask);
617 }
618
619 write_hash:
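/* Each GMRMHFILnR register covers four consecutive hash buckets, one
 * byte per bucket; writing 0x08 into a byte enables that bucket.
 */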
620 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
621 reg = 0;
622 if (test_bit(i * 4, bitmask))
623 reg += 0x08;
624 reg <<= 8;
625 if (test_bit(i * 4 + 1, bitmask))
626 reg += 0x08;
627 reg <<= 8;
628 if (test_bit(i * 4 + 2, bitmask))
629 reg += 0x08;
630 reg <<= 8;
631 if (test_bit(i * 4 + 3, bitmask))
632 reg += 0x08;
633
634 spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
635 }
636 }
637
638 /**
639 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
640 * @card: card structure
641 * @skb: packet to use
642 *
643 * returns 0 on success, <0 on failure.
644 *
645 * fills out the descriptor structure with the skb data and length and
646 * maps the buffer for DMA
647 */
648 static int
649 spider_net_prepare_tx_descr(struct spider_net_card *card,
650 struct sk_buff *skb)
651 {
652 struct spider_net_descr_chain *chain = &card->tx_chain;
653 struct spider_net_descr *descr;
654 struct spider_net_hw_descr *hwdescr;
655 dma_addr_t buf;
656 unsigned long flags;
657
658 buf = dma_map_single(&card->pdev->dev, skb->data, skb->len,
659 DMA_TO_DEVICE);
660 if (dma_mapping_error(&card->pdev->dev, buf)) {
661 if (netif_msg_tx_err(card) && net_ratelimit())
662 dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
663 "Dropping packet\n", skb->data, skb->len);
664 card->spider_stats.tx_iommu_map_error++;
665 return -ENOMEM;
666 }
667
668 spin_lock_irqsave(&chain->lock, flags);
669 descr = card->tx_chain.head;
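/* Treat the ring as full when advancing the head would reach the
 * descriptor just before the tail; in that case undo the mapping and
 * return -ENOMEM.
 */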
670 if (descr->next == chain->tail->prev) {
671 spin_unlock_irqrestore(&chain->lock, flags);
672 dma_unmap_single(&card->pdev->dev, buf, skb->len,
673 DMA_TO_DEVICE);
674 return -ENOMEM;
675 }
676 hwdescr = descr->hwdescr;
677 chain->head = descr->next;
678
679 descr->skb = skb;
680 hwdescr->buf_addr = buf;
681 hwdescr->buf_size = skb->len;
682 hwdescr->next_descr_addr = 0;
683 hwdescr->data_status = 0;
684
685 hwdescr->dmac_cmd_status =
686 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
687 spin_unlock_irqrestore(&chain->lock, flags);
688
689 if (skb->ip_summed == CHECKSUM_PARTIAL)
690 switch (ip_hdr(skb)->protocol) {
691 case IPPROTO_TCP:
692 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
693 break;
694 case IPPROTO_UDP:
695 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
696 break;
697 }
698
699 /* Chain the bus address, so that the DMA engine finds this descr. */
700 wmb();
701 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
702
703 netif_trans_update(card->netdev); /* set netdev watchdog timer */
704 return 0;
705 }
706
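/**
 * spider_net_set_low_watermark - set the tx queue "low watermark"
 * @card: card structure
 *
 * Counts the descriptors pending transmission. If more than a quarter of
 * the ring is in use, the SPIDER_NET_DESCR_TXDESFLG bit is set on the
 * descriptor three quarters of the way down the pending queue, so that
 * the hardware raises a tx interrupt when it reaches that descriptor;
 * any previously set watermark flag is cleared. Returns the measured
 * queue length, or the offset of the new watermark if one was set.
 */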
707 static int
708 spider_net_set_low_watermark(struct spider_net_card *card)
709 {
710 struct spider_net_descr *descr = card->tx_chain.tail;
711 struct spider_net_hw_descr *hwdescr;
712 unsigned long flags;
713 int status;
714 int cnt=0;
715 int i;
716
717 /* Measure the length of the queue. Measurement does not
718 * need to be precise -- does not need a lock.
719 */
720 while (descr != card->tx_chain.head) {
721 status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
722 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
723 break;
724 descr = descr->next;
725 cnt++;
726 }
727
728 /* If TX queue is short, don't even bother with interrupts */
729 if (cnt < card->tx_chain.num_desc/4)
730 return cnt;
731
732 /* Set low-watermark 3/4th's of the way into the queue. */
733 descr = card->tx_chain.tail;
734 cnt = (cnt*3)/4;
735 for (i=0;i<cnt; i++)
736 descr = descr->next;
737
738 /* Set the new watermark, clear the old watermark */
739 spin_lock_irqsave(&card->tx_chain.lock, flags);
740 descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
741 if (card->low_watermark && card->low_watermark != descr) {
742 hwdescr = card->low_watermark->hwdescr;
743 hwdescr->dmac_cmd_status =
744 hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
745 }
746 card->low_watermark = descr;
747 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
748 return cnt;
749 }
750
751 /**
752 * spider_net_release_tx_chain - processes sent tx descriptors
753 * @card: adapter structure
754 * @brutal: if set, don't care about whether descriptor seems to be in use
755 *
756 * returns 0 if the tx ring is empty, otherwise 1.
757 *
758 * spider_net_release_tx_chain releases the tx descriptors that spider has
759 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
760 * If some other context is calling this function, we return 1 so that we're
761 * scheduled again (if we were scheduled) and will not lose initiative.
762 */
763 static int
764 spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
765 {
766 struct net_device *dev = card->netdev;
767 struct spider_net_descr_chain *chain = &card->tx_chain;
768 struct spider_net_descr *descr;
769 struct spider_net_hw_descr *hwdescr;
770 struct sk_buff *skb;
771 u32 buf_addr;
772 unsigned long flags;
773 int status;
774
775 while (1) {
776 spin_lock_irqsave(&chain->lock, flags);
777 if (chain->tail == chain->head) {
778 spin_unlock_irqrestore(&chain->lock, flags);
779 return 0;
780 }
781 descr = chain->tail;
782 hwdescr = descr->hwdescr;
783
784 status = spider_net_get_descr_status(hwdescr);
785 switch (status) {
786 case SPIDER_NET_DESCR_COMPLETE:
787 dev->stats.tx_packets++;
788 dev->stats.tx_bytes += descr->skb->len;
789 break;
790
791 case SPIDER_NET_DESCR_CARDOWNED:
792 if (!brutal) {
793 spin_unlock_irqrestore(&chain->lock, flags);
794 return 1;
795 }
796
797 /* fallthrough, if we release the descriptors
798 * brutally (then we don't care about
799 * SPIDER_NET_DESCR_CARDOWNED)
800 */
801 fallthrough;
802
803 case SPIDER_NET_DESCR_RESPONSE_ERROR:
804 case SPIDER_NET_DESCR_PROTECTION_ERROR:
805 case SPIDER_NET_DESCR_FORCE_END:
806 if (netif_msg_tx_err(card))
807 dev_err(&card->netdev->dev, "forcing end of tx descriptor "
808 "with status x%02x\n", status);
809 dev->stats.tx_errors++;
810 break;
811
812 default:
813 dev->stats.tx_dropped++;
814 if (!brutal) {
815 spin_unlock_irqrestore(&chain->lock, flags);
816 return 1;
817 }
818 }
819
820 chain->tail = descr->next;
821 hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
822 skb = descr->skb;
823 descr->skb = NULL;
824 buf_addr = hwdescr->buf_addr;
825 spin_unlock_irqrestore(&chain->lock, flags);
826
827 /* unmap the skb */
828 if (skb) {
829 dma_unmap_single(&card->pdev->dev, buf_addr, skb->len,
830 DMA_TO_DEVICE);
831 dev_consume_skb_any(skb);
832 }
833 }
834 return 0;
835 }
836
837 /**
838 * spider_net_kick_tx_dma - enables TX DMA processing
839 * @card: card structure
840 *
841 * This routine will start the transmit DMA running if
842 * it is not already running. This routine need only be
843 * called when queueing a new packet to an empty tx queue.
844 * Writes the current tx chain head as start address
845 * of the tx descriptor chain and enables the transmission
846 * DMA engine.
847 */
848 static inline void
849 spider_net_kick_tx_dma(struct spider_net_card *card)
850 {
851 struct spider_net_descr *descr;
852
853 if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
854 SPIDER_NET_TX_DMA_EN)
855 goto out;
856
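/* Walk the chain from the tail and restart the DMA engine at the first
 * descriptor that is still owned by the card.
 */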
857 descr = card->tx_chain.tail;
858 for (;;) {
859 if (spider_net_get_descr_status(descr->hwdescr) ==
860 SPIDER_NET_DESCR_CARDOWNED) {
861 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
862 descr->bus_addr);
863 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
864 SPIDER_NET_DMA_TX_VALUE);
865 break;
866 }
867 if (descr == card->tx_chain.head)
868 break;
869 descr = descr->next;
870 }
871
872 out:
873 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
874 }
875
876 /**
877 * spider_net_xmit - transmits a frame over the device
878 * @skb: packet to send out
879 * @netdev: interface device structure
880 *
881 * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
882 */
883 static netdev_tx_t
884 spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
885 {
886 int cnt;
887 struct spider_net_card *card = netdev_priv(netdev);
888
889 spider_net_release_tx_chain(card, 0);
890
891 if (spider_net_prepare_tx_descr(card, skb) != 0) {
892 netdev->stats.tx_dropped++;
893 netif_stop_queue(netdev);
894 return NETDEV_TX_BUSY;
895 }
896
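/* If only a few frames are pending, kick the tx DMA engine right away
 * instead of waiting for the low-watermark interrupt.
 */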
897 cnt = spider_net_set_low_watermark(card);
898 if (cnt < 5)
899 spider_net_kick_tx_dma(card);
900 return NETDEV_TX_OK;
901 }
902
903 /**
904 * spider_net_cleanup_tx_ring - cleans up the TX ring
905 * @t: timer context used to obtain the pointer to net card data structure
906 *
907 * spider_net_cleanup_tx_ring is called by either the tx_timer
908 * or from the NAPI polling routine.
909 * This routine releases resources associated with transmitted
910 * packets, including updating the queue tail pointer.
911 */
912 static void
913 spider_net_cleanup_tx_ring(struct timer_list *t)
914 {
915 struct spider_net_card *card = from_timer(card, t, tx_timer);
916 if ((spider_net_release_tx_chain(card, 0) != 0) &&
917 (card->netdev->flags & IFF_UP)) {
918 spider_net_kick_tx_dma(card);
919 netif_wake_queue(card->netdev);
920 }
921 }
922
923 /**
924 * spider_net_do_ioctl - called for device ioctls
925 * @netdev: interface device structure
926 * @ifr: request parameter structure for ioctl
927 * @cmd: command code for ioctl
928 *
929 * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
930 * -EOPNOTSUPP is returned if an unknown ioctl is requested
931 */
932 static int
933 spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
934 {
935 switch (cmd) {
936 default:
937 return -EOPNOTSUPP;
938 }
939 }
940
941 /**
942 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
943 * @descr: descriptor to process
944 * @card: card structure
945 *
946 * Fills out skb structure and passes the data to the stack.
947 * The descriptor state is not changed.
948 */
949 static void
950 spider_net_pass_skb_up(struct spider_net_descr *descr,
951 struct spider_net_card *card)
952 {
953 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
954 struct sk_buff *skb = descr->skb;
955 struct net_device *netdev = card->netdev;
956 u32 data_status = hwdescr->data_status;
957 u32 data_error = hwdescr->data_error;
958
959 skb_put(skb, hwdescr->valid_size);
960
961 /* the card seems to add 2 bytes of junk in front
962 * of the ethernet frame
963 */
964 #define SPIDER_MISALIGN 2
965 skb_pull(skb, SPIDER_MISALIGN);
966 skb->protocol = eth_type_trans(skb, netdev);
967
968 /* checksum offload */
969 skb_checksum_none_assert(skb);
970 if (netdev->features & NETIF_F_RXCSUM) {
971 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
972 SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
973 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
974 skb->ip_summed = CHECKSUM_UNNECESSARY;
975 }
976
977 if (data_status & SPIDER_NET_VLAN_PACKET) {
978 /* further enhancements: HW-accel VLAN */
979 }
980
981 /* update netdevice statistics */
982 netdev->stats.rx_packets++;
983 netdev->stats.rx_bytes += skb->len;
984
985 /* pass skb up to stack */
986 netif_receive_skb(skb);
987 }
988
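/**
 * show_rx_chain - dump the state of the rx descriptor chain
 * @card: card structure
 *
 * Logs the chain head and tail positions, the hardware's current and next
 * descriptor addresses (GDACTDPA/GDACNEXTDA), and runs of descriptors that
 * share the same status. Used to debug rx ring problems.
 */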
989 static void show_rx_chain(struct spider_net_card *card)
990 {
991 struct spider_net_descr_chain *chain = &card->rx_chain;
992 struct spider_net_descr *start= chain->tail;
993 struct spider_net_descr *descr= start;
994 struct spider_net_hw_descr *hwd = start->hwdescr;
995 struct device *dev = &card->netdev->dev;
996 u32 curr_desc, next_desc;
997 int status;
998
999 int tot = 0;
1000 int cnt = 0;
1001 int off = start - chain->ring;
1002 int cstat = hwd->dmac_cmd_status;
1003
1004 dev_info(dev, "Total number of descrs=%d\n",
1005 chain->num_desc);
1006 dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
1007 off, cstat);
1008
1009 curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
1010 next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
1011
1012 status = cstat;
1013 do
1014 {
1015 hwd = descr->hwdescr;
1016 off = descr - chain->ring;
1017 status = hwd->dmac_cmd_status;
1018
1019 if (descr == chain->head)
1020 dev_info(dev, "Chain head is at %d, head status=0x%x\n",
1021 off, status);
1022
1023 if (curr_desc == descr->bus_addr)
1024 dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
1025 off, status);
1026
1027 if (next_desc == descr->bus_addr)
1028 dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
1029 off, status);
1030
1031 if (hwd->next_descr_addr == 0)
1032 dev_info(dev, "chain is cut at %d\n", off);
1033
1034 if (cstat != status) {
1035 int from = (chain->num_desc + off - cnt) % chain->num_desc;
1036 int to = (chain->num_desc + off - 1) % chain->num_desc;
1037 dev_info(dev, "Have %d (from %d to %d) descrs "
1038 "with stat=0x%08x\n", cnt, from, to, cstat);
1039 cstat = status;
1040 cnt = 0;
1041 }
1042
1043 cnt ++;
1044 tot ++;
1045 descr = descr->next;
1046 } while (descr != start);
1047
1048 dev_info(dev, "Last %d descrs with stat=0x%08x "
1049 "for a total of %d descrs\n", cnt, cstat, tot);
1050
1051 #ifdef DEBUG
1052 /* Now dump the whole ring */
1053 descr = start;
1054 do
1055 {
1056 struct spider_net_hw_descr *hwd = descr->hwdescr;
1057 status = spider_net_get_descr_status(hwd);
1058 cnt = descr - chain->ring;
1059 dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
1060 cnt, status, descr->skb);
1061 dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
1062 descr->bus_addr, hwd->buf_addr, hwd->buf_size);
1063 dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
1064 hwd->next_descr_addr, hwd->result_size,
1065 hwd->valid_size);
1066 dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
1067 hwd->dmac_cmd_status, hwd->data_status,
1068 hwd->data_error);
1069 dev_info(dev, "\n");
1070
1071 descr = descr->next;
1072 } while (descr != start);
1073 #endif
1074
1075 }
1076
1077 /**
1078 * spider_net_resync_head_ptr - Advance head ptr past empty descrs
1079 * @card: card structure
1080 *
1081 * If the driver fails to keep up and empty the queue, then the
1082 * hardware will run out of room to put incoming packets. This
1083 * will cause the hardware to skip descrs that are full (instead
1084 * of halting/retrying). Thus, once the driver runs again, it will need
1085 * to "catch up" to where the hardware chain pointer is at.
1086 */
1087 static void spider_net_resync_head_ptr(struct spider_net_card *card)
1088 {
1089 unsigned long flags;
1090 struct spider_net_descr_chain *chain = &card->rx_chain;
1091 struct spider_net_descr *descr;
1092 int i, status;
1093
1094 /* Advance head pointer past any empty descrs */
1095 descr = chain->head;
1096 status = spider_net_get_descr_status(descr->hwdescr);
1097
1098 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
1099 return;
1100
1101 spin_lock_irqsave(&chain->lock, flags);
1102
1103 descr = chain->head;
1104 status = spider_net_get_descr_status(descr->hwdescr);
1105 for (i=0; i<chain->num_desc; i++) {
1106 if (status != SPIDER_NET_DESCR_CARDOWNED) break;
1107 descr = descr->next;
1108 status = spider_net_get_descr_status(descr->hwdescr);
1109 }
1110 chain->head = descr;
1111
1112 spin_unlock_irqrestore(&chain->lock, flags);
1113 }
1114
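/**
 * spider_net_resync_tail_ptr - advance tail ptr past empty descrs
 * @card: card structure
 *
 * Advances the rx chain tail past descriptors that are not in use or are
 * still owned by the card. Returns 0 if the tail now points at a
 * descriptor holding received data, otherwise 1.
 */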
1115 static int spider_net_resync_tail_ptr(struct spider_net_card *card)
1116 {
1117 struct spider_net_descr_chain *chain = &card->rx_chain;
1118 struct spider_net_descr *descr;
1119 int i, status;
1120
1121 /* Advance tail pointer past any empty and reaped descrs */
1122 descr = chain->tail;
1123 status = spider_net_get_descr_status(descr->hwdescr);
1124
1125 for (i=0; i<chain->num_desc; i++) {
1126 if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
1127 (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
1128 descr = descr->next;
1129 status = spider_net_get_descr_status(descr->hwdescr);
1130 }
1131 chain->tail = descr;
1132
1133 if ((i == chain->num_desc) || (i == 0))
1134 return 1;
1135 return 0;
1136 }
1137
1138 /**
1139 * spider_net_decode_one_descr - processes an RX descriptor
1140 * @card: card structure
1141 *
1142 * Returns 1 if a packet has been sent to the stack, otherwise 0.
1143 *
1144 * Processes an RX descriptor by iommu-unmapping the data buffer
1145 * and passing the packet up to the stack. This function is called
1146 * in softirq context, e.g. either bottom half from interrupt or
1147 * NAPI polling context.
1148 */
1149 static int
1150 spider_net_decode_one_descr(struct spider_net_card *card)
1151 {
1152 struct net_device *dev = card->netdev;
1153 struct spider_net_descr_chain *chain = &card->rx_chain;
1154 struct spider_net_descr *descr = chain->tail;
1155 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
1156 u32 hw_buf_addr;
1157 int status;
1158
1159 status = spider_net_get_descr_status(hwdescr);
1160
1161 /* Nothing in the descriptor, or ring must be empty */
1162 if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
1163 (status == SPIDER_NET_DESCR_NOT_IN_USE))
1164 return 0;
1165
1166 /* descriptor definitively used -- move on tail */
1167 chain->tail = descr->next;
1168
1169 /* unmap descriptor */
1170 hw_buf_addr = hwdescr->buf_addr;
1171 hwdescr->buf_addr = 0xffffffff;
1172 dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME,
1173 DMA_FROM_DEVICE);
1174
1175 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1176 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1177 (status == SPIDER_NET_DESCR_FORCE_END) ) {
1178 if (netif_msg_rx_err(card))
1179 dev_err(&dev->dev,
1180 "dropping RX descriptor with state %d\n", status);
1181 dev->stats.rx_dropped++;
1182 goto bad_desc;
1183 }
1184
1185 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1186 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1187 if (netif_msg_rx_err(card))
1188 dev_err(&card->netdev->dev,
1189 "RX descriptor with unknown state %d\n", status);
1190 card->spider_stats.rx_desc_unk_state++;
1191 goto bad_desc;
1192 }
1193
1194 /* The cases we'll throw away the packet immediately */
1195 if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1196 if (netif_msg_rx_err(card))
1197 dev_err(&card->netdev->dev,
1198 "error in received descriptor found, "
1199 "data_status=x%08x, data_error=x%08x\n",
1200 hwdescr->data_status, hwdescr->data_error);
1201 goto bad_desc;
1202 }
1203
1204 if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
1205 dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
1206 hwdescr->dmac_cmd_status);
1207 pr_err("buf_addr=x%08x\n", hw_buf_addr);
1208 pr_err("buf_size=x%08x\n", hwdescr->buf_size);
1209 pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
1210 pr_err("result_size=x%08x\n", hwdescr->result_size);
1211 pr_err("valid_size=x%08x\n", hwdescr->valid_size);
1212 pr_err("data_status=x%08x\n", hwdescr->data_status);
1213 pr_err("data_error=x%08x\n", hwdescr->data_error);
1214 pr_err("which=%ld\n", descr - card->rx_chain.ring);
1215
1216 card->spider_stats.rx_desc_error++;
1217 goto bad_desc;
1218 }
1219
1220 /* Ok, we've got a packet in descr */
1221 spider_net_pass_skb_up(descr, card);
1222 descr->skb = NULL;
1223 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1224 return 1;
1225
1226 bad_desc:
1227 if (netif_msg_rx_err(card))
1228 show_rx_chain(card);
1229 dev_kfree_skb_irq(descr->skb);
1230 descr->skb = NULL;
1231 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1232 return 0;
1233 }
1234
1235 /**
1236 * spider_net_poll - NAPI poll function called by the stack to return packets
1237 * @napi: napi device structure
1238 * @budget: number of packets we can pass to the stack at most
1239 *
1240 * Returns the number of packets passed up to the stack, which is at
1241 * most @budget.
1242 *
1243 * spider_net_poll passes received packets from the rx descriptors to the
1244 * stack (using netif_receive_skb). If fewer than @budget packets were
1245 * processed, NAPI polling is completed and rx interrupts are re-enabled.
1246 */
1247 static int spider_net_poll(struct napi_struct *napi, int budget)
1248 {
1249 struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
1250 int packets_done = 0;
1251
1252 while (packets_done < budget) {
1253 if (!spider_net_decode_one_descr(card))
1254 break;
1255
1256 packets_done++;
1257 }
1258
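/* An rx interrupt fired but no packet was found: the hardware may have
 * run ahead of the driver, so resynchronize the ring pointers.
 */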
1259 if ((packets_done == 0) && (card->num_rx_ints != 0)) {
1260 if (!spider_net_resync_tail_ptr(card))
1261 packets_done = budget;
1262 spider_net_resync_head_ptr(card);
1263 }
1264 card->num_rx_ints = 0;
1265
1266 spider_net_refill_rx_chain(card);
1267 spider_net_enable_rxdmac(card);
1268
1269 spider_net_cleanup_tx_ring(&card->tx_timer);
1270
1271 /* if all packets are in the stack, enable interrupts and return 0 */
1272 /* if not, return 1 */
1273 if (packets_done < budget) {
1274 napi_complete_done(napi, packets_done);
1275 spider_net_rx_irq_on(card);
1276 card->ignore_rx_ramfull = 0;
1277 }
1278
1279 return packets_done;
1280 }
1281
1282 /**
1283 * spider_net_set_mac - sets the MAC of an interface
1284 * @netdev: interface device structure
1285 * @p: pointer to new MAC address
1286 *
1287 * Returns 0 on success, <0 on failure. Programs the new address into the
1288 * unicast MAC address registers and updates the unicast address filter.
1289 */
1290 static int
1291 spider_net_set_mac(struct net_device *netdev, void *p)
1292 {
1293 struct spider_net_card *card = netdev_priv(netdev);
1294 u32 macl, macu, regvalue;
1295 struct sockaddr *addr = p;
1296
1297 if (!is_valid_ether_addr(addr->sa_data))
1298 return -EADDRNOTAVAIL;
1299
1300 eth_hw_addr_set(netdev, addr->sa_data);
1301
1302 /* switch off GMACTPE and GMACRPE */
1303 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1304 regvalue &= ~((1 << 5) | (1 << 6));
1305 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1306
1307 /* write mac */
1308 macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
1309 (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
1310 macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
1311 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
1312 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
1313
1314 /* switch GMACTPE and GMACRPE back on */
1315 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1316 regvalue |= ((1 << 5) | (1 << 6));
1317 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1318
1319 spider_net_set_promisc(card);
1320
1321 return 0;
1322 }
1323
1324 /**
1325 * spider_net_link_reset
1326 * @netdev: net device structure
1327 *
1328 * This is called when the PHY_LINK signal is asserted. For the blade this is
1329 * not connected so we should never get here.
1330 *
1331 */
1332 static void
1333 spider_net_link_reset(struct net_device *netdev)
1334 {
1335
1336 struct spider_net_card *card = netdev_priv(netdev);
1337
1338 del_timer_sync(&card->aneg_timer);
1339
1340 /* clear interrupt, block further interrupts */
1341 spider_net_write_reg(card, SPIDER_NET_GMACST,
1342 spider_net_read_reg(card, SPIDER_NET_GMACST));
1343 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1344
1345 /* reset phy and setup aneg */
1346 card->aneg_count = 0;
1347 card->medium = BCM54XX_COPPER;
1348 spider_net_setup_aneg(card);
1349 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1350
1351 }
1352
1353 /**
1354 * spider_net_handle_error_irq - handles errors raised by an interrupt
1355 * @card: card structure
1356 * @status_reg: interrupt status register 0 (GHIINT0STS)
1357 * @error_reg1: interrupt status register 1 (GHIINT1STS)
1358 * @error_reg2: interrupt status register 2 (GHIINT2STS)
1359 *
1360 * spider_net_handle_error_irq treats or ignores all error conditions
1361 * found when an interrupt is presented
1362 */
1363 static void
1364 spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1365 u32 error_reg1, u32 error_reg2)
1366 {
1367 u32 i;
1368 int show_error = 1;
1369
1370 /* check GHIINT0STS ************************************/
1371 if (status_reg)
1372 for (i = 0; i < 32; i++)
1373 if (status_reg & (1<<i))
1374 switch (i)
1375 {
1376 /* let error_reg1 and error_reg2 evaluation decide, what to do
1377 case SPIDER_NET_PHYINT:
1378 case SPIDER_NET_GMAC2INT:
1379 case SPIDER_NET_GMAC1INT:
1380 case SPIDER_NET_GFIFOINT:
1381 case SPIDER_NET_DMACINT:
1382 case SPIDER_NET_GSYSINT:
1383 break; */
1384
1385 case SPIDER_NET_GIPSINT:
1386 show_error = 0;
1387 break;
1388
1389 case SPIDER_NET_GPWOPCMPINT:
1390 /* PHY write operation completed */
1391 show_error = 0;
1392 break;
1393 case SPIDER_NET_GPROPCMPINT:
1394 /* PHY read operation completed */
1395 /* we don't use semaphores, as we poll for the completion
1396 * of the read operation in spider_net_read_phy. Should take
1397 * about 50 us
1398 */
1399 show_error = 0;
1400 break;
1401 case SPIDER_NET_GPWFFINT:
1402 /* PHY command queue full */
1403 if (netif_msg_intr(card))
1404 dev_err(&card->netdev->dev, "PHY write queue full\n");
1405 show_error = 0;
1406 break;
1407
1408 /* case SPIDER_NET_GRMDADRINT: not used. print a message */
1409 /* case SPIDER_NET_GRMARPINT: not used. print a message */
1410 /* case SPIDER_NET_GRMMPINT: not used. print a message */
1411
1412 case SPIDER_NET_GDTDEN0INT:
1413 /* someone has set TX_DMA_EN to 0 */
1414 show_error = 0;
1415 break;
1416
1417 case SPIDER_NET_GDDDEN0INT:
1418 case SPIDER_NET_GDCDEN0INT:
1419 case SPIDER_NET_GDBDEN0INT:
1420 case SPIDER_NET_GDADEN0INT:
1421 /* someone has set RX_DMA_EN to 0 */
1422 show_error = 0;
1423 break;
1424
1425 /* RX interrupts */
1426 case SPIDER_NET_GDDFDCINT:
1427 case SPIDER_NET_GDCFDCINT:
1428 case SPIDER_NET_GDBFDCINT:
1429 case SPIDER_NET_GDAFDCINT:
1430 /* case SPIDER_NET_GDNMINT: not used. print a message */
1431 /* case SPIDER_NET_GCNMINT: not used. print a message */
1432 /* case SPIDER_NET_GBNMINT: not used. print a message */
1433 /* case SPIDER_NET_GANMINT: not used. print a message */
1434 /* case SPIDER_NET_GRFNMINT: not used. print a message */
1435 show_error = 0;
1436 break;
1437
1438 /* TX interrupts */
1439 case SPIDER_NET_GDTFDCINT:
1440 show_error = 0;
1441 break;
1442 case SPIDER_NET_GTTEDINT:
1443 show_error = 0;
1444 break;
1445 case SPIDER_NET_GDTDCEINT:
1446 /* chain end. If a descriptor should be sent, kick off
1447 * tx dma
1448 if (card->tx_chain.tail != card->tx_chain.head)
1449 spider_net_kick_tx_dma(card);
1450 */
1451 show_error = 0;
1452 break;
1453
1454 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
1455 /* case SPIDER_NET_GFREECNTINT: not used. print a message */
1456 }
1457
1458 /* check GHIINT1STS ************************************/
1459 if (error_reg1)
1460 for (i = 0; i < 32; i++)
1461 if (error_reg1 & (1<<i))
1462 switch (i)
1463 {
1464 case SPIDER_NET_GTMFLLINT:
1465 /* TX RAM full may happen on a usual case.
1466 * Logging is not needed.
1467 */
1468 show_error = 0;
1469 break;
1470 case SPIDER_NET_GRFDFLLINT:
1471 case SPIDER_NET_GRFCFLLINT:
1472 case SPIDER_NET_GRFBFLLINT:
1473 case SPIDER_NET_GRFAFLLINT:
1474 case SPIDER_NET_GRMFLLINT:
1475 /* Could happen when rx chain is full */
1476 if (card->ignore_rx_ramfull == 0) {
1477 card->ignore_rx_ramfull = 1;
1478 spider_net_resync_head_ptr(card);
1479 spider_net_refill_rx_chain(card);
1480 spider_net_enable_rxdmac(card);
1481 card->num_rx_ints ++;
1482 napi_schedule(&card->napi);
1483 }
1484 show_error = 0;
1485 break;
1486
1487 /* case SPIDER_NET_GTMSHTINT: problem, print a message */
1488 case SPIDER_NET_GDTINVDINT:
1489 /* allrighty. tx from previous descr ok */
1490 show_error = 0;
1491 break;
1492
1493 /* chain end */
1494 case SPIDER_NET_GDDDCEINT:
1495 case SPIDER_NET_GDCDCEINT:
1496 case SPIDER_NET_GDBDCEINT:
1497 case SPIDER_NET_GDADCEINT:
1498 spider_net_resync_head_ptr(card);
1499 spider_net_refill_rx_chain(card);
1500 spider_net_enable_rxdmac(card);
1501 card->num_rx_ints ++;
1502 napi_schedule(&card->napi);
1503 show_error = 0;
1504 break;
1505
1506 /* invalid descriptor */
1507 case SPIDER_NET_GDDINVDINT:
1508 case SPIDER_NET_GDCINVDINT:
1509 case SPIDER_NET_GDBINVDINT:
1510 case SPIDER_NET_GDAINVDINT:
1511 /* Could happen when rx chain is full */
1512 spider_net_resync_head_ptr(card);
1513 spider_net_refill_rx_chain(card);
1514 spider_net_enable_rxdmac(card);
1515 card->num_rx_ints ++;
1516 napi_schedule(&card->napi);
1517 show_error = 0;
1518 break;
1519
1520 /* case SPIDER_NET_GDTRSERINT: problem, print a message */
1521 /* case SPIDER_NET_GDDRSERINT: problem, print a message */
1522 /* case SPIDER_NET_GDCRSERINT: problem, print a message */
1523 /* case SPIDER_NET_GDBRSERINT: problem, print a message */
1524 /* case SPIDER_NET_GDARSERINT: problem, print a message */
1525 /* case SPIDER_NET_GDSERINT: problem, print a message */
1526 /* case SPIDER_NET_GDTPTERINT: problem, print a message */
1527 /* case SPIDER_NET_GDDPTERINT: problem, print a message */
1528 /* case SPIDER_NET_GDCPTERINT: problem, print a message */
1529 /* case SPIDER_NET_GDBPTERINT: problem, print a message */
1530 /* case SPIDER_NET_GDAPTERINT: problem, print a message */
1531 default:
1532 show_error = 1;
1533 break;
1534 }
1535
1536 /* check GHIINT2STS ************************************/
1537 if (error_reg2)
1538 for (i = 0; i < 32; i++)
1539 if (error_reg2 & (1<<i))
1540 switch (i)
1541 {
1542 /* there is nothing we can (want to) do at this time. Log a
1543 * message, we can switch on and off the specific values later on
1544 case SPIDER_NET_GPROPERINT:
1545 case SPIDER_NET_GMCTCRSNGINT:
1546 case SPIDER_NET_GMCTLCOLINT:
1547 case SPIDER_NET_GMCTTMOTINT:
1548 case SPIDER_NET_GMCRCAERINT:
1549 case SPIDER_NET_GMCRCALERINT:
1550 case SPIDER_NET_GMCRALNERINT:
1551 case SPIDER_NET_GMCROVRINT:
1552 case SPIDER_NET_GMCRRNTINT:
1553 case SPIDER_NET_GMCRRXERINT:
1554 case SPIDER_NET_GTITCSERINT:
1555 case SPIDER_NET_GTIFMTERINT:
1556 case SPIDER_NET_GTIPKTRVKINT:
1557 case SPIDER_NET_GTISPINGINT:
1558 case SPIDER_NET_GTISADNGINT:
1559 case SPIDER_NET_GTISPDNGINT:
1560 case SPIDER_NET_GRIFMTERINT:
1561 case SPIDER_NET_GRIPKTRVKINT:
1562 case SPIDER_NET_GRISPINGINT:
1563 case SPIDER_NET_GRISADNGINT:
1564 case SPIDER_NET_GRISPDNGINT:
1565 break;
1566 */
1567 default:
1568 break;
1569 }
1570
1571 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
1572 dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
1573 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1574 status_reg, error_reg1, error_reg2);
1575
1576 /* clear interrupt sources */
1577 spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
1578 spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
1579 }
1580
1581 /**
1582 * spider_net_interrupt - interrupt handler for spider_net
1583 * @irq: interrupt number
1584 * @ptr: pointer to net_device
1585 *
1586 * returns IRQ_HANDLED if the interrupt was raised by the card, or
1587 * IRQ_NONE if no interrupt from the card was found.
1588 *
1589 * This is the interrupt handler that turns off interrupts for this
1590 * device and makes the stack poll the driver
1591 */
1592 static irqreturn_t
1593 spider_net_interrupt(int irq, void *ptr)
1594 {
1595 struct net_device *netdev = ptr;
1596 struct spider_net_card *card = netdev_priv(netdev);
1597 u32 status_reg, error_reg1, error_reg2;
1598
1599 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
1600 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1601 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1602
1603 if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
1604 !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
1605 !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
1606 return IRQ_NONE;
1607
1608 if (status_reg & SPIDER_NET_RXINT ) {
1609 spider_net_rx_irq_off(card);
1610 napi_schedule(&card->napi);
1611 card->num_rx_ints ++;
1612 }
1613 if (status_reg & SPIDER_NET_TXINT)
1614 napi_schedule(&card->napi);
1615
1616 if (status_reg & SPIDER_NET_LINKINT)
1617 spider_net_link_reset(netdev);
1618
1619 if (status_reg & SPIDER_NET_ERRINT )
1620 spider_net_handle_error_irq(card, status_reg,
1621 error_reg1, error_reg2);
1622
1623 /* clear interrupt sources */
1624 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
1625
1626 return IRQ_HANDLED;
1627 }
1628
1629 #ifdef CONFIG_NET_POLL_CONTROLLER
1630 /**
1631 * spider_net_poll_controller - artificial interrupt for netconsole etc.
1632 * @netdev: interface device structure
1633 *
1634 * see Documentation/networking/netconsole.rst
1635 */
1636 static void
1637 spider_net_poll_controller(struct net_device *netdev)
1638 {
1639 disable_irq(netdev->irq);
1640 spider_net_interrupt(netdev->irq, netdev);
1641 enable_irq(netdev->irq);
1642 }
1643 #endif /* CONFIG_NET_POLL_CONTROLLER */
1644
1645 /**
1646 * spider_net_enable_interrupts - enable interrupts
1647 * @card: card structure
1648 *
1649 * spider_net_enable_interrupts enables several interrupts
1650 */
1651 static void
1652 spider_net_enable_interrupts(struct spider_net_card *card)
1653 {
1654 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
1655 SPIDER_NET_INT0_MASK_VALUE);
1656 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
1657 SPIDER_NET_INT1_MASK_VALUE);
1658 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1659 SPIDER_NET_INT2_MASK_VALUE);
1660 }
1661
1662 /**
1663 * spider_net_disable_interrupts - disable interrupts
1664 * @card: card structure
1665 *
1666 * spider_net_disable_interrupts disables all the interrupts
1667 */
1668 static void
1669 spider_net_disable_interrupts(struct spider_net_card *card)
1670 {
1671 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
1672 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
1673 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
1674 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1675 }
1676
1677 /**
1678 * spider_net_init_card - initializes the card
1679 * @card: card structure
1680 *
1681 * spider_net_init_card initializes the card so that other registers can
1682 * be used
1683 */
1684 static void
1685 spider_net_init_card(struct spider_net_card *card)
1686 {
1687 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1688 SPIDER_NET_CKRCTRL_STOP_VALUE);
1689
1690 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1691 SPIDER_NET_CKRCTRL_RUN_VALUE);
1692
1693 /* trigger ETOMOD signal */
1694 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1695 spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
1696
1697 spider_net_disable_interrupts(card);
1698 }
1699
1700 /**
1701 * spider_net_enable_card - enables the card by setting all kinds of regs
1702 * @card: card structure
1703 *
1704 * spider_net_enable_card sets a lot of SMMIO registers to enable the device
1705 */
1706 static void
1707 spider_net_enable_card(struct spider_net_card *card)
1708 {
1709 int i;
1710 /* the following array consists of (register),(value) pairs
1711 * that are set in this function. A register of 0 ends the list
1712 */
1713 u32 regs[][2] = {
1714 { SPIDER_NET_GRESUMINTNUM, 0 },
1715 { SPIDER_NET_GREINTNUM, 0 },
1716
1717 /* set interrupt frame number registers */
1718 /* clear the single DMA engine registers first */
1719 { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1720 { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1721 { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1722 { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1723 /* then set, what we really need */
1724 { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
1725
1726 /* timer counter registers and stuff */
1727 { SPIDER_NET_GFREECNNUM, 0 },
1728 { SPIDER_NET_GONETIMENUM, 0 },
1729 { SPIDER_NET_GTOUTFRMNUM, 0 },
1730
1731 /* RX mode setting */
1732 { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
1733 /* TX mode setting */
1734 { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
1735 /* IPSEC mode setting */
1736 { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
1737
1738 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1739
1740 { SPIDER_NET_GMRWOLCTRL, 0 },
1741 { SPIDER_NET_GTESTMD, 0x10000000 },
1742 { SPIDER_NET_GTTQMSK, 0x00400040 },
1743
1744 { SPIDER_NET_GMACINTEN, 0 },
1745
1746 /* flow control settings */
1747 { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
1748 { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
1749
1750 { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
1751 { 0, 0}
1752 };
1753
1754 i = 0;
1755 while (regs[i][0]) {
1756 spider_net_write_reg(card, regs[i][0], regs[i][1]);
1757 i++;
1758 }
1759
1760 /* clear unicast filter table entries 1 to 14 */
1761 for (i = 1; i <= 14; i++) {
1762 spider_net_write_reg(card,
1763 SPIDER_NET_GMRUAFILnR + i * 8,
1764 0x00080000);
1765 spider_net_write_reg(card,
1766 SPIDER_NET_GMRUAFILnR + i * 8 + 4,
1767 0x00000000);
1768 }
1769
1770 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
1771
1772 spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
1773
1774 /* set chain tail address for RX chains and
1775 * enable DMA
1776 */
1777 spider_net_enable_rxchtails(card);
1778 spider_net_enable_rxdmac(card);
1779
1780 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1781
1782 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1783 SPIDER_NET_LENLMT_VALUE);
1784 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1785 SPIDER_NET_OPMODE_VALUE);
1786
1787 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1788 SPIDER_NET_GDTBSTA);
1789 }
1790
1791 /**
1792 * spider_net_download_firmware - loads firmware into the adapter
1793 * @card: card structure
1794 * @firmware_ptr: pointer to firmware data
1795 *
1796 * spider_net_download_firmware loads the firmware data into the
1797 * adapter. It assumes the length has already been validated by the caller.
1798 */
1799 static int
1800 spider_net_download_firmware(struct spider_net_card *card,
1801 const void *firmware_ptr)
1802 {
1803 int sequencer, i;
1804 const u32 *fw_ptr = firmware_ptr;
1805
1806 /* stop sequencers */
1807 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1808 SPIDER_NET_STOP_SEQ_VALUE);
1809
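/* Each sequencer's program address and data registers sit 8 bytes
 * beyond those of the previous sequencer. The program address is
 * reset to 0 and the SPIDER_NET_FIRMWARE_SEQWORDS instruction words
 * are then streamed through the data register; the hardware is
 * assumed to advance the program address on each write.
 */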
1810 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1811 sequencer++) {
1812 spider_net_write_reg(card,
1813 SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1814 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1815 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1816 sequencer * 8, *fw_ptr);
1817 fw_ptr++;
1818 }
1819 }
1820
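/* GSINIT is assumed to read back as zero once all sequencers have
 * been programmed successfully; any other value is treated as a
 * failed upload.
 */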
1821 if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
1822 return -EIO;
1823
1824 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1825 SPIDER_NET_RUN_SEQ_VALUE);
1826
1827 return 0;
1828 }
1829
1830 /**
1831 * spider_net_init_firmware - reads in firmware parts
1832 * @card: card structure
1833 *
1834 * Returns 0 on success, <0 on failure
1835 *
1836 * spider_net_init_firmware opens the sequencer firmware and does some basic
1837 * checks. This function opens and releases the firmware structure. A call
1838 * to download the firmware is performed before the release.
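* If the image cannot be loaded from the filesystem, the driver falls
* back to the "firmware" property of the card's device-tree node.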
1839 *
1840 * Firmware format
1841 * ===============
1842 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
1843 * the program for each sequencer. Use the command
1844 * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
1845 * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
1846 * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
1847 *
1848 * to generate spider_fw.bin, if you have sequencer programs with something
1849 * like the following contents for each sequencer:
1850 * <ONE LINE COMMENT>
1851 * <FIRST 4-BYTES-WORD FOR SEQUENCER>
1852 * <SECOND 4-BYTES-WORD FOR SEQUENCER>
1853 * ...
1854 * <1024th 4-BYTES-WORD FOR SEQUENCER>
1855 */
1856 static int
1857 spider_net_init_firmware(struct spider_net_card *card)
1858 {
1859 const struct firmware *firmware = NULL;
1860 struct device_node *dn;
1861 const u8 *fw_prop = NULL;
1862 int err = -ENOENT;
1863 int fw_size;
1864
1865 if (request_firmware(&firmware,
1866 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1867 if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
1868 if (netif_msg_probe(card))
1869 dev_err(&card->netdev->dev,
1870 "Incorrect size of spidernet firmware in filesystem. Looking in host firmware...\n");
1871 release_firmware(firmware);
1872 goto try_host_fw;
1873 }
1874 err = spider_net_download_firmware(card, firmware->data);
1875
1876 release_firmware(firmware);
1877 if (err)
1878 goto try_host_fw;
1879
1880 goto done;
1881 }
1882
1883 try_host_fw:
1884 dn = pci_device_to_OF_node(card->pdev);
1885 if (!dn)
1886 goto out_err;
1887
1888 fw_prop = of_get_property(dn, "firmware", &fw_size);
1889 if (!fw_prop)
1890 goto out_err;
1891
1892 if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
1893 if (netif_msg_probe(card))
1894 dev_err(&card->netdev->dev,
1895 "Incorrect size of spidernet firmware in host firmware\n");
1896 goto done;
1897 }
1898
1899 err = spider_net_download_firmware(card, fw_prop);
1900
1901 done:
1902 return err;
1903 out_err:
1904 if (netif_msg_probe(card))
1905 dev_err(&card->netdev->dev,
1906 "Couldn't find spidernet firmware in filesystem " \
1907 "or host firmware\n");
1908 return err;
1909 }
1910
1911 /**
1912 * spider_net_open - called upon ifconfig up
1913 * @netdev: interface device structure
1914 *
1915 * returns 0 on success, <0 on failure
1916 *
1917 * spider_net_open allocates all the descriptors and memory needed for
1918 * operation, sets up multicast list and enables interrupts
1919 */
1920 int
1921 spider_net_open(struct net_device *netdev)
1922 {
1923 struct spider_net_card *card = netdev_priv(netdev);
1924 int result;
1925
1926 result = spider_net_init_firmware(card);
1927 if (result)
1928 goto init_firmware_failed;
1929
1930 /* start probing with copper */
1931 card->aneg_count = 0;
1932 card->medium = BCM54XX_COPPER;
1933 spider_net_setup_aneg(card);
1934 if (card->phy.def->phy_id)
1935 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1936
1937 result = spider_net_init_chain(card, &card->tx_chain);
1938 if (result)
1939 goto alloc_tx_failed;
1940 card->low_watermark = NULL;
1941
1942 result = spider_net_init_chain(card, &card->rx_chain);
1943 if (result)
1944 goto alloc_rx_failed;
1945
1946 /* Allocate rx skbs */
1947 result = spider_net_alloc_rx_skbs(card);
1948 if (result)
1949 goto alloc_skbs_failed;
1950
1951 spider_net_set_multi(netdev);
1952
1953 /* further enhancement: setup hw vlan, if needed */
1954
1955 result = -EBUSY;
1956 if (request_irq(netdev->irq, spider_net_interrupt,
1957 IRQF_SHARED, netdev->name, netdev))
1958 goto register_int_failed;
1959
1960 spider_net_enable_card(card);
1961
1962 netif_start_queue(netdev);
1963 netif_carrier_on(netdev);
1964 napi_enable(&card->napi);
1965
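/* Unmask the chip interrupts only now that NAPI and the TX queue
 * are ready to service them.
 */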
1966 spider_net_enable_interrupts(card);
1967
1968 return 0;
1969
1970 register_int_failed:
1971 spider_net_free_rx_chain_contents(card);
1972 alloc_skbs_failed:
1973 spider_net_free_chain(card, &card->rx_chain);
1974 alloc_rx_failed:
1975 spider_net_free_chain(card, &card->tx_chain);
1976 alloc_tx_failed:
1977 del_timer_sync(&card->aneg_timer);
1978 init_firmware_failed:
1979 return result;
1980 }
1981
1982 /**
1983 * spider_net_link_phy - timer callback that monitors link state and autonegotiation
1984 * @t: timer context used to obtain the pointer to net card data structure
1985 */
1986 static void spider_net_link_phy(struct timer_list *t)
1987 {
1988 struct spider_net_card *card = from_timer(card, t, aneg_timer);
1989 struct mii_phy *phy = &card->phy;
1990
1991 /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
1992 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
1993
1994 pr_debug("%s: link is down trying to bring it up\n",
1995 card->netdev->name);
1996
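/* Fall back through the supported media: copper first, then fiber
 * with autonegotiation, then fiber without it; after that, start
 * over again with copper.
 */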
1997 switch (card->medium) {
1998 case BCM54XX_COPPER:
1999 /* enable fiber with autonegotiation first */
2000 if (phy->def->ops->enable_fiber)
2001 phy->def->ops->enable_fiber(phy, 1);
2002 card->medium = BCM54XX_FIBER;
2003 break;
2004
2005 case BCM54XX_FIBER:
2006 /* fiber didn't come up, try to disable fiber autoneg */
2007 if (phy->def->ops->enable_fiber)
2008 phy->def->ops->enable_fiber(phy, 0);
2009 card->medium = BCM54XX_UNKNOWN;
2010 break;
2011
2012 case BCM54XX_UNKNOWN:
2013 /* copper, and fiber with and without autoneg
2014 * have all failed; retry from the beginning
2015 */
2016 spider_net_setup_aneg(card);
2017 card->medium = BCM54XX_COPPER;
2018 break;
2019 }
2020
2021 card->aneg_count = 0;
2022 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
2023 return;
2024 }
2025
2026 /* link still not up, try again later */
2027 if (!(phy->def->ops->poll_link(phy))) {
2028 card->aneg_count++;
2029 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
2030 return;
2031 }
2032
2033 /* link came up, get abilities */
2034 phy->def->ops->read_link(phy);
2035
2036 spider_net_write_reg(card, SPIDER_NET_GMACST,
2037 spider_net_read_reg(card, SPIDER_NET_GMACST));
2038 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
2039
2040 if (phy->speed == 1000)
2041 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
2042 else
2043 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
2044
2045 card->aneg_count = 0;
2046
2047 pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
2048 card->netdev->name, phy->speed,
2049 phy->duplex == 1 ? "Full" : "Half",
2050 phy->autoneg == 1 ? "" : "no ");
2051 }
2052
2053 /**
2054 * spider_net_setup_phy - setup PHY
2055 * @card: card structure
2056 *
2057 * returns 0 on success, <0 on failure
2058 *
2059 * spider_net_setup_phy is used as part of spider_net_probe.
2060 **/
2061 static int
2062 spider_net_setup_phy(struct spider_net_card *card)
2063 {
2064 struct mii_phy *phy = &card->phy;
2065
2066 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
2067 SPIDER_NET_DMASEL_VALUE);
2068 spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
2069 SPIDER_NET_PHY_CTRL_VALUE);
2070
2071 phy->dev = card->netdev;
2072 phy->mdio_read = spider_net_read_phy;
2073 phy->mdio_write = spider_net_write_phy;
2074
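/* Scan MII addresses 1-31; a BMSR of 0x0000 or 0xffff means no PHY
 * is responding at that address.
 */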
2075 for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
2076 unsigned short id;
2077 id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
2078 if (id != 0x0000 && id != 0xffff) {
2079 if (!sungem_phy_probe(phy, phy->mii_id)) {
2080 pr_info("Found %s.\n", phy->def->name);
2081 break;
2082 }
2083 }
2084 }
2085
2086 return 0;
2087 }
2088
2089 /**
2090 * spider_net_workaround_rxramfull - work around firmware bug
2091 * @card: card structure
2092 *
2093 * no return value
2094 **/
2095 static void
2096 spider_net_workaround_rxramfull(struct spider_net_card *card)
2097 {
2098 int i, sequencer = 0;
2099
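/* The sequence below (clear all sequencer program memory, re-arm the
 * sequencers, then toggle the clock/reset control) is assumed to be
 * the prescribed workaround for the RX-RAM-full firmware bug.
 */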
2100 /* cancel reset */
2101 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2102 SPIDER_NET_CKRCTRL_RUN_VALUE);
2103
2104 /* empty sequencer data */
2105 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
2106 sequencer++) {
2107 spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
2108 sequencer * 8, 0x0);
2109 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
2110 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
2111 sequencer * 8, 0x0);
2112 }
2113 }
2114
2115 /* set sequencer operation */
2116 spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
2117
2118 /* reset */
2119 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2120 SPIDER_NET_CKRCTRL_STOP_VALUE);
2121 }
2122
2123 /**
2124 * spider_net_stop - called upon ifconfig down
2125 * @netdev: interface device structure
2126 *
2127 * always returns 0
2128 */
2129 int
2130 spider_net_stop(struct net_device *netdev)
2131 {
2132 struct spider_net_card *card = netdev_priv(netdev);
2133
2134 napi_disable(&card->napi);
2135 netif_carrier_off(netdev);
2136 netif_stop_queue(netdev);
2137 del_timer_sync(&card->tx_timer);
2138 del_timer_sync(&card->aneg_timer);
2139
2140 spider_net_disable_interrupts(card);
2141
2142 free_irq(netdev->irq, netdev);
2143
2144 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
2145 SPIDER_NET_DMA_TX_FEND_VALUE);
2146
2147 /* turn off DMA, force end */
2148 spider_net_disable_rxdmac(card);
2149
2150 /* release chains */
2151 spider_net_release_tx_chain(card, 1);
2152 spider_net_free_rx_chain_contents(card);
2153
2154 spider_net_free_chain(card, &card->tx_chain);
2155 spider_net_free_chain(card, &card->rx_chain);
2156
2157 return 0;
2158 }
2159
2160 /**
2161 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
2162 * function (must not be called in interrupt context)
2163 * @work: work context used to obtain the pointer to net card data structure
2164 *
2165 * called as task when tx hangs, resets interface (if interface is up)
2166 */
2167 static void
2168 spider_net_tx_timeout_task(struct work_struct *work)
2169 {
2170 struct spider_net_card *card =
2171 container_of(work, struct spider_net_card, tx_timeout_task);
2172 struct net_device *netdev = card->netdev;
2173
2174 if (!(netdev->flags & IFF_UP))
2175 goto out;
2176
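/* Recover by tearing the interface down and re-running the same
 * bring-up path as probe: RX-RAM-full workaround, card init, PHY
 * setup and open.
 */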
2177 netif_device_detach(netdev);
2178 spider_net_stop(netdev);
2179
2180 spider_net_workaround_rxramfull(card);
2181 spider_net_init_card(card);
2182
2183 if (spider_net_setup_phy(card))
2184 goto out;
2185
2186 spider_net_open(netdev);
2187 spider_net_kick_tx_dma(card);
2188 netif_device_attach(netdev);
2189
2190 out:
2191 atomic_dec(&card->tx_timeout_task_counter);
2192 }
2193
2194 /**
2195 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
2196 * @netdev: interface device structure
2197 * @txqueue: unused
2198 *
2199 * called if tx hangs. Schedules a task that resets the interface
2200 */
2201 static void
2202 spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2203 {
2204 struct spider_net_card *card;
2205
2206 card = netdev_priv(netdev);
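/* The counter lets spider_net_remove() wait for any scheduled
 * timeout task to finish before the device is torn down.
 */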
2207 atomic_inc(&card->tx_timeout_task_counter);
2208 if (netdev->flags & IFF_UP)
2209 schedule_work(&card->tx_timeout_task);
2210 else
2211 atomic_dec(&card->tx_timeout_task_counter);
2212 card->spider_stats.tx_timeouts++;
2213 }
2214
2215 static const struct net_device_ops spider_net_ops = {
2216 .ndo_open = spider_net_open,
2217 .ndo_stop = spider_net_stop,
2218 .ndo_start_xmit = spider_net_xmit,
2219 .ndo_set_rx_mode = spider_net_set_multi,
2220 .ndo_set_mac_address = spider_net_set_mac,
2221 .ndo_eth_ioctl = spider_net_do_ioctl,
2222 .ndo_tx_timeout = spider_net_tx_timeout,
2223 .ndo_validate_addr = eth_validate_addr,
2224 /* HW VLAN */
2225 #ifdef CONFIG_NET_POLL_CONTROLLER
2226 /* poll controller */
2227 .ndo_poll_controller = spider_net_poll_controller,
2228 #endif /* CONFIG_NET_POLL_CONTROLLER */
2229 };
2230
2231 /**
2232 * spider_net_setup_netdev_ops - initialization of net_device operations
2233 * @netdev: net_device structure
2234 *
2235 * fills out function pointers in the net_device structure
2236 */
2237 static void
2238 spider_net_setup_netdev_ops(struct net_device *netdev)
2239 {
2240 netdev->netdev_ops = &spider_net_ops;
2241 netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
2242 /* ethtool ops */
2243 netdev->ethtool_ops = &spider_net_ethtool_ops;
2244 }
2245
2246 /**
2247 * spider_net_setup_netdev - initialization of net_device
2248 * @card: card structure
2249 *
2250 * Returns 0 on success or <0 on failure
2251 *
2252 * spider_net_setup_netdev initializes the net_device structure
2253 **/
2254 static int
2255 spider_net_setup_netdev(struct spider_net_card *card)
2256 {
2257 int result;
2258 struct net_device *netdev = card->netdev;
2259 struct device_node *dn;
2260 struct sockaddr addr;
2261 const u8 *mac;
2262
2263 SET_NETDEV_DEV(netdev, &card->pdev->dev);
2264
2265 pci_set_drvdata(card->pdev, netdev);
2266
2267 timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
2268 netdev->irq = card->pdev->irq;
2269
2270 card->aneg_count = 0;
2271 timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
2272
2273 netif_napi_add(netdev, &card->napi, spider_net_poll);
2274
2275 spider_net_setup_netdev_ops(netdev);
2276
2277 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2278 if (SPIDER_NET_RX_CSUM_DEFAULT)
2279 netdev->features |= NETIF_F_RXCSUM;
2280 netdev->features |= NETIF_F_IP_CSUM;
2281 /* possibly later: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2282 * NETIF_F_HW_VLAN_CTAG_FILTER
2283 */
2284 netdev->lltx = true;
2285
2286 /* MTU range: 64 - 2294 */
2287 netdev->min_mtu = SPIDER_NET_MIN_MTU;
2288 netdev->max_mtu = SPIDER_NET_MAX_MTU;
2289
2290 netdev->irq = card->pdev->irq;
2291 card->num_rx_ints = 0;
2292 card->ignore_rx_ramfull = 0;
2293
2294 dn = pci_device_to_OF_node(card->pdev);
2295 if (!dn)
2296 return -EIO;
2297
2298 mac = of_get_property(dn, "local-mac-address", NULL);
2299 if (!mac)
2300 return -EIO;
2301 memcpy(addr.sa_data, mac, ETH_ALEN);
2302
2303 result = spider_net_set_mac(netdev, &addr);
2304 if ((result) && (netif_msg_probe(card)))
2305 dev_err(&card->netdev->dev,
2306 "Failed to set MAC address: %i\n", result);
2307
2308 result = register_netdev(netdev);
2309 if (result) {
2310 if (netif_msg_probe(card))
2311 dev_err(&card->netdev->dev,
2312 "Couldn't register net_device: %i\n", result);
2313 return result;
2314 }
2315
2316 if (netif_msg_probe(card))
2317 pr_info("Initialized device %s.\n", netdev->name);
2318
2319 return 0;
2320 }
2321
2322 /**
2323 * spider_net_alloc_card - allocates net_device and card structure
2324 *
2325 * returns the card structure or NULL in case of errors
2326 *
2327 * the card and net_device structures are linked to each other
2328 */
2329 static struct spider_net_card *
2330 spider_net_alloc_card(void)
2331 {
2332 struct net_device *netdev;
2333 struct spider_net_card *card;
2334
2335 netdev = alloc_etherdev(struct_size(card, darray,
2336 size_add(tx_descriptors, rx_descriptors)));
2337 if (!netdev)
2338 return NULL;
2339
2340 card = netdev_priv(netdev);
2341 card->netdev = netdev;
2342 card->msg_enable = SPIDER_NET_DEFAULT_MSG;
2343 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
2344 init_waitqueue_head(&card->waitq);
2345 atomic_set(&card->tx_timeout_task_counter, 0);
2346
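/* The descriptor rings live in the flexible darray at the end of
 * the card structure: the RX ring occupies the first rx_descriptors
 * entries and the TX ring follows immediately after.
 */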
2347 card->rx_chain.num_desc = rx_descriptors;
2348 card->rx_chain.ring = card->darray;
2349 card->tx_chain.num_desc = tx_descriptors;
2350 card->tx_chain.ring = card->darray + rx_descriptors;
2351
2352 return card;
2353 }
2354
2355 /**
2356 * spider_net_undo_pci_setup - releases PCI resources
2357 * @card: card structure
2358 *
2359 * spider_net_undo_pci_setup releases the mapped regions
2360 */
2361 static void
2362 spider_net_undo_pci_setup(struct spider_net_card *card)
2363 {
2364 iounmap(card->regs);
2365 pci_release_regions(card->pdev);
2366 }
2367
2368 /**
2369 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2370 * @pdev: PCI device
2371 *
2372 * Returns the card structure or NULL if any errors occur
2373 *
2374 * spider_net_setup_pci_dev initializes pdev and, together with the
2375 * functions called in spider_net_open, configures the device so that
2376 * data can be transferred over it.
2377 * If the function returns without error, the net_device structure
2378 * is attached to the card structure.
2379 **/
2380 static struct spider_net_card *
2381 spider_net_setup_pci_dev(struct pci_dev *pdev)
2382 {
2383 struct spider_net_card *card;
2384 unsigned long mmio_start, mmio_len;
2385
2386 if (pci_enable_device(pdev)) {
2387 dev_err(&pdev->dev, "Couldn't enable PCI device\n");
2388 return NULL;
2389 }
2390
2391 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2392 dev_err(&pdev->dev,
2393 "Couldn't find proper PCI device base address.\n");
2394 goto out_disable_dev;
2395 }
2396
2397 if (pci_request_regions(pdev, spider_net_driver_name)) {
2398 dev_err(&pdev->dev,
2399 "Couldn't obtain PCI resources, aborting.\n");
2400 goto out_disable_dev;
2401 }
2402
2403 pci_set_master(pdev);
2404
2405 card = spider_net_alloc_card();
2406 if (!card) {
2407 dev_err(&pdev->dev,
2408 "Couldn't allocate net_device structure, aborting.\n");
2409 goto out_release_regions;
2410 }
2411 card->pdev = pdev;
2412
2413 /* fetch base address and length of first resource */
2414 mmio_start = pci_resource_start(pdev, 0);
2415 mmio_len = pci_resource_len(pdev, 0);
2416
2417 card->netdev->mem_start = mmio_start;
2418 card->netdev->mem_end = mmio_start + mmio_len;
2419 card->regs = ioremap(mmio_start, mmio_len);
2420
2421 if (!card->regs) {
2422 dev_err(&pdev->dev,
2423 "Couldn't obtain PCI resources, aborting.\n");
2424 goto out_release_regions;
2425 }
2426
2427 return card;
2428
2429 out_release_regions:
2430 pci_release_regions(pdev);
2431 out_disable_dev:
2432 pci_disable_device(pdev);
2433 return NULL;
2434 }
2435
2436 /**
2437 * spider_net_probe - initialization of a device
2438 * @pdev: PCI device
2439 * @ent: entry in the device id list
2440 *
2441 * Returns 0 on success, <0 on failure
2442 *
2443 * spider_net_probe initializes pdev and registers a net_device
2444 * structure for it. After that, the device can be ifconfig'ed up
2445 **/
2446 static int
2447 spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2448 {
2449 int err = -EIO;
2450 struct spider_net_card *card;
2451
2452 card = spider_net_setup_pci_dev(pdev);
2453 if (!card)
2454 goto out;
2455
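/* Bring the hardware into a known state before the PHY and the
 * net_device are set up.
 */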
2456 spider_net_workaround_rxramfull(card);
2457 spider_net_init_card(card);
2458
2459 err = spider_net_setup_phy(card);
2460 if (err)
2461 goto out_undo_pci;
2462
2463 err = spider_net_setup_netdev(card);
2464 if (err)
2465 goto out_undo_pci;
2466
2467 return 0;
2468
2469 out_undo_pci:
2470 spider_net_undo_pci_setup(card);
2471 free_netdev(card->netdev);
2472 out:
2473 return err;
2474 }
2475
2476 /**
2477 * spider_net_remove - removal of a device
2478 * @pdev: PCI device
2479 *
2482 * spider_net_remove is called to remove the device and unregisters the
2483 * net_device
2484 **/
2485 static void
2486 spider_net_remove(struct pci_dev *pdev)
2487 {
2488 struct net_device *netdev;
2489 struct spider_net_card *card;
2490
2491 netdev = pci_get_drvdata(pdev);
2492 card = netdev_priv(netdev);
2493
2494 wait_event(card->waitq,
2495 atomic_read(&card->tx_timeout_task_counter) == 0);
2496
2497 unregister_netdev(netdev);
2498
2499 /* switch off card */
2500 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2501 SPIDER_NET_CKRCTRL_STOP_VALUE);
2502 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2503 SPIDER_NET_CKRCTRL_RUN_VALUE);
2504
2505 spider_net_undo_pci_setup(card);
2506 free_netdev(netdev);
2507 }
2508
2509 static struct pci_driver spider_net_driver = {
2510 .name = spider_net_driver_name,
2511 .id_table = spider_net_pci_tbl,
2512 .probe = spider_net_probe,
2513 .remove = spider_net_remove
2514 };
2515
2516 /**
2517 * spider_net_init - init function when the driver is loaded
2518 *
2519 * spider_net_init registers the device driver
2520 */
2521 static int __init spider_net_init(void)
2522 {
2523 printk(KERN_INFO "Spidernet version %s.\n", VERSION);
2524
2525 if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
2526 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
2527 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2528 }
2529 if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
2530 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
2531 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2532 }
2533 if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
2534 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
2535 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2536 }
2537 if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
2538 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
2539 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2540 }
2541
2542 return pci_register_driver(&spider_net_driver);
2543 }
2544
2545 /**
2546 * spider_net_cleanup - exit function when driver is unloaded
2547 *
2548 * spider_net_cleanup unregisters the device driver
2549 */
2550 static void __exit spider_net_cleanup(void)
2551 {
2552 pci_unregister_driver(&spider_net_driver);
2553 }
2554
2555 module_init(spider_net_init);
2556 module_exit(spider_net_cleanup);
2557