/*
	drivers/net/ethernet/dec/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

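/*
 * Tuning knobs used below: packets shorter than tulip_rx_copybreak are
 * copied into a freshly allocated skb instead of handing the ring buffer
 * up the stack, and tulip_max_interrupt_work bounds how many passes
 * tulip_interrupt() makes before backing off.  Both are configured
 * elsewhere in the driver (tulip_core.c).
 */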
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/*  CSR11 21143 hardware Mitigation Control Interrupt
	    We use only RX mitigation; other techniques are used for
	    TX interrupt mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,             /* IM disabled */
	0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif

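/*
 * Refill empty slots in the Rx ring with newly allocated, DMA-mapped
 * skbs and hand the descriptors back to the chip (DescOwned).  On the
 * LC82C168 a receiver that stopped for lack of buffers is restarted via
 * CSR2.  Returns the number of descriptors refilled.
 */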
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb =
				netdev_alloc_skb(dev, PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			if (dma_mapping_error(&tp->pdev->dev, mapping)) {
				dev_kfree_skb(skb);
				tp->rx_buffers[entry].skb = NULL;
				break;
			}

			tp->rx_buffers[entry].mapping = mapping;

			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}

#ifdef CONFIG_TULIP_NAPI

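/*
 * Out-of-memory timer: armed by tulip_poll() when the Rx ring could not
 * be refilled; simply reschedules NAPI so the refill can be retried.
 */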
void oom_timer(struct timer_list *t)
{
	struct tulip_private *tp = from_timer(tp, t, oom_timer);

	napi_schedule(&tp->napi);
}

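/*
 * NAPI poll routine: process up to @budget received packets, optionally
 * toggle hardware interrupt mitigation (CSR11), refill the Rx ring and,
 * once the ring is drained, complete NAPI and re-enable interrupts via
 * CSR7.  If skb allocation fails, polling stops and the oom timer takes
 * over.
 */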
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* That one buffer is needed for mit activation; or it might be a
	   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif

	if (tulip_debug > 4)
		netdev_dbg(dev, "In tulip_poll(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				netdev_dbg(dev, "In tulip_poll(), entry %d %08x\n",
					   entry, status);

			if (++work_done >= budget)
				goto not_done;

			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked status for RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;

			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header).
			 * Anything higher than this is always invalid
			 * regardless of the RxLengthOver2047 bits.
			 */

			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							dev_warn(&dev->dev,
								 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								 status);
						dev->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						netdev_dbg(dev, "Receive error, Rx status %08x\n",
							   status);
					dev->stats.rx_errors++; /* end of a packet.*/
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						dev->stats.rx_length_errors++;

					if (status & 0x0004)
						dev->stats.rx_frame_errors++;
					if (status & 0x0002)
						dev->stats.rx_crc_errors++;
					if (status & 0x0001)
						dev->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;

				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					skb_put_data(skb,
						     tp->rx_buffers[entry].skb->data,
						     pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_poll: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* New ack strategy: the irq handler no longer acks Rx;
		   hopefully this helps. */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but a new poll
		 * is not scheduled.  It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029).  If it does not fail, we have
		 * finally won: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM.  It is proven by
	   real life installations.  We could have IM enabled
	   continuously, but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  more than 1 pkt received (per intr.) OR we are dropping
	   OFF: only 1 pkt received

	   Note: we only use the min and max (0, 15) settings from mit_table. */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	tulip_refill_rx(dev);

	/* If the Rx ring is not full, we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from the polling list and re-enable Rx interrupts. */

	napi_complete_done(napi, work_done);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);

	/* The last op happens after poll completion.  Which means the following:
	 * 1. it can race with disabling irqs in the irq handler
	 * 2. it can race with disabling/enabling irqs in other poll threads
	 * 3. if an irq was raised after the loop began, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs.  But it must not result in losing events.
	 */

	return work_done;

not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

oom:	/* Executed with Rx interrupts disabled */

	/* Start the timer and stop polling, but do not enable Rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies + 1);

	/* Note: checking timer_pending() here would be an explicit sign of a
	 * bug.  The timer could be pending now, yet have fired and completed
	 * before we called napi_complete(), and we would lose it. */

	/* Remove ourselves from the polling list. */
	napi_complete_done(napi, work_done);

	return work_done;
}

#else /* CONFIG_TULIP_NAPI */

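/*
 * Non-NAPI receive path, called from the interrupt handler: drain
 * completed Rx descriptors (bounded by the free space left in the ring)
 * and hand packets to netif_rx().  Returns the number of packets
 * received; the caller refills the ring afterwards.
 */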
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
				   entry, status);
		if (--rx_work_limit < 0)
			break;

		/*
		  Omit the four octet CRC from the length.
		  (May not be considered valid until we have
		  checked status for RxLengthOver2047 bits)
		*/
		pkt_len = ((status >> 16) & 0x7ff) - 4;
		/*
		  Maximum pkt_len is 1518 (1514 + vlan header).
		  Anything higher than this is always invalid
		  regardless of the RxLengthOver2047 bits.
		*/

		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						netdev_warn(dev,
							    "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							    status);
					dev->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				dev->stats.rx_errors++; /* end of a packet.*/
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					dev->stats.rx_length_errors++;
				if (status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (status & 0x0002)
					dev->stats.rx_crc_errors++;
				if (status & 0x0001)
					dev->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				skb_put_data(skb,
					     tp->rx_buffers[entry].skb->data,
					     pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(unsigned long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif  /* CONFIG_TULIP_NAPI */

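/*
 * Handle a PHY interrupt (only implemented for HPPA builds, where the
 * built-in tulip reports PHY events through CSR12): if CSR12 changed
 * since the last shadow copy, ack the event, re-check the duplex
 * setting under the lock, and return 1 so the caller knows the IRQ was
 * ours.  Returns 0 otherwise, and always on other architectures.
 */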
static inline unsigned int phy_interrupt(struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt(dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

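	/* Service events until CSR5 reports nothing left that we handle
	   here, or until the per-interrupt work limits are exceeded. */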
	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask Rx intrs and add the device to the poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt,
				  ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP;
		   the poll function does the Rx and RxNoBuf acking. */
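		/* 0x0001ff3f is the 0x0001ffff mask used below with the
		   RxIntr and RxNoBuf bits cleared, since tulip_poll() acks
		   those two itself. */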
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			netdev_dbg(dev, "interrupt  csr5=%#8.8x new csr5=%#8.8x\n",
				   csr5, ioread32(ioaddr + CSR5));

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							tp->tx_buffers[entry].mapping,
							sizeof(tp->setup_frame),
							PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						netdev_dbg(dev, "Transmit error, Tx status %08x\n",
							   status);
#endif
					dev->stats.tx_errors++;
					if (status & 0x4104)
						dev->stats.tx_aborted_errors++;
					if (status & 0x0C00)
						dev->stats.tx_carrier_errors++;
					if (status & 0x0200)
						dev->stats.tx_window_errors++;
					if (status & 0x0002)
						dev->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;
				} else {
					dev->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					dev->stats.collisions += (status >> 3) & 15;
					dev->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

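			/* Enough descriptors are free again (keep a slack of
			   two): let the stack resume queueing packets. */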
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped.  CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber)
				dev->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				dev->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* The LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set the timer to
				   re-enable them. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
					  ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* Check if the card is in suspend mode. */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
				 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

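	/* Fold the chip's missed-frame counter (CSR8) into rx_dropped; if
	   the counter's overflow bit (0x10000) is set the exact count is
	   unknown, so only 0x10000 is added. */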
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
			   ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}
815