xref: /linux/drivers/net/ethernet/chelsio/cxgb/sge.c (revision 0d2cd91bf7b1a7cc1d638296111fcc2bcf5c0bb4)
1 /*****************************************************************************
2  *                                                                           *
3  * File: sge.c                                                               *
4  * $Revision: 1.26 $                                                         *
5  * $Date: 2005/06/21 18:29:48 $                                              *
6  * Description:                                                              *
7  *  DMA engine.                                                              *
8  *  part of the Chelsio 10Gb Ethernet Driver.                                *
9  *                                                                           *
10  * This program is free software; you can redistribute it and/or modify      *
11  * it under the terms of the GNU General Public License, version 2, as       *
12  * published by the Free Software Foundation.                                *
13  *                                                                           *
14  * You should have received a copy of the GNU General Public License along   *
15  * with this program; if not, write to the Free Software Foundation, Inc.,   *
16  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
17  *                                                                           *
18  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
19  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
20  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
21  *                                                                           *
22  * http://www.chelsio.com                                                    *
23  *                                                                           *
24  * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
25  * All rights reserved.                                                      *
26  *                                                                           *
27  * Maintainers: maintainers@chelsio.com                                      *
28  *                                                                           *
29  * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
30  *          Tina Yang               <tainay@chelsio.com>                     *
31  *          Felix Marti             <felix@chelsio.com>                      *
32  *          Scott Bardone           <sbardone@chelsio.com>                   *
33  *          Kurt Ottaway            <kottaway@chelsio.com>                   *
34  *          Frank DiMambro          <frank@chelsio.com>                      *
35  *                                                                           *
36  * History:                                                                  *
37  *                                                                           *
38  ****************************************************************************/
39 
40 #include "common.h"
41 
42 #include <linux/types.h>
43 #include <linux/errno.h>
44 #include <linux/pci.h>
45 #include <linux/ktime.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/if_vlan.h>
49 #include <linux/skbuff.h>
50 #include <linux/init.h>
51 #include <linux/mm.h>
52 #include <linux/tcp.h>
53 #include <linux/ip.h>
54 #include <linux/in.h>
55 #include <linux/if_arp.h>
56 #include <linux/slab.h>
57 #include <linux/prefetch.h>
58 
59 #include "cpl5_cmd.h"
60 #include "sge.h"
61 #include "regs.h"
62 #include "espi.h"
63 
64 /* This belongs in if_ether.h */
65 #define ETH_P_CPL5 0xf
66 
67 #define SGE_CMDQ_N		2
68 #define SGE_FREELQ_N		2
69 #define SGE_CMDQ0_E_N		1024
70 #define SGE_CMDQ1_E_N		128
71 #define SGE_FREEL_SIZE		4096
72 #define SGE_JUMBO_FREEL_SIZE	512
73 #define SGE_FREEL_REFILL_THRESH	16
74 #define SGE_RESPQ_E_N		1024
75 #define SGE_INTRTIMER_NRES	1000
76 #define SGE_RX_SM_BUF_SIZE	1536
77 #define SGE_TX_DESC_MAX_PLEN	16384
78 
79 #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
80 
81 /*
82  * Period of the TX buffer reclaim timer.  This timer does not need to run
83  * frequently as TX buffers are usually reclaimed by new TX packets.
84  */
85 #define TX_RECLAIM_PERIOD (HZ / 4)
86 
87 #define M_CMD_LEN       0x7fffffff
88 #define V_CMD_LEN(v)    (v)
89 #define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
90 #define V_CMD_GEN1(v)   ((v) << 31)
91 #define V_CMD_GEN2(v)   (v)
92 #define F_CMD_DATAVALID (1 << 1)
93 #define F_CMD_SOP       (1 << 2)
94 #define V_CMD_EOP(v)    ((v) << 3)
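/*
 * Example of the field packing above (values illustrative): a 1514-byte
 * buffer with the generation bit set packs one generation copy into bit 31
 * of len_gen and the other into bit 0 of the flags/gen2 word:
 *
 *	len_gen = V_CMD_LEN(1514) | V_CMD_GEN1(1)	-> 0x800005ea
 *	flags   = F_CMD_DATAVALID | F_CMD_SOP |
 *		  V_CMD_EOP(1) | V_CMD_GEN2(1)		-> 0x0000000f
 *
 * Duplicating the generation bit lets the hardware distinguish a fully
 * written descriptor from a stale one left over from a previous ring pass.
 */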
95 
96 /*
97  * Command queue, receive buffer list, and response queue descriptors.
98  */
99 #if defined(__BIG_ENDIAN_BITFIELD)
100 struct cmdQ_e {
101 	u32 addr_lo;
102 	u32 len_gen;
103 	u32 flags;
104 	u32 addr_hi;
105 };
106 
107 struct freelQ_e {
108 	u32 addr_lo;
109 	u32 len_gen;
110 	u32 gen2;
111 	u32 addr_hi;
112 };
113 
114 struct respQ_e {
115 	u32 Qsleeping		: 4;
116 	u32 Cmdq1CreditReturn	: 5;
117 	u32 Cmdq1DmaComplete	: 5;
118 	u32 Cmdq0CreditReturn	: 5;
119 	u32 Cmdq0DmaComplete	: 5;
120 	u32 FreelistQid		: 2;
121 	u32 CreditValid		: 1;
122 	u32 DataValid		: 1;
123 	u32 Offload		: 1;
124 	u32 Eop			: 1;
125 	u32 Sop			: 1;
126 	u32 GenerationBit	: 1;
127 	u32 BufferLength;
128 };
129 #elif defined(__LITTLE_ENDIAN_BITFIELD)
130 struct cmdQ_e {
131 	u32 len_gen;
132 	u32 addr_lo;
133 	u32 addr_hi;
134 	u32 flags;
135 };
136 
137 struct freelQ_e {
138 	u32 len_gen;
139 	u32 addr_lo;
140 	u32 addr_hi;
141 	u32 gen2;
142 };
143 
144 struct respQ_e {
145 	u32 BufferLength;
146 	u32 GenerationBit	: 1;
147 	u32 Sop			: 1;
148 	u32 Eop			: 1;
149 	u32 Offload		: 1;
150 	u32 DataValid		: 1;
151 	u32 CreditValid		: 1;
152 	u32 FreelistQid		: 2;
153 	u32 Cmdq0DmaComplete	: 5;
154 	u32 Cmdq0CreditReturn	: 5;
155 	u32 Cmdq1DmaComplete	: 5;
156 	u32 Cmdq1CreditReturn	: 5;
157 	u32 Qsleeping		: 4;
158 };
159 #endif
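/*
 * The two layouts above describe the same hardware descriptors; only the
 * declaration order of the bitfields is mirrored so that each field lands
 * on the same bits under either host bitfield convention (together with
 * the F_ENABLE_BIG_ENDIAN setting in configure_sge() on big-endian hosts).
 */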
160 
161 /*
162  * SW Context Command and Freelist Queue Descriptors
163  */
164 struct cmdQ_ce {
165 	struct sk_buff *skb;
166 	DEFINE_DMA_UNMAP_ADDR(dma_addr);
167 	DEFINE_DMA_UNMAP_LEN(dma_len);
168 };
169 
170 struct freelQ_ce {
171 	struct sk_buff *skb;
172 	DEFINE_DMA_UNMAP_ADDR(dma_addr);
173 	DEFINE_DMA_UNMAP_LEN(dma_len);
174 };
175 
176 /*
177  * SW command, freelist and response rings
178  */
179 struct cmdQ {
180 	unsigned long   status;         /* HW DMA fetch status */
181 	unsigned int    in_use;         /* # of in-use command descriptors */
182 	unsigned int	size;	        /* # of descriptors */
183 	unsigned int    processed;      /* total # of descs HW has processed */
184 	unsigned int    cleaned;        /* total # of descs SW has reclaimed */
185 	unsigned int    stop_thres;     /* SW TX queue suspend threshold */
186 	u16		pidx;           /* producer index (SW) */
187 	u16		cidx;           /* consumer index (HW) */
188 	u8		genbit;         /* current generation (=valid) bit */
189 	u8              sop;            /* is next entry start of packet? */
190 	struct cmdQ_e  *entries;        /* HW command descriptor Q */
191 	struct cmdQ_ce *centries;       /* SW command context descriptor Q */
192 	dma_addr_t	dma_addr;       /* DMA addr HW command descriptor Q */
193 	spinlock_t	lock;           /* Lock to protect cmdQ enqueuing */
194 };
195 
196 struct freelQ {
197 	unsigned int	credits;        /* # of available RX buffers */
198 	unsigned int	size;	        /* free list capacity */
199 	u16		pidx;           /* producer index (SW) */
200 	u16		cidx;           /* consumer index (HW) */
201 	u16		rx_buffer_size; /* Buffer size on this free list */
202 	u16             dma_offset;     /* DMA offset to align IP headers */
203 	u16             recycleq_idx;   /* skb recycle q to use */
204 	u8		genbit;	        /* current generation (=valid) bit */
205 	struct freelQ_e	*entries;       /* HW freelist descriptor Q */
206 	struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
207 	dma_addr_t	dma_addr;       /* DMA addr HW freelist descriptor Q */
208 };
209 
210 struct respQ {
211 	unsigned int	credits;        /* credits to be returned to SGE */
212 	unsigned int	size;	        /* # of response Q descriptors */
213 	u16		cidx;	        /* consumer index (SW) */
214 	u8		genbit;	        /* current generation(=valid) bit */
215 	struct respQ_e *entries;        /* HW response descriptor Q */
216 	dma_addr_t	dma_addr;       /* DMA addr HW response descriptor Q */
217 };
218 
219 /* Bit flags for cmdQ.status */
220 enum {
221 	CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
222 	CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
223 };
224 
225 /* T204 TX SW scheduler */
226 
227 /* Per T204 TX port */
228 struct sched_port {
229 	unsigned int	avail;		/* available bits - quota */
230 	unsigned int	drain_bits_per_1024ns; /* drain rate */
231 	unsigned int	speed;		/* drain rate, mbps */
232 	unsigned int	mtu;		/* mtu size */
233 	struct sk_buff_head skbq;	/* pending skbs */
234 };
235 
236 /* Per T204 device */
237 struct sched {
238 	ktime_t         last_updated;   /* last time quotas were computed */
239 	unsigned int	max_avail;	/* max bits to be sent to any port */
240 	unsigned int	port;		/* port index (round robin ports) */
241 	unsigned int	num;		/* num skbs in per port queues */
242 	struct sched_port p[MAX_NPORTS];
243 	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
244 };
245 static void restart_sched(unsigned long);
246 
247 
248 /*
249  * Main SGE data structure
250  *
251  * Interrupts are handled by a single CPU, and on an MP system the
252  * application is likely to be migrated to another CPU. In that scenario, we
253  * try to separate the RX (in IRQ context) and TX state to decrease memory
254  * contention.
255  */
256 struct sge {
257 	struct adapter *adapter;	/* adapter backpointer */
258 	struct net_device *netdev;      /* netdevice backpointer */
259 	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */
260 	struct respQ	respQ;		/* response Q */
261 	unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
262 	unsigned int	rx_pkt_pad;     /* RX padding for L2 packets */
263 	unsigned int	jumbo_fl;       /* jumbo freelist Q index */
264 	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
265 	unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */
266 	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
267 	struct timer_list espibug_timer;
268 	unsigned long	espibug_timeout;
269 	struct sk_buff	*espibug_skb[MAX_NPORTS];
270 	u32		sge_control;	/* shadow value of sge control reg */
271 	struct sge_intr_counts stats;
272 	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
273 	struct sched	*tx_sched;
274 	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
275 };
276 
277 static const u8 ch_mac_addr[ETH_ALEN] = {
278 	0x0, 0x7, 0x43, 0x0, 0x0, 0x0
279 };
280 
281 /*
282  * stop tasklet and free all pending skb's
283  */
284 static void tx_sched_stop(struct sge *sge)
285 {
286 	struct sched *s = sge->tx_sched;
287 	int i;
288 
289 	tasklet_kill(&s->sched_tsk);
290 
291 	for (i = 0; i < MAX_NPORTS; i++)
292 		__skb_queue_purge(&s->p[i].skbq);
293 }
294 
295 /*
296  * t1_sched_update_parms() is called when the MTU or link speed changes. It
297  * re-computes scheduler parameters to cope with the change.
298  */
299 unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
300 				   unsigned int mtu, unsigned int speed)
301 {
302 	struct sched *s = sge->tx_sched;
303 	struct sched_port *p = &s->p[port];
304 	unsigned int max_avail_segs;
305 
306 	pr_debug("t1_sched_update_parms mtu=%d speed=%d\n", mtu, speed);
307 	if (speed)
308 		p->speed = speed;
309 	if (mtu)
310 		p->mtu = mtu;
311 
312 	if (speed || mtu) {
313 		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
314 		do_div(drain, (p->mtu + 50) * 1000);
315 		p->drain_bits_per_1024ns = (unsigned int) drain;
316 
317 		if (p->speed < 1000)
318 			p->drain_bits_per_1024ns =
319 				90 * p->drain_bits_per_1024ns / 100;
320 	}
321 
322 	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
323 		p->drain_bits_per_1024ns -= 16;
324 		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
325 		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
326 	} else {
327 		s->max_avail = 16384;
328 		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
329 	}
330 
331 	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
332 		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
333 		 p->speed, s->max_avail, max_avail_segs,
334 		 p->drain_bits_per_1024ns);
335 
336 	return max_avail_segs * (p->mtu - 40);
337 }
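/*
 * Worked example (values illustrative): with mtu = 1500 and speed = 1000
 * on a non-CHT204 board,
 *
 *	drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) ~= 964
 *
 * so drain_bits_per_1024ns comes out around 964, max_avail is 16384,
 * max_avail_segs = max(1, 9000 / 1460) = 6, and the function returns
 * 6 * (1500 - 40) = 8760.
 */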
338 
339 #if 0
340 
341 /*
342  * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
343  * data that can be pushed per port.
344  */
345 void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
346 {
347 	struct sched *s = sge->tx_sched;
348 	unsigned int i;
349 
350 	s->max_avail = val;
351 	for (i = 0; i < MAX_NPORTS; i++)
352 		t1_sched_update_parms(sge, i, 0, 0);
353 }
354 
355 /*
356  * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
357  * is draining.
358  */
359 void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
360 					 unsigned int val)
361 {
362 	struct sched *s = sge->tx_sched;
363 	struct sched_port *p = &s->p[port];
364 	p->drain_bits_per_1024ns = val * 1024 / 1000;
365 	t1_sched_update_parms(sge, port, 0, 0);
366 }
367 
368 #endif  /*  0  */
369 
370 
371 /*
372  * get_clock() implements a ns clock (see ktime_get)
373  */
374 static inline ktime_t get_clock(void)
375 {
376 	struct timespec ts;
377 
378 	ktime_get_ts(&ts);
379 	return timespec_to_ktime(ts);
380 }
381 
382 /*
383  * tx_sched_init() allocates resources and does basic initialization.
384  */
385 static int tx_sched_init(struct sge *sge)
386 {
387 	struct sched *s;
388 	int i;
389 
390 	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
391 	if (!s)
392 		return -ENOMEM;
393 
394 	pr_debug("tx_sched_init\n");
395 	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
396 	sge->tx_sched = s;
397 
398 	for (i = 0; i < MAX_NPORTS; i++) {
399 		skb_queue_head_init(&s->p[i].skbq);
400 		t1_sched_update_parms(sge, i, 1500, 1000);
401 	}
402 
403 	return 0;
404 }
405 
406 /*
407  * sched_update_avail() computes the delta since the last time it was called
408  * and updates the per-port quota (the amount of data that can be sent to
409  * any port).
410  */
411 static inline int sched_update_avail(struct sge *sge)
412 {
413 	struct sched *s = sge->tx_sched;
414 	ktime_t now = get_clock();
415 	unsigned int i;
416 	long long delta_time_ns;
417 
418 	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
419 
420 	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
421 	if (delta_time_ns < 15000)
422 		return 0;
423 
424 	for (i = 0; i < MAX_NPORTS; i++) {
425 		struct sched_port *p = &s->p[i];
426 		unsigned int delta_avail;
427 
428 		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
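		/*
		 * Note: drain_bits_per_1024ns is a rate in bits per 1024 ns,
		 * so the >> 13 appears to fold two scalings into one: >> 10
		 * converts delta_time_ns into 1024 ns units and >> 3 converts
		 * bits into bytes, matching the byte-sized skb->len check in
		 * sched_skb().
		 */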
429 		p->avail = min(p->avail + delta_avail, s->max_avail);
430 	}
431 
432 	s->last_updated = now;
433 
434 	return 1;
435 }
436 
437 /*
438  * sched_skb() is called from two different places. In the tx path, any
439  * packet generating load on an output port will call sched_skb()
440  * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
441  * context (skb == NULL).
442  * The scheduler only returns a skb (which will then be sent) if the
443  * length of the skb is <= the current quota of the output port.
444  */
445 static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
446 				unsigned int credits)
447 {
448 	struct sched *s = sge->tx_sched;
449 	struct sk_buff_head *skbq;
450 	unsigned int i, len, update = 1;
451 
452 	pr_debug("sched_skb %p\n", skb);
453 	if (!skb) {
454 		if (!s->num)
455 			return NULL;
456 	} else {
457 		skbq = &s->p[skb->dev->if_port].skbq;
458 		__skb_queue_tail(skbq, skb);
459 		s->num++;
460 		skb = NULL;
461 	}
462 
463 	if (credits < MAX_SKB_FRAGS + 1)
464 		goto out;
465 
466 again:
467 	for (i = 0; i < MAX_NPORTS; i++) {
468 		s->port = (s->port + 1) & (MAX_NPORTS - 1);
469 		skbq = &s->p[s->port].skbq;
470 
471 		skb = skb_peek(skbq);
472 
473 		if (!skb)
474 			continue;
475 
476 		len = skb->len;
477 		if (len <= s->p[s->port].avail) {
478 			s->p[s->port].avail -= len;
479 			s->num--;
480 			__skb_unlink(skb, skbq);
481 			goto out;
482 		}
483 		skb = NULL;
484 	}
485 
486 	if (update-- && sched_update_avail(sge))
487 		goto again;
488 
489 out:
490 	/* If there are more pending skbs, we use the hardware to schedule us
491 	 * again.
492 	 */
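	/*
	 * The status-bit handshake below (repeated wherever cmdQ0's doorbell
	 * is rung): CMDQ_STAT_RUNNING records that the fetch engine has been
	 * kicked, and CMDQ_STAT_LAST_PKT_DB that the doorbell covers the most
	 * recently queued descriptors, so that update_tx_info() only re-rings
	 * the doorbell when new work arrived after the engine went idle.
	 */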
493 	if (s->num && !skb) {
494 		struct cmdQ *q = &sge->cmdQ[0];
495 		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
496 		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
497 			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
498 			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
499 		}
500 	}
501 	pr_debug("sched_skb ret %p\n", skb);
502 
503 	return skb;
504 }
505 
506 /*
507  * PIO to indicate that memory mapped Q contains valid descriptor(s).
508  */
509 static inline void doorbell_pio(struct adapter *adapter, u32 val)
510 {
511 	wmb();
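	/* ensure descriptor writes are visible before ringing the doorbell */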
512 	writel(val, adapter->regs + A_SG_DOORBELL);
513 }
514 
515 /*
516  * Frees all RX buffers on the freelist Q. The caller must make sure that
517  * the SGE is turned off before calling this function.
518  */
519 static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
520 {
521 	unsigned int cidx = q->cidx;
522 
523 	while (q->credits--) {
524 		struct freelQ_ce *ce = &q->centries[cidx];
525 
526 		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
527 				 dma_unmap_len(ce, dma_len),
528 				 PCI_DMA_FROMDEVICE);
529 		dev_kfree_skb(ce->skb);
530 		ce->skb = NULL;
531 		if (++cidx == q->size)
532 			cidx = 0;
533 	}
534 }
535 
536 /*
537  * Free RX free list and response queue resources.
538  */
539 static void free_rx_resources(struct sge *sge)
540 {
541 	struct pci_dev *pdev = sge->adapter->pdev;
542 	unsigned int size, i;
543 
544 	if (sge->respQ.entries) {
545 		size = sizeof(struct respQ_e) * sge->respQ.size;
546 		pci_free_consistent(pdev, size, sge->respQ.entries,
547 				    sge->respQ.dma_addr);
548 	}
549 
550 	for (i = 0; i < SGE_FREELQ_N; i++) {
551 		struct freelQ *q = &sge->freelQ[i];
552 
553 		if (q->centries) {
554 			free_freelQ_buffers(pdev, q);
555 			kfree(q->centries);
556 		}
557 		if (q->entries) {
558 			size = sizeof(struct freelQ_e) * q->size;
559 			pci_free_consistent(pdev, size, q->entries,
560 					    q->dma_addr);
561 		}
562 	}
563 }
564 
565 /*
566  * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
567  * response queue.
568  */
569 static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
570 {
571 	struct pci_dev *pdev = sge->adapter->pdev;
572 	unsigned int size, i;
573 
574 	for (i = 0; i < SGE_FREELQ_N; i++) {
575 		struct freelQ *q = &sge->freelQ[i];
576 
577 		q->genbit = 1;
578 		q->size = p->freelQ_size[i];
579 		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
580 		size = sizeof(struct freelQ_e) * q->size;
581 		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
582 		if (!q->entries)
583 			goto err_no_mem;
584 
585 		size = sizeof(struct freelQ_ce) * q->size;
586 		q->centries = kzalloc(size, GFP_KERNEL);
587 		if (!q->centries)
588 			goto err_no_mem;
589 	}
590 
591 	/*
592 	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
593 	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
594 	 * including all the sk_buff overhead.
595 	 *
596 	 * Note: For T2 FL0 and FL1 are reversed.
597 	 */
598 	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
599 		sizeof(struct cpl_rx_data) +
600 		sge->freelQ[!sge->jumbo_fl].dma_offset;
601 
602 	size = (16 * 1024) -
603 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
604 
605 	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
606 
607 	/*
608 	 * Setup which skb recycle Q should be used when recycling buffers from
609 	 * each free list.
610 	 */
611 	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
612 	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
613 
614 	sge->respQ.genbit = 1;
615 	sge->respQ.size = SGE_RESPQ_E_N;
616 	sge->respQ.credits = 0;
617 	size = sizeof(struct respQ_e) * sge->respQ.size;
618 	sge->respQ.entries =
619 		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
620 	if (!sge->respQ.entries)
621 		goto err_no_mem;
622 	return 0;
623 
624 err_no_mem:
625 	free_rx_resources(sge);
626 	return -ENOMEM;
627 }
628 
629 /*
630  * Reclaims n TX descriptors and frees the buffers associated with them.
631  */
632 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
633 {
634 	struct cmdQ_ce *ce;
635 	struct pci_dev *pdev = sge->adapter->pdev;
636 	unsigned int cidx = q->cidx;
637 
638 	q->in_use -= n;
639 	ce = &q->centries[cidx];
640 	while (n--) {
641 		if (likely(dma_unmap_len(ce, dma_len))) {
642 			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
643 					 dma_unmap_len(ce, dma_len),
644 					 PCI_DMA_TODEVICE);
645 			if (q->sop)
646 				q->sop = 0;
647 		}
648 		if (ce->skb) {
649 			dev_kfree_skb_any(ce->skb);
650 			q->sop = 1;
651 		}
652 		ce++;
653 		if (++cidx == q->size) {
654 			cidx = 0;
655 			ce = q->centries;
656 		}
657 	}
658 	q->cidx = cidx;
659 }
660 
661 /*
662  * Free TX resources.
663  *
664  * Assumes that SGE is stopped and all interrupts are disabled.
665  */
666 static void free_tx_resources(struct sge *sge)
667 {
668 	struct pci_dev *pdev = sge->adapter->pdev;
669 	unsigned int size, i;
670 
671 	for (i = 0; i < SGE_CMDQ_N; i++) {
672 		struct cmdQ *q = &sge->cmdQ[i];
673 
674 		if (q->centries) {
675 			if (q->in_use)
676 				free_cmdQ_buffers(sge, q, q->in_use);
677 			kfree(q->centries);
678 		}
679 		if (q->entries) {
680 			size = sizeof(struct cmdQ_e) * q->size;
681 			pci_free_consistent(pdev, size, q->entries,
682 					    q->dma_addr);
683 		}
684 	}
685 }
686 
687 /*
688  * Allocates basic TX resources, consisting of memory mapped command Qs.
689  */
690 static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
691 {
692 	struct pci_dev *pdev = sge->adapter->pdev;
693 	unsigned int size, i;
694 
695 	for (i = 0; i < SGE_CMDQ_N; i++) {
696 		struct cmdQ *q = &sge->cmdQ[i];
697 
698 		q->genbit = 1;
699 		q->sop = 1;
700 		q->size = p->cmdQ_size[i];
701 		q->in_use = 0;
702 		q->status = 0;
703 		q->processed = q->cleaned = 0;
704 		q->stop_thres = 0;
705 		spin_lock_init(&q->lock);
706 		size = sizeof(struct cmdQ_e) * q->size;
707 		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
708 		if (!q->entries)
709 			goto err_no_mem;
710 
711 		size = sizeof(struct cmdQ_ce) * q->size;
712 		q->centries = kzalloc(size, GFP_KERNEL);
713 		if (!q->centries)
714 			goto err_no_mem;
715 	}
716 
717 	/*
718 	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
719 	 * only.  For queue 0 set the stop threshold so we can handle one more
720 	 * packet (up to MAX_SKB_FRAGS + 1 descriptors) from each port.
721 	 * Queue 1 never suspends nor do we reserve
722 	 * space for Ethernet packets.
723 	 */
724 	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
725 		(MAX_SKB_FRAGS + 1);
726 	return 0;
727 
728 err_no_mem:
729 	free_tx_resources(sge);
730 	return -ENOMEM;
731 }
732 
733 static inline void setup_ring_params(struct adapter *adapter, u64 addr,
734 				     u32 size, int base_reg_lo,
735 				     int base_reg_hi, int size_reg)
736 {
737 	writel((u32)addr, adapter->regs + base_reg_lo);
738 	writel(addr >> 32, adapter->regs + base_reg_hi);
739 	writel(size, adapter->regs + size_reg);
740 }
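/*
 * setup_ring_params() splits the 64-bit DMA base address of a ring across
 * the low/high base registers and programs the ring size; configure_sge()
 * below uses it for both command queues, both free lists, and the response
 * queue.
 */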
741 
742 /*
743  * Enable/disable VLAN acceleration.
744  */
745 void t1_vlan_mode(struct adapter *adapter, u32 features)
746 {
747 	struct sge *sge = adapter->sge;
748 
749 	if (features & NETIF_F_HW_VLAN_RX)
750 		sge->sge_control |= F_VLAN_XTRACT;
751 	else
752 		sge->sge_control &= ~F_VLAN_XTRACT;
753 	if (adapter->open_device_map) {
754 		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
755 		readl(adapter->regs + A_SG_CONTROL);   /* flush */
756 	}
757 }
758 
759 /*
760  * Programs the various SGE registers. The engine is not yet enabled, but
761  * sge->sge_control is set up and ready to go.
762  */
763 static void configure_sge(struct sge *sge, struct sge_params *p)
764 {
765 	struct adapter *ap = sge->adapter;
766 
767 	writel(0, ap->regs + A_SG_CONTROL);
768 	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
769 			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
770 	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
771 			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
772 	setup_ring_params(ap, sge->freelQ[0].dma_addr,
773 			  sge->freelQ[0].size, A_SG_FL0BASELWR,
774 			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
775 	setup_ring_params(ap, sge->freelQ[1].dma_addr,
776 			  sge->freelQ[1].size, A_SG_FL1BASELWR,
777 			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);
778 
779 	/* The threshold comparison uses <. */
780 	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
781 
782 	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
783 			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
784 	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
785 
786 	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
787 		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
788 		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
789 		V_RX_PKT_OFFSET(sge->rx_pkt_pad);
790 
791 #if defined(__BIG_ENDIAN_BITFIELD)
792 	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
793 #endif
794 
795 	/* Initialize no-resource timer */
796 	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
797 
798 	t1_sge_set_coalesce_params(sge, p);
799 }
800 
801 /*
802  * Return the payload capacity of the jumbo free-list buffers.
803  */
804 static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
805 {
806 	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
807 		sge->freelQ[sge->jumbo_fl].dma_offset -
808 		sizeof(struct cpl_rx_data);
809 }
810 
811 /*
812  * Frees all SGE related resources and the sge structure itself
813  */
814 void t1_sge_destroy(struct sge *sge)
815 {
816 	int i;
817 
818 	for_each_port(sge->adapter, i)
819 		free_percpu(sge->port_stats[i]);
820 
821 	kfree(sge->tx_sched);
822 	free_tx_resources(sge);
823 	free_rx_resources(sge);
824 	kfree(sge);
825 }
826 
827 /*
828  * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
829  * context Q) until the Q is full or alloc_skb fails.
830  *
831  * It is possible that the generation bits already match, indicating that the
832  * buffer is already valid and nothing needs to be done. This happens when we
833  * copied a received buffer into a new sk_buff during the interrupt processing.
834  *
835  * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
836  * we specify an RX_OFFSET in order to make sure that the IP header is 4B
837  * aligned.
838  */
839 static void refill_free_list(struct sge *sge, struct freelQ *q)
840 {
841 	struct pci_dev *pdev = sge->adapter->pdev;
842 	struct freelQ_ce *ce = &q->centries[q->pidx];
843 	struct freelQ_e *e = &q->entries[q->pidx];
844 	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
845 
846 	while (q->credits < q->size) {
847 		struct sk_buff *skb;
848 		dma_addr_t mapping;
849 
850 		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
851 		if (!skb)
852 			break;
853 
854 		skb_reserve(skb, q->dma_offset);
855 		mapping = pci_map_single(pdev, skb->data, dma_len,
856 					 PCI_DMA_FROMDEVICE);
857 		skb_reserve(skb, sge->rx_pkt_pad);
858 
859 		ce->skb = skb;
860 		dma_unmap_addr_set(ce, dma_addr, mapping);
861 		dma_unmap_len_set(ce, dma_len, dma_len);
862 		e->addr_lo = (u32)mapping;
863 		e->addr_hi = (u64)mapping >> 32;
864 		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
865 		wmb();
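		/*
		 * The barrier above orders the address/length writes before
		 * the gen2 update below; the hardware considers the
		 * descriptor valid only once both generation copies match
		 * q->genbit.
		 */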
866 		e->gen2 = V_CMD_GEN2(q->genbit);
867 
868 		e++;
869 		ce++;
870 		if (++q->pidx == q->size) {
871 			q->pidx = 0;
872 			q->genbit ^= 1;
873 			ce = q->centries;
874 			e = q->entries;
875 		}
876 		q->credits++;
877 	}
878 }
879 
880 /*
881  * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
882  * of both rings, we go into 'few interrupt mode' in order to give the system
883  * time to free up resources.
884  */
885 static void freelQs_empty(struct sge *sge)
886 {
887 	struct adapter *adapter = sge->adapter;
888 	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
889 	u32 irqholdoff_reg;
890 
891 	refill_free_list(sge, &sge->freelQ[0]);
892 	refill_free_list(sge, &sge->freelQ[1]);
893 
894 	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
895 	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
896 		irq_reg |= F_FL_EXHAUSTED;
897 		irqholdoff_reg = sge->fixed_intrtimer;
898 	} else {
899 		/* Clear the F_FL_EXHAUSTED interrupts for now */
900 		irq_reg &= ~F_FL_EXHAUSTED;
901 		irqholdoff_reg = sge->intrtimer_nres;
902 	}
903 	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
904 	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
905 
906 	/* We reenable the Qs to force a freelist GTS interrupt later */
907 	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
908 }
909 
910 #define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
911 #define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
912 #define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
913 			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
914 
915 /*
916  * Disable SGE Interrupts
917  */
918 void t1_sge_intr_disable(struct sge *sge)
919 {
920 	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
921 
922 	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
923 	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
924 }
925 
926 /*
927  * Enable SGE interrupts.
928  */
929 void t1_sge_intr_enable(struct sge *sge)
930 {
931 	u32 en = SGE_INT_ENABLE;
932 	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
933 
934 	if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
935 		en &= ~F_PACKET_TOO_BIG;
936 	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
937 	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
938 }
939 
940 /*
941  * Clear SGE interrupts.
942  */
943 void t1_sge_intr_clear(struct sge *sge)
944 {
945 	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
946 	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
947 }
948 
949 /*
950  * SGE 'Error' interrupt handler
951  */
952 int t1_sge_intr_error_handler(struct sge *sge)
953 {
954 	struct adapter *adapter = sge->adapter;
955 	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
956 
957 	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
958 		cause &= ~F_PACKET_TOO_BIG;
959 	if (cause & F_RESPQ_EXHAUSTED)
960 		sge->stats.respQ_empty++;
961 	if (cause & F_RESPQ_OVERFLOW) {
962 		sge->stats.respQ_overflow++;
963 		pr_alert("%s: SGE response queue overflow\n",
964 			 adapter->name);
965 	}
966 	if (cause & F_FL_EXHAUSTED) {
967 		sge->stats.freelistQ_empty++;
968 		freelQs_empty(sge);
969 	}
970 	if (cause & F_PACKET_TOO_BIG) {
971 		sge->stats.pkt_too_big++;
972 		pr_alert("%s: SGE max packet size exceeded\n",
973 			 adapter->name);
974 	}
975 	if (cause & F_PACKET_MISMATCH) {
976 		sge->stats.pkt_mismatch++;
977 		pr_alert("%s: SGE packet mismatch\n", adapter->name);
978 	}
979 	if (cause & SGE_INT_FATAL)
980 		t1_fatal_err(adapter);
981 
982 	writel(cause, adapter->regs + A_SG_INT_CAUSE);
983 	return 0;
984 }
985 
986 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
987 {
988 	return &sge->stats;
989 }
990 
991 void t1_sge_get_port_stats(const struct sge *sge, int port,
992 			   struct sge_port_stats *ss)
993 {
994 	int cpu;
995 
996 	memset(ss, 0, sizeof(*ss));
997 	for_each_possible_cpu(cpu) {
998 		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
999 
1000 		ss->rx_cso_good += st->rx_cso_good;
1001 		ss->tx_cso += st->tx_cso;
1002 		ss->tx_tso += st->tx_tso;
1003 		ss->tx_need_hdrroom += st->tx_need_hdrroom;
1004 		ss->vlan_xtract += st->vlan_xtract;
1005 		ss->vlan_insert += st->vlan_insert;
1006 	}
1007 }
1008 
1009 /**
1010  *	recycle_fl_buf - recycle a free list buffer
1011  *	@fl: the free list
1012  *	@idx: index of buffer to recycle
1013  *
1014  *	Recycles the specified buffer on the given free list by adding it at
1015  *	the next available slot on the list.
1016  */
1017 static void recycle_fl_buf(struct freelQ *fl, int idx)
1018 {
1019 	struct freelQ_e *from = &fl->entries[idx];
1020 	struct freelQ_e *to = &fl->entries[fl->pidx];
1021 
1022 	fl->centries[fl->pidx] = fl->centries[idx];
1023 	to->addr_lo = from->addr_lo;
1024 	to->addr_hi = from->addr_hi;
1025 	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
1026 	wmb();
1027 	to->gen2 = V_CMD_GEN2(fl->genbit);
1028 	fl->credits++;
1029 
1030 	if (++fl->pidx == fl->size) {
1031 		fl->pidx = 0;
1032 		fl->genbit ^= 1;
1033 	}
1034 }
1035 
1036 static int copybreak __read_mostly = 256;
1037 module_param(copybreak, int, 0);
1038 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1039 
1040 /**
1041  *	get_packet - return the next ingress packet buffer
1042  *	@pdev: the PCI device that received the packet
1043  *	@fl: the SGE free list holding the packet
1044  *	@len: the actual packet length, excluding any SGE padding
1045  *
1046  *	Get the next packet from a free list and complete setup of the
1047  *	sk_buff.  If the packet is small we make a copy and recycle the
1048  *	original buffer, otherwise we use the original buffer itself.  If the
1049  *	free list is nearly out of buffers (fewer than two credits left) and
1050  *	the packet could not be copied, either because it is too big or
1051  *	because the copy allocation failed, the packet is dropped and its
1052  *	buffer is recycled.
1053  */
1054 static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1055 					 struct freelQ *fl, unsigned int len)
1056 {
1057 	struct sk_buff *skb;
1058 	const struct freelQ_ce *ce = &fl->centries[fl->cidx];
1059 
1060 	if (len < copybreak) {
1061 		skb = alloc_skb(len + 2, GFP_ATOMIC);
1062 		if (!skb)
1063 			goto use_orig_buf;
1064 
1065 		skb_reserve(skb, 2);	/* align IP header */
1066 		skb_put(skb, len);
1067 		pci_dma_sync_single_for_cpu(pdev,
1068 					    dma_unmap_addr(ce, dma_addr),
1069 					    dma_unmap_len(ce, dma_len),
1070 					    PCI_DMA_FROMDEVICE);
1071 		skb_copy_from_linear_data(ce->skb, skb->data, len);
1072 		pci_dma_sync_single_for_device(pdev,
1073 					       dma_unmap_addr(ce, dma_addr),
1074 					       dma_unmap_len(ce, dma_len),
1075 					       PCI_DMA_FROMDEVICE);
1076 		recycle_fl_buf(fl, fl->cidx);
1077 		return skb;
1078 	}
1079 
1080 use_orig_buf:
1081 	if (fl->credits < 2) {
1082 		recycle_fl_buf(fl, fl->cidx);
1083 		return NULL;
1084 	}
1085 
1086 	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
1087 			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1088 	skb = ce->skb;
1089 	prefetch(skb->data);
1090 
1091 	skb_put(skb, len);
1092 	return skb;
1093 }
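/*
 * The copybreak module parameter above tunes the copy-vs-recycle trade-off:
 * packets shorter than copybreak bytes are copied into a freshly allocated
 * skb so that the large DMA buffer can be recycled immediately, keeping the
 * free list warm at a small CPU cost.
 */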
1094 
1095 /**
1096  *	unexpected_offload - handle an unexpected offload packet
1097  *	@adapter: the adapter
1098  *	@fl: the free list that received the packet
1099  *
1100  *	Called when we receive an unexpected offload packet (e.g., the TOE
1101  *	function is disabled or the card is a NIC).  Prints a message and
1102  *	recycles the buffer.
1103  */
1104 static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1105 {
1106 	struct freelQ_ce *ce = &fl->centries[fl->cidx];
1107 	struct sk_buff *skb = ce->skb;
1108 
1109 	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
1110 			    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1111 	pr_err("%s: unexpected offload packet, cmd %u\n",
1112 	       adapter->name, *skb->data);
1113 	recycle_fl_buf(fl, fl->cidx);
1114 }
1115 
1116 /*
1117  * T1/T2 SGE limits the maximum DMA size per TX descriptor to
1118  * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
1119  * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
1120  * Note that the *_large_page_tx_descs stuff will be optimized out when
1121  * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
1122  *
1123  * compute_large_page_tx_descs() computes how many additional descriptors are
1124  * required to break down the stack's request.
1125  */
1126 static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1127 {
1128 	unsigned int count = 0;
1129 
1130 	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1131 		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1132 		unsigned int i, len = skb_headlen(skb);
1133 		while (len > SGE_TX_DESC_MAX_PLEN) {
1134 			count++;
1135 			len -= SGE_TX_DESC_MAX_PLEN;
1136 		}
1137 		for (i = 0; nfrags--; i++) {
1138 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1139 			len = skb_frag_size(frag);
1140 			while (len > SGE_TX_DESC_MAX_PLEN) {
1141 				count++;
1142 				len -= SGE_TX_DESC_MAX_PLEN;
1143 			}
1144 		}
1145 	}
1146 	return count;
1147 }
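/*
 * Illustration (sizes hypothetical): on a 64KB-page machine, an skb with a
 * 20KB linear area and no fragments needs one extra descriptor (the head is
 * emitted as 16KB + 4KB), and a single 40KB fragment would add two more
 * (16KB + 16KB + 8KB).
 */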
1148 
1149 /*
1150  * Write a cmdQ entry.
1151  *
1152  * Since this function writes the 'flags' field, it must not be used to
1153  * write the first cmdQ entry.
1154  */
1155 static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
1156 				 unsigned int len, unsigned int gen,
1157 				 unsigned int eop)
1158 {
1159 	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
1160 
1161 	e->addr_lo = (u32)mapping;
1162 	e->addr_hi = (u64)mapping >> 32;
1163 	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
1164 	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
1165 }
1166 
1167 /*
1168  * See comment for previous function.
1169  *
1170  * write_large_page_tx_descs() writes additional SGE tx descriptors if
1171  * *desc_len exceeds HW's capability.
1172  */
1173 static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
1174 						     struct cmdQ_e **e,
1175 						     struct cmdQ_ce **ce,
1176 						     unsigned int *gen,
1177 						     dma_addr_t *desc_mapping,
1178 						     unsigned int *desc_len,
1179 						     unsigned int nfrags,
1180 						     struct cmdQ *q)
1181 {
1182 	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1183 		struct cmdQ_e *e1 = *e;
1184 		struct cmdQ_ce *ce1 = *ce;
1185 
1186 		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
1187 			*desc_len -= SGE_TX_DESC_MAX_PLEN;
1188 			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
1189 				      *gen, nfrags == 0 && *desc_len == 0);
1190 			ce1->skb = NULL;
1191 			dma_unmap_len_set(ce1, dma_len, 0);
1192 			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
1193 			if (*desc_len) {
1194 				ce1++;
1195 				e1++;
1196 				if (++pidx == q->size) {
1197 					pidx = 0;
1198 					*gen ^= 1;
1199 					ce1 = q->centries;
1200 					e1 = q->entries;
1201 				}
1202 			}
1203 		}
1204 		*e = e1;
1205 		*ce = ce1;
1206 	}
1207 	return pidx;
1208 }
1209 
1210 /*
1211  * Write the command descriptors to transmit the given skb starting at
1212  * descriptor pidx with the given generation.
1213  */
1214 static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1215 				  unsigned int pidx, unsigned int gen,
1216 				  struct cmdQ *q)
1217 {
1218 	dma_addr_t mapping, desc_mapping;
1219 	struct cmdQ_e *e, *e1;
1220 	struct cmdQ_ce *ce;
1221 	unsigned int i, flags, first_desc_len, desc_len,
1222 	    nfrags = skb_shinfo(skb)->nr_frags;
1223 
1224 	e = e1 = &q->entries[pidx];
1225 	ce = &q->centries[pidx];
1226 
1227 	mapping = pci_map_single(adapter->pdev, skb->data,
1228 				 skb_headlen(skb), PCI_DMA_TODEVICE);
1229 
1230 	desc_mapping = mapping;
1231 	desc_len = skb_headlen(skb);
1232 
1233 	flags = F_CMD_DATAVALID | F_CMD_SOP |
1234 	    V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
1235 	    V_CMD_GEN2(gen);
1236 	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
1237 	    desc_len : SGE_TX_DESC_MAX_PLEN;
1238 	e->addr_lo = (u32)desc_mapping;
1239 	e->addr_hi = (u64)desc_mapping >> 32;
1240 	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
1241 	ce->skb = NULL;
1242 	dma_unmap_len_set(ce, dma_len, 0);
1243 
1244 	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
1245 	    desc_len > SGE_TX_DESC_MAX_PLEN) {
1246 		desc_mapping += first_desc_len;
1247 		desc_len -= first_desc_len;
1248 		e1++;
1249 		ce++;
1250 		if (++pidx == q->size) {
1251 			pidx = 0;
1252 			gen ^= 1;
1253 			e1 = q->entries;
1254 			ce = q->centries;
1255 		}
1256 		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1257 						 &desc_mapping, &desc_len,
1258 						 nfrags, q);
1259 
1260 		if (likely(desc_len))
1261 			write_tx_desc(e1, desc_mapping, desc_len, gen,
1262 				      nfrags == 0);
1263 	}
1264 
1265 	ce->skb = NULL;
1266 	dma_unmap_addr_set(ce, dma_addr, mapping);
1267 	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
1268 
1269 	for (i = 0; nfrags--; i++) {
1270 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1271 		e1++;
1272 		ce++;
1273 		if (++pidx == q->size) {
1274 			pidx = 0;
1275 			gen ^= 1;
1276 			e1 = q->entries;
1277 			ce = q->centries;
1278 		}
1279 
1280 		mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
1281 					   skb_frag_size(frag), DMA_TO_DEVICE);
1282 		desc_mapping = mapping;
1283 		desc_len = skb_frag_size(frag);
1284 
1285 		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1286 						 &desc_mapping, &desc_len,
1287 						 nfrags, q);
1288 		if (likely(desc_len))
1289 			write_tx_desc(e1, desc_mapping, desc_len, gen,
1290 				      nfrags == 0);
1291 		ce->skb = NULL;
1292 		dma_unmap_addr_set(ce, dma_addr, mapping);
1293 		dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
1294 	}
1295 	ce->skb = skb;
1296 	wmb();
1297 	e->flags = flags;
1298 }
1299 
1300 /*
1301  * Clean up completed Tx buffers.
1302  */
1303 static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
1304 {
1305 	unsigned int reclaim = q->processed - q->cleaned;
1306 
1307 	if (reclaim) {
1308 		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
1309 			 q->processed, q->cleaned);
1310 		free_cmdQ_buffers(sge, q, reclaim);
1311 		q->cleaned += reclaim;
1312 	}
1313 }
1314 
1315 /*
1316  * Called from tasklet. Checks the scheduler for any
1317  * pending skbs that can be sent.
1318  */
1319 static void restart_sched(unsigned long arg)
1320 {
1321 	struct sge *sge = (struct sge *) arg;
1322 	struct adapter *adapter = sge->adapter;
1323 	struct cmdQ *q = &sge->cmdQ[0];
1324 	struct sk_buff *skb;
1325 	unsigned int credits, queued_skb = 0;
1326 
1327 	spin_lock(&q->lock);
1328 	reclaim_completed_tx(sge, q);
1329 
1330 	credits = q->size - q->in_use;
1331 	pr_debug("restart_sched credits=%d\n", credits);
1332 	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1333 		unsigned int genbit, pidx, count;
1334 		count = 1 + skb_shinfo(skb)->nr_frags;
1335 		count += compute_large_page_tx_descs(skb);
1336 		q->in_use += count;
1337 		genbit = q->genbit;
1338 		pidx = q->pidx;
1339 		q->pidx += count;
1340 		if (q->pidx >= q->size) {
1341 			q->pidx -= q->size;
1342 			q->genbit ^= 1;
1343 		}
1344 		write_tx_descs(adapter, skb, pidx, genbit, q);
1345 		credits = q->size - q->in_use;
1346 		queued_skb = 1;
1347 	}
1348 
1349 	if (queued_skb) {
1350 		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1351 		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1352 			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1353 			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1354 		}
1355 	}
1356 	spin_unlock(&q->lock);
1357 }
1358 
1359 /**
1360  *	sge_rx - process an ingress ethernet packet
1361  *	@sge: the sge structure
1362  *	@fl: the free list that contains the packet buffer
1363  *	@len: the packet length
1364  *
1365  *	Process an ingress ethernet packet and deliver it to the stack.
1366  */
1367 static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1368 {
1369 	struct sk_buff *skb;
1370 	const struct cpl_rx_pkt *p;
1371 	struct adapter *adapter = sge->adapter;
1372 	struct sge_port_stats *st;
1373 	struct net_device *dev;
1374 
1375 	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
1376 	if (unlikely(!skb)) {
1377 		sge->stats.rx_drops++;
1378 		return;
1379 	}
1380 
1381 	p = (const struct cpl_rx_pkt *) skb->data;
1382 	if (p->iff >= adapter->params.nports) {
1383 		kfree_skb(skb);
1384 		return;
1385 	}
1386 	__skb_pull(skb, sizeof(*p));
1387 
1388 	st = this_cpu_ptr(sge->port_stats[p->iff]);
1389 	dev = adapter->port[p->iff].dev;
1390 
1391 	skb->protocol = eth_type_trans(skb, dev);
1392 	if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
1393 	    skb->protocol == htons(ETH_P_IP) &&
1394 	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
1395 		++st->rx_cso_good;
1396 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1397 	} else
1398 		skb_checksum_none_assert(skb);
1399 
1400 	if (p->vlan_valid) {
1401 		st->vlan_xtract++;
1402 		__vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
1403 	}
1404 	netif_receive_skb(skb);
1405 }
1406 
1407 /*
1408  * Returns true if a command queue has enough available descriptors that
1409  * we can resume Tx operation after temporarily disabling its packet queue.
1410  */
1411 static inline int enough_free_Tx_descs(const struct cmdQ *q)
1412 {
1413 	unsigned int r = q->processed - q->cleaned;
1414 
1415 	return q->in_use - r < (q->size >> 1);
1416 }
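/*
 * Example: with a 1024-entry command queue, Tx is resumed once fewer than
 * 512 descriptors remain in use after subtracting those the hardware has
 * processed but software has not yet reclaimed.
 */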
1417 
1418 /*
1419  * Called when sufficient space has become available in the SGE command queues
1420  * after the Tx packet schedulers have been suspended to restart the Tx path.
1421  */
1422 static void restart_tx_queues(struct sge *sge)
1423 {
1424 	struct adapter *adap = sge->adapter;
1425 	int i;
1426 
1427 	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1428 		return;
1429 
1430 	for_each_port(adap, i) {
1431 		struct net_device *nd = adap->port[i].dev;
1432 
1433 		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
1434 		    netif_running(nd)) {
1435 			sge->stats.cmdQ_restarted[2]++;
1436 			netif_wake_queue(nd);
1437 		}
1438 	}
1439 }
1440 
1441 /*
1442  * update_tx_info() is called from the interrupt handler/NAPI to process
1443  * returned cmdQ0 credits and, if necessary, restart the Tx path.
1444  */
1445 static unsigned int update_tx_info(struct adapter *adapter,
1446 					  unsigned int flags,
1447 					  unsigned int pr0)
1448 {
1449 	struct sge *sge = adapter->sge;
1450 	struct cmdQ *cmdq = &sge->cmdQ[0];
1451 
1452 	cmdq->processed += pr0;
1453 	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
1454 		freelQs_empty(sge);
1455 		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
1456 	}
1457 	if (flags & F_CMDQ0_ENABLE) {
1458 		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1459 
1460 		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1461 		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1462 			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1463 			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1464 		}
1465 		if (sge->tx_sched)
1466 			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
1467 
1468 		flags &= ~F_CMDQ0_ENABLE;
1469 	}
1470 
1471 	if (unlikely(sge->stopped_tx_queues != 0))
1472 		restart_tx_queues(sge);
1473 
1474 	return flags;
1475 }
1476 
1477 /*
1478  * Process SGE responses, up to the supplied budget.  Returns the number of
1479  * responses processed.  A negative budget is effectively unlimited.
1480  */
1481 static int process_responses(struct adapter *adapter, int budget)
1482 {
1483 	struct sge *sge = adapter->sge;
1484 	struct respQ *q = &sge->respQ;
1485 	struct respQ_e *e = &q->entries[q->cidx];
1486 	int done = 0;
1487 	unsigned int flags = 0;
1488 	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1489 
1490 	while (done < budget && e->GenerationBit == q->genbit) {
1491 		flags |= e->Qsleeping;
1492 
1493 		cmdq_processed[0] += e->Cmdq0CreditReturn;
1494 		cmdq_processed[1] += e->Cmdq1CreditReturn;
1495 
1496 		/* We batch updates to the TX side to avoid cacheline
1497 		 * ping-pong of TX state information on MP where the sender
1498 		 * might run on a different CPU than this function...
1499 		 */
1500 		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
1501 			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1502 			cmdq_processed[0] = 0;
1503 		}
1504 
1505 		if (unlikely(cmdq_processed[1] > 16)) {
1506 			sge->cmdQ[1].processed += cmdq_processed[1];
1507 			cmdq_processed[1] = 0;
1508 		}
1509 
1510 		if (likely(e->DataValid)) {
1511 			struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1512 
1513 			BUG_ON(!e->Sop || !e->Eop);
1514 			if (unlikely(e->Offload))
1515 				unexpected_offload(adapter, fl);
1516 			else
1517 				sge_rx(sge, fl, e->BufferLength);
1518 
1519 			++done;
1520 
1521 			/*
1522 			 * Note: this depends on each packet consuming a
1523 			 * single free-list buffer; cf. the BUG above.
1524 			 */
1525 			if (++fl->cidx == fl->size)
1526 				fl->cidx = 0;
1527 			prefetch(fl->centries[fl->cidx].skb);
1528 
1529 			if (unlikely(--fl->credits <
1530 				     fl->size - SGE_FREEL_REFILL_THRESH))
1531 				refill_free_list(sge, fl);
1532 		} else
1533 			sge->stats.pure_rsps++;
1534 
1535 		e++;
1536 		if (unlikely(++q->cidx == q->size)) {
1537 			q->cidx = 0;
1538 			q->genbit ^= 1;
1539 			e = q->entries;
1540 		}
1541 		prefetch(e);
1542 
1543 		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1544 			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1545 			q->credits = 0;
1546 		}
1547 	}
1548 
1549 	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1550 	sge->cmdQ[1].processed += cmdq_processed[1];
1551 
1552 	return done;
1553 }
1554 
1555 static inline int responses_pending(const struct adapter *adapter)
1556 {
1557 	const struct respQ *Q = &adapter->sge->respQ;
1558 	const struct respQ_e *e = &Q->entries[Q->cidx];
1559 
1560 	return e->GenerationBit == Q->genbit;
1561 }
1562 
1563 /*
1564  * A simpler version of process_responses() that handles only pure (i.e.,
1565  * non data-carrying) responses.  Such responses are too lightweight to justify
1566  * calling a softirq when using NAPI, so we handle them specially in hard
1567  * interrupt context.  The caller must ensure that at least one response is
1568  * pending when this function is called.  Returns 1 if it
1569  * encounters a valid data-carrying response, 0 otherwise.
1570  */
1571 static int process_pure_responses(struct adapter *adapter)
1572 {
1573 	struct sge *sge = adapter->sge;
1574 	struct respQ *q = &sge->respQ;
1575 	struct respQ_e *e = &q->entries[q->cidx];
1576 	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1577 	unsigned int flags = 0;
1578 	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1579 
1580 	prefetch(fl->centries[fl->cidx].skb);
1581 	if (e->DataValid)
1582 		return 1;
1583 
1584 	do {
1585 		flags |= e->Qsleeping;
1586 
1587 		cmdq_processed[0] += e->Cmdq0CreditReturn;
1588 		cmdq_processed[1] += e->Cmdq1CreditReturn;
1589 
1590 		e++;
1591 		if (unlikely(++q->cidx == q->size)) {
1592 			q->cidx = 0;
1593 			q->genbit ^= 1;
1594 			e = q->entries;
1595 		}
1596 		prefetch(e);
1597 
1598 		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1599 			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1600 			q->credits = 0;
1601 		}
1602 		sge->stats.pure_rsps++;
1603 	} while (e->GenerationBit == q->genbit && !e->DataValid);
1604 
1605 	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1606 	sge->cmdQ[1].processed += cmdq_processed[1];
1607 
1608 	return e->GenerationBit == q->genbit;
1609 }
1610 
1611 /*
1612  * Handler for new data events when using NAPI.  This does not need any locking
1613  * or protection from interrupts as data interrupts are off at this point and
1614  * other adapter interrupts do not interfere.
1615  */
1616 int t1_poll(struct napi_struct *napi, int budget)
1617 {
1618 	struct adapter *adapter = container_of(napi, struct adapter, napi);
1619 	int work_done = process_responses(adapter, budget);
1620 
1621 	if (likely(work_done < budget)) {
1622 		napi_complete(napi);
1623 		writel(adapter->sge->respQ.cidx,
1624 		       adapter->regs + A_SG_SLEEPING);
1625 	}
1626 	return work_done;
1627 }
1628 
1629 irqreturn_t t1_interrupt(int irq, void *data)
1630 {
1631 	struct adapter *adapter = data;
1632 	struct sge *sge = adapter->sge;
1633 	int handled;
1634 
1635 	if (likely(responses_pending(adapter))) {
1636 		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1637 
1638 		if (napi_schedule_prep(&adapter->napi)) {
1639 			if (process_pure_responses(adapter))
1640 				__napi_schedule(&adapter->napi);
1641 			else {
1642 				/* no data, no NAPI needed */
1643 				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1644 				/* undo schedule_prep */
1645 				napi_enable(&adapter->napi);
1646 			}
1647 		}
1648 		return IRQ_HANDLED;
1649 	}
1650 
1651 	spin_lock(&adapter->async_lock);
1652 	handled = t1_slow_intr_handler(adapter);
1653 	spin_unlock(&adapter->async_lock);
1654 
1655 	if (!handled)
1656 		sge->stats.unhandled_irqs++;
1657 
1658 	return IRQ_RETVAL(handled != 0);
1659 }
1660 
1661 /*
1662  * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1663  *
1664  * The code figures out how many entries the sk_buff will require in the
1665  * cmdQ and updates the cmdQ data structure with the state once the enqueue
1666  * has complete. Then, it doesn't access the global structure anymore, but
1667  * uses the corresponding fields on the stack. In conjunction with a spinlock
1668  * around that code, we can make the function reentrant without holding the
1669  * lock when we actually enqueue (which might be expensive, especially on
1670  * architectures with IO MMUs).
1671  *
1672  * This runs with softirqs disabled.
1673  */
1674 static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1675 		     unsigned int qid, struct net_device *dev)
1676 {
1677 	struct sge *sge = adapter->sge;
1678 	struct cmdQ *q = &sge->cmdQ[qid];
1679 	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
1680 
1681 	if (!spin_trylock(&q->lock))
1682 		return NETDEV_TX_LOCKED;
1683 
1684 	reclaim_completed_tx(sge, q);
1685 
1686 	pidx = q->pidx;
1687 	credits = q->size - q->in_use;
1688 	count = 1 + skb_shinfo(skb)->nr_frags;
1689 	count += compute_large_page_tx_descs(skb);
1690 
1691 	/* Ethernet packet */
1692 	if (unlikely(credits < count)) {
1693 		if (!netif_queue_stopped(dev)) {
1694 			netif_stop_queue(dev);
1695 			set_bit(dev->if_port, &sge->stopped_tx_queues);
1696 			sge->stats.cmdQ_full[2]++;
1697 			pr_err("%s: Tx ring full while queue awake!\n",
1698 			       adapter->name);
1699 		}
1700 		spin_unlock(&q->lock);
1701 		return NETDEV_TX_BUSY;
1702 	}
1703 
1704 	if (unlikely(credits - count < q->stop_thres)) {
1705 		netif_stop_queue(dev);
1706 		set_bit(dev->if_port, &sge->stopped_tx_queues);
1707 		sge->stats.cmdQ_full[2]++;
1708 	}
1709 
1710 	/* T204 cmdQ0 skbs that are destined for a certain port have to go
1711 	 * through the scheduler.
1712 	 */
1713 	if (sge->tx_sched && !qid && skb->dev) {
1714 use_sched:
1715 		use_sched_skb = 1;
1716 		/* Note that the scheduler might return a different skb than
1717 		 * the one passed in.
1718 		 */
1719 		skb = sched_skb(sge, skb, credits);
1720 		if (!skb) {
1721 			spin_unlock(&q->lock);
1722 			return NETDEV_TX_OK;
1723 		}
1724 		pidx = q->pidx;
1725 		count = 1 + skb_shinfo(skb)->nr_frags;
1726 		count += compute_large_page_tx_descs(skb);
1727 	}
1728 
1729 	q->in_use += count;
1730 	genbit = q->genbit;
1731 	pidx = q->pidx;
1732 	q->pidx += count;
1733 	if (q->pidx >= q->size) {
1734 		q->pidx -= q->size;
1735 		q->genbit ^= 1;
1736 	}
1737 	spin_unlock(&q->lock);
1738 
1739 	write_tx_descs(adapter, skb, pidx, genbit, q);
1740 
1741 	/*
1742 	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
1743 	 * the doorbell if the Q is asleep. There is a natural race, where
1744 	 * the hardware is going to sleep just after we checked, however,
1745 	 * then the interrupt handler will detect the outstanding TX packet
1746 	 * and ring the doorbell for us.
1747 	 */
1748 	if (qid)
1749 		doorbell_pio(adapter, F_CMDQ1_ENABLE);
1750 	else {
1751 		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1752 		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1753 			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1754 			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1755 		}
1756 	}
1757 
1758 	if (use_sched_skb) {
1759 		if (spin_trylock(&q->lock)) {
1760 			credits = q->size - q->in_use;
1761 			skb = NULL;
1762 			goto use_sched;
1763 		}
1764 	}
1765 	return NETDEV_TX_OK;
1766 }
1767 
1768 #define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
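/*
 * The 16-bit result carries the MSS in the low 14 bits and the Ethernet
 * framing type (CPL_ETH_II or CPL_ETH_II_VLAN below) in the top two bits.
 */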
1769 
1770 /*
1771  *	eth_hdr_len - return the length of an Ethernet header
1772  *	@data: pointer to the start of the Ethernet header
1773  *
1774  *	Returns the length of an Ethernet header, including optional VLAN tag.
1775  */
1776 static inline int eth_hdr_len(const void *data)
1777 {
1778 	const struct ethhdr *e = data;
1779 
1780 	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1781 }
1782 
1783 /*
1784  * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1785  */
1786 netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1787 {
1788 	struct adapter *adapter = dev->ml_priv;
1789 	struct sge *sge = adapter->sge;
1790 	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
1791 	struct cpl_tx_pkt *cpl;
1792 	struct sk_buff *orig_skb = skb;
1793 	int ret;
1794 
1795 	if (skb->protocol == htons(ETH_P_CPL5))
1796 		goto send;
1797 
1798 	/*
1799 	 * We are using a non-standard hard_header_len.
1800 	 * Allocate more header room in the rare cases it is not big enough.
1801 	 */
1802 	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
1803 		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
1804 		++st->tx_need_hdrroom;
1805 		dev_kfree_skb_any(orig_skb);
1806 		if (!skb)
1807 			return NETDEV_TX_OK;
1808 	}
1809 
1810 	if (skb_shinfo(skb)->gso_size) {
1811 		int eth_type;
1812 		struct cpl_tx_pkt_lso *hdr;
1813 
1814 		++st->tx_tso;
1815 
1816 		eth_type = skb_network_offset(skb) == ETH_HLEN ?
1817 			CPL_ETH_II : CPL_ETH_II_VLAN;
1818 
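		/*
		 * Build the LSO CPL header in the headroom reserved through
		 * our enlarged hard_header_len; the hardware uses the MSS
		 * and header lengths recorded here to segment the payload.
		 */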
1819 		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
1820 		hdr->opcode = CPL_TX_PKT_LSO;
1821 		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1822 		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
1823 		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
1824 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1825 							  skb_shinfo(skb)->gso_size));
1826 		hdr->len = htonl(skb->len - sizeof(*hdr));
1827 		cpl = (struct cpl_tx_pkt *)hdr;
1828 	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, so drop
		 * them early.  We may also get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * correctly; drop those too.
		 */
1835 		if (unlikely(skb->len < ETH_HLEN ||
1836 			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
1839 			dev_kfree_skb_any(skb);
1840 			return NETDEV_TX_OK;
1841 		}
1842 
1843 		if (skb->ip_summed == CHECKSUM_PARTIAL &&
1844 		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
1845 			if (unlikely(skb_checksum_help(skb))) {
1846 				pr_debug("%s: unable to do udp checksum\n", dev->name);
1847 				dev_kfree_skb_any(skb);
1848 				return NETDEV_TX_OK;
1849 			}
1850 		}
1851 
		/* This hopefully catches a gratuitous ARP; we keep the skb
		 * around so it can be retransmitted later to flush out stuck
		 * ESPI packets.
		 */
		if (unlikely(!sge->espibug_skb[dev->if_port])) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				sge->espibug_skb[dev->if_port] = skb;
				/* We want to reuse this skb later, so bump
				 * the reference count to keep it from being
				 * freed once it has been transmitted.
				 */
				skb = skb_get(skb);
1864 			}
1865 		}
1866 
1867 		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
1868 		cpl->opcode = CPL_TX_PKT;
1869 		cpl->ip_csum_dis = 1;    /* SW calculates IP csum */
1870 		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
1871 		/* the length field isn't used so don't bother setting it */
1872 
1873 		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
1874 	}
1875 	cpl->iff = dev->if_port;
1876 
1877 	if (vlan_tx_tag_present(skb)) {
1878 		cpl->vlan_valid = 1;
1879 		cpl->vlan = htons(vlan_tx_tag_get(skb));
1880 		st->vlan_insert++;
	} else {
		cpl->vlan_valid = 0;
	}
1883 
1884 send:
1885 	ret = t1_sge_tx(skb, adapter, 0, dev);
1886 
	/* If the transmit was busy and we had reallocated the skb to gain
	 * headroom, silently discard the copy to avoid leaking it.
	 */
1890 	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
1891 		dev_kfree_skb_any(skb);
1892 		ret = NETDEV_TX_OK;
1893 	}
1894 	return ret;
1895 }
1896 
1897 /*
1898  * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
1899  */
1900 static void sge_tx_reclaim_cb(unsigned long data)
1901 {
1902 	int i;
1903 	struct sge *sge = (struct sge *)data;
1904 
1905 	for (i = 0; i < SGE_CMDQ_N; ++i) {
1906 		struct cmdQ *q = &sge->cmdQ[i];
1907 
1908 		if (!spin_trylock(&q->lock))
1909 			continue;
1910 
1911 		reclaim_completed_tx(sge, q);
1912 		if (i == 0 && q->in_use) {    /* flush pending credits */
1913 			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
1914 		}
1915 		spin_unlock(&q->lock);
1916 	}
1917 	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1918 }
1919 
1920 /*
1921  * Propagate changes of the SGE coalescing parameters to the HW.
1922  */
1923 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
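/*
 * The interrupt holdoff timer counts in core clock ticks, so the
 * microsecond setting is scaled by core_ticks_per_usec().  As an
 * illustration (the clock rate is board-specific, not taken from this
 * file): at 125 MHz there are 125 ticks per usec, so 50 usecs of
 * coalescing would be written as 6250 ticks.
 */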
1924 {
1925 	sge->fixed_intrtimer = p->rx_coalesce_usecs *
1926 		core_ticks_per_usec(sge->adapter);
1927 	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1928 	return 0;
1929 }
1930 
1931 /*
1932  * Allocates both RX and TX resources and configures the SGE. However,
1933  * the hardware is not enabled yet.
1934  */
1935 int t1_sge_configure(struct sge *sge, struct sge_params *p)
1936 {
1937 	if (alloc_rx_resources(sge, p))
1938 		return -ENOMEM;
1939 	if (alloc_tx_resources(sge, p)) {
1940 		free_rx_resources(sge);
1941 		return -ENOMEM;
1942 	}
1943 	configure_sge(sge, p);
1944 
1945 	/*
1946 	 * Now that we have sized the free lists calculate the payload
1947 	 * capacity of the large buffers.  Other parts of the driver use
1948 	 * this to set the max offload coalescing size so that RX packets
1949 	 * do not overflow our large buffers.
1950 	 */
1951 	p->large_buf_capacity = jumbo_payload_capacity(sge);
1952 	return 0;
1953 }
1954 
1955 /*
1956  * Disables the DMA engine.
1957  */
1958 void t1_sge_stop(struct sge *sge)
1959 {
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
1962 	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1963 
1964 	if (is_T2(sge->adapter))
1965 		del_timer_sync(&sge->espibug_timer);
1966 
1967 	del_timer_sync(&sge->tx_reclaim_timer);
1968 	if (sge->tx_sched)
1969 		tx_sched_stop(sge);
1970 
1971 	for (i = 0; i < MAX_NPORTS; i++)
1972 		kfree_skb(sge->espibug_skb[i]);
1973 }
1974 
1975 /*
1976  * Enables the DMA engine.
1977  */
1978 void t1_sge_start(struct sge *sge)
1979 {
1980 	refill_free_list(sge, &sge->freelQ[0]);
1981 	refill_free_list(sge, &sge->freelQ[1]);
1982 
1983 	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
1984 	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
1985 	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1986 
1987 	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1988 
1989 	if (is_T2(sge->adapter))
1990 		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1991 }
1992 
/*
 * Callback for the T204 variant of the T2 ESPI 'stuck packet' workaround.
 */
1996 static void espibug_workaround_t204(unsigned long data)
1997 {
1998 	struct adapter *adapter = (struct adapter *)data;
1999 	struct sge *sge = adapter->sge;
2000 	unsigned int nports = adapter->params.nports;
2001 	u32 seop[MAX_NPORTS];
2002 
2003 	if (adapter->open_device_map & PORT_MASK) {
2004 		int i;
2005 
2006 		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
2007 			return;
2008 
2009 		for (i = 0; i < nports; i++) {
2010 			struct sk_buff *skb = sge->espibug_skb[i];
2011 
2012 			if (!netif_running(adapter->port[i].dev) ||
2013 			    netif_queue_stopped(adapter->port[i].dev) ||
2014 			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
2015 				continue;
2016 
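			/*
			 * cb[0] doubles as a "MAC addresses already
			 * rewritten" flag: the first time around, stamp the
			 * Chelsio MAC address just past the CPL header
			 * (where the Ethernet header starts) and near the
			 * tail of the frame, then mark the skb so this is
			 * done only once.
			 */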
2017 			if (!skb->cb[0]) {
2018 				skb_copy_to_linear_data_offset(skb,
2019 						    sizeof(struct cpl_tx_pkt),
2020 							       ch_mac_addr,
2021 							       ETH_ALEN);
2022 				skb_copy_to_linear_data_offset(skb,
2023 							       skb->len - 10,
2024 							       ch_mac_addr,
2025 							       ETH_ALEN);
2026 				skb->cb[0] = 0xff;
2027 			}
2028 
2029 			/* bump the reference count to avoid freeing of
2030 			 * the skb once the DMA has completed.
2031 			 */
2032 			skb = skb_get(skb);
2033 			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2034 		}
2035 	}
2036 	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2037 }
2038 
2039 static void espibug_workaround(unsigned long data)
2040 {
2041 	struct adapter *adapter = (struct adapter *)data;
2042 	struct sge *sge = adapter->sge;
2043 
2044 	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr, ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr, ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
2067 	}
2068 	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2069 }
2070 
2071 /*
2072  * Creates a t1_sge structure and returns suggested resource parameters.
2073  */
2074 struct sge * __devinit t1_sge_create(struct adapter *adapter,
2075 				     struct sge_params *p)
2076 {
2077 	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
2078 	int i;
2079 
2080 	if (!sge)
2081 		return NULL;
2082 
2083 	sge->adapter = adapter;
2084 	sge->netdev = adapter->port[0].dev;
2085 	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
2086 	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
2087 
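	/*
	 * jumbo_fl selects which of the two free lists carries jumbo
	 * buffers: free list 1 on T1B, free list 0 otherwise.  The
	 * freelQ_size assignments below are indexed accordingly.
	 */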
2088 	for_each_port(adapter, i) {
2089 		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
2090 		if (!sge->port_stats[i])
2091 			goto nomem_port;
2092 	}
2093 
2094 	init_timer(&sge->tx_reclaim_timer);
2095 	sge->tx_reclaim_timer.data = (unsigned long)sge;
2096 	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
2097 
2098 	if (is_T2(sge->adapter)) {
2099 		init_timer(&sge->espibug_timer);
2100 
2101 		if (adapter->params.nports > 1) {
2102 			tx_sched_init(sge);
2103 			sge->espibug_timer.function = espibug_workaround_t204;
		} else {
			sge->espibug_timer.function = espibug_workaround;
		}
2106 		sge->espibug_timer.data = (unsigned long)sge->adapter;
2107 
2108 		sge->espibug_timeout = 1;
2109 		/* for T204, every 10ms */
2110 		if (adapter->params.nports > 1)
2111 			sge->espibug_timeout = HZ/100;
2112 	}
2113 
2115 	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
2116 	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
2117 	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
2118 	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
2119 	if (sge->tx_sched) {
2120 		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
2121 			p->rx_coalesce_usecs = 15;
2122 		else
2123 			p->rx_coalesce_usecs = 50;
	} else {
		p->rx_coalesce_usecs = 50;
	}
2126 
2127 	p->coalesce_enable = 0;
2128 	p->sample_interval_usecs = 0;
2129 
2130 	return sge;
2131 nomem_port:
2132 	while (i >= 0) {
2133 		free_percpu(sge->port_stats[i]);
2134 		--i;
2135 	}
2136 	kfree(sge);
2137 	return NULL;
2139 }
2140