/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");


#include <cxgb_include.h>

#undef msleep
#define msleep t3_os_sleep

/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
			int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

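/*
 * Illustrative sketch (not part of the original driver): how a caller might
 * poll a completion bit with t3_wait_op_done_val().  The register and field
 * names used here (A_EXAMPLE_OP, F_EXAMPLE_BUSY, G_EXAMPLE_STATUS) are
 * hypothetical stand-ins, not real T3 definitions.
 */
#if 0
static int example_poll_op(adapter_t *adap)
{
	u32 opval;

	/* Poll up to 100 times, 10us apart, for the busy bit to deassert. */
	if (t3_wait_op_done_val(adap, A_EXAMPLE_OP, F_EXAMPLE_BUSY, 0,
				100, 10, &opval))
		return -EAGAIN;
	return G_EXAMPLE_STATUS(opval);	/* status field sampled at completion */
}
#endif
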
/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
		   unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	(void) t3_read_reg(adapter, addr);      /* flush */
}

/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Reads @n 64-bit words from MC7 starting at word @start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
	static int shift[] = { 0, 0, 16, 24 };
	static int step[]  = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;  /* # of 64-bit words */
	adapter_t *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
				       start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						mc7->offset + A_MC7_BD_DATA0);
				val64 |= (u64)val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64)val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}

/*
 * Low-level I2C read and write routines.  These simply read and write a
 * single byte with the option of indicating a "continue" if another operation
 * is to be chained.  Generally most code will use higher-level routines to
 * read and write to I2C slave devices.
 */
#define I2C_ATTEMPTS 100

/*
 * Read an 8-bit value from the I2C bus.  If the "chained" parameter is
 * non-zero then a STOP bit will not be written after the read command.  On
 * error (the read timed out, etc.), a negative errno will be returned (e.g.
 * -EAGAIN, etc.).  On success, the 8-bit value read from the I2C bus is
 * stored into the buffer *valp and the value of the I2C ACK bit is returned
 * as a 0/1 value.
 */
int t3_i2c_read8(adapter_t *adapter, int chained, u8 *valp)
{
	int ret;
	u32 opval;
	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_I2C_OP,
		     F_I2C_READ | (chained ? F_I2C_CONT : 0));
	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
				  I2C_ATTEMPTS, 10, &opval);
	if (ret >= 0) {
		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
		*valp = G_I2C_DATA(t3_read_reg(adapter, A_I2C_DATA));
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

/*
 * Write an 8-bit value to the I2C bus.  If the "chained" parameter is
 * non-zero, then a STOP bit will not be written after the write command.  On
 * error (the write timed out, etc.), a negative errno will be returned (e.g.
 * -EAGAIN, etc.).  On success, the value of the I2C ACK bit is returned as a
 * 0/1 value.
 */
int t3_i2c_write8(adapter_t *adapter, int chained, u8 val)
{
	int ret;
	u32 opval;
	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_I2C_DATA, V_I2C_DATA(val));
	t3_write_reg(adapter, A_I2C_OP,
		     F_I2C_WRITE | (chained ? F_I2C_CONT : 0));
	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
				  I2C_ATTEMPTS, 10, &opval);
	if (ret >= 0)
		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
	MDIO_UNLOCK(adapter);
	return ret;
}

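/*
 * Illustrative sketch (not part of the original driver): a 16-bit I2C read
 * built from the low-level routines above.  The first read passes chained=1
 * so no STOP bit is issued, keeping the bus transaction open for the second
 * byte.  The ACK values carried in non-negative returns are ignored here.
 */
#if 0
static int example_i2c_read16(adapter_t *adap, u16 *valp)
{
	u8 hi, lo;
	int ret;

	ret = t3_i2c_read8(adap, 1, &hi);	/* chained: no STOP bit */
	if (ret < 0)
		return ret;
	ret = t3_i2c_read8(adap, 0, &lo);	/* final read: STOP bit sent */
	if (ret < 0)
		return ret;
	*valp = ((u16)hi << 8) | lo;
	return 0;
}
#endif
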
/*
 * Initialize MI1.
 */
static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
		int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
	return ret;
}

int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
		 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	MDIO_UNLOCK(adapter);
	return ret;
}

static struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};

/*
 * MI1 read/write operations for clause 45 PHYs.
 */
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

static struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

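/*
 * Illustrative sketch (not part of the original driver): a typical
 * read-modify-write through t3_mdio_change_bits(), clearing the power-down
 * bit and restarting auto-negotiation in the clause 22 control register.
 * mmd 0 selects clause 22 addressing through the MI1 ops above.
 */
#if 0
static int example_restart_aneg(struct cphy *phy)
{
	return t3_mdio_change_bits(phy, 0, MII_BMCR, BMCR_PDOWN,
				   BMCR_ANENABLE | BMCR_ANRESTART);
}
#endif
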
/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;	/* advertisement selector field: IEEE 802.3 (CSMA) */
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a fiber PHY's advertisement register to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);

	if (err)
		return err;
	return (status & 1) ? cphy_cause_link_change : 0;
}

static struct adapter_info t3_adap_info[] = {
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio PE9000" },
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio T302" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	  F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 1, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	  F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	  F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T320" },
	{ 4, 0, 0,
	  F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
	  F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
	  { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
	  &mi1_mdio_ops, "Chelsio T304" },
	{ 0 },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(pinfo_t *pinfo, int phy_addr,
			const struct mdio_ops *ops);
};

static struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ t3_mv88e1xxx_phy_prep },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ t3_tn1010_phy_prep },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

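/*
 * For reference, VPD_ENTRY(sn, SERNUM_LEN) expands to the standard PCI VPD
 * keyword layout: a 2-byte keyword, a 1-byte length, then the data bytes:
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[SERNUM_LEN];
 */
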
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[16];
	u8  vpdr_tag;
	u8  vpdr_len[2];
	VPD_ENTRY(pn, 16);                     /* part number */
	VPD_ENTRY(ec, ECNUM_LEN);              /* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);             /* serial number */
	VPD_ENTRY(na, 12);                     /* MAC address base */
	VPD_ENTRY(cclk, 6);                    /* core clock */
	VPD_ENTRY(mclk, 6);                    /* mem clock */
	VPD_ENTRY(uclk, 6);                    /* uP clk */
	VPD_ENTRY(mdc, 6);                     /* MDIO clk */
	VPD_ENTRY(mt, 2);                      /* mem timing */
	VPD_ENTRY(xaui0cfg, 6);                /* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);                /* XAUI1 config */
	VPD_ENTRY(port0, 2);                   /* PHY0 complex */
	VPD_ENTRY(port1, 2);                   /* PHY1 complex */
	VPD_ENTRY(port2, 2);                   /* PHY2 complex */
	VPD_ENTRY(port3, 2);                   /* PHY3 complex */
	VPD_ENTRY(rv, 1);                      /* csum */
	u32 pad;                  /* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.
 */
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(adapter_t *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

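/*
 * Illustrative sketch (not part of the original driver): updating a VPD
 * EEPROM word, dropping write protection around the write.  Addresses must
 * be 4-byte aligned, hence the mask.
 */
#if 0
static int example_update_vpd_word(adapter_t *adap, u32 addr, u32 word)
{
	int ret;

	ret = t3_seeprom_wp(adap, 0);		/* drop write protection */
	if (ret)
		return ret;
	ret = t3_seeprom_write(adap, addr & ~3, word);
	t3_seeprom_wp(adap, 1);			/* re-enable write protection */
	return ret;
}
#endif
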
/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 *	get_desc_len - get the length of a VPD descriptor.
 *	@adapter: the adapter
 *	@offset: first byte offset of the VPD descriptor
 *
 *	Retrieves the length of the small/large resource
 *	data type starting at offset.
 */
static int get_desc_len(adapter_t *adapter, u32 offset)
{
	u32 read_offset, tmp, shift, len = 0;
	u8 tag, buf[8];
	int ret;

	read_offset = offset & 0xfffffffc;
	shift = offset & 0x03;

	ret = t3_seeprom_read(adapter, read_offset, &tmp);
	if (ret < 0)
		return ret;

	*((u32 *)buf) = cpu_to_le32(tmp);

	tag = buf[shift];
	if (tag & 0x80) {
		ret = t3_seeprom_read(adapter, read_offset + 4, &tmp);
		if (ret < 0)
			return ret;

		*((u32 *)(&buf[4])) = cpu_to_le32(tmp);
		len = (buf[shift + 1] & 0xff) +
		      ((buf[shift + 2] << 8) & 0xff00) + 3;
	} else
		len = (tag & 0x07) + 1;

	return len;
}

/**
 *	is_end_tag - check if a VPD tag is the end tag.
 *	@adapter: the adapter
 *	@offset: first byte offset of the tag
 *
 *	Checks if the tag located at offset is the end tag.
 */
static int is_end_tag(adapter_t *adapter, u32 offset)
{
	u32 read_offset, shift, tmp;
	u8 buf[4];
	int ret;

	read_offset = offset & 0xfffffffc;
	shift = offset & 0x03;

	ret = t3_seeprom_read(adapter, read_offset, &tmp);
	if (ret)
		return ret;
	*((u32 *)buf) = cpu_to_le32(tmp);

	if (buf[shift] == 0x78)
		return 1;
	else
		return 0;
}

/**
 *	t3_get_vpd_len - computes the length of a VPD structure
 *	@adapter: the adapter
 *	@vpd: contains the offset of the first byte of the VPD
 *
 *	Computes the length of the VPD structure starting at vpd->offset.
 */

int t3_get_vpd_len(adapter_t *adapter, struct generic_vpd *vpd)
{
	u32 len = 0, offset;
	int inc, ret;

	offset = vpd->offset;

	while (offset < (vpd->offset + MAX_VPD_BYTES)) {
		ret = is_end_tag(adapter, offset);
		if (ret < 0)
			return ret;
		else if (ret == 1)
			break;

		inc = get_desc_len(adapter, offset);
		if (inc < 0)
			return inc;
		len += inc;
		offset += inc;
	}
	return (len + 1);
}

/**
 *	t3_read_vpd - reads the stream of bytes containing a VPD structure
 *	@adapter: the adapter
 *	@vpd: contains the buffer that will hold the stream of bytes
 *
 *	Reads the VPD structure starting at vpd->offset into vpd->data;
 *	the length of the byte stream to read is vpd->len.
 */

int t3_read_vpd(adapter_t *adapter, struct generic_vpd *vpd)
{
	u32 i;
	int ret;

	for (i = 0; i < vpd->len; i += 4) {
		ret = t3_seeprom_read(adapter, vpd->offset + i,
				      (u32 *) &(vpd->data[i]));
		if (ret)
			return ret;
	}

	return 0;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
	memcpy(p->ec, vpd.ec_data, ECNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
		p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
		p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
		p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* BIOS boot header */
typedef struct boot_header_s {
	u8	signature[2];	/* signature */
	u8	length;		/* image length (includes header) */
	u8	offset[4];	/* initialization vector */
	u8	reserved[19];	/* reserved */
	u8	exheader[2];	/* offset to expansion header */
} boot_header_t;

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,           /* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,   /* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,       /* program page */
	SF_WR_DISABLE   = 4,       /* disable writes */
	SF_RD_STATUS    = 5,       /* read status register */
	SF_WR_ENABLE    = 6,       /* enable writes */
	SF_RD_DATA_FAST = 0xb,     /* read flash */
	SF_ERASE_SECTOR = 0xd8,    /* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
	FW_VERS_ADDR_PRE8 = 0x77ffc, /* flash address holding FW version pre8 */
	FW_MIN_SIZE = 8,           /* at least version and csum */
	FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
	FW_MAX_SIZE_PRE8 = FW_VERS_ADDR_PRE8 - FW_FLASH_BOOT_ADDR,

	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
	BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC /* 1 byte * length increment */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
		  u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

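/*
 * Illustrative sketch (not part of the original driver): reading the
 * firmware version word from flash with t3_read_flash().  byte_oriented is
 * 0 because the version is consumed as a 32-bit word in host byte order.
 * This mirrors what t3_get_fw_version() below does for the new location.
 */
#if 0
static int example_read_fw_version(adapter_t *adap, u32 *vers)
{
	return t3_read_flash(adap, FW_VERS_ADDR, 1, vers, 0);
}
#endif
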
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  If @byte_oriented is set the write data is
 *	stored as a 32-bit big-endian array, otherwise in the processor's
 *	native endianness.
 */
static int t3_write_flash(adapter_t *adapter, unsigned int addr,
			  unsigned int n, const u8 *data,
			  int byte_oriented)
{
	int ret;
	u32 buf[64];
	unsigned int c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		val = *(const u32*)data;
		data += c;
		if (byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}

/**
 *	t3_get_tp_version - read the TP SRAM version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol SRAM version from SRAM.
 */
int t3_get_tp_version(adapter_t *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 *	t3_check_tpsram_version - check the TP SRAM version
 *	@adapter: the adapter
 *
 *	Checks that the version of protocol SRAM loaded on the adapter matches
 *	the version the driver was compiled against.  Returns 0 on a match,
 *	a negative error otherwise.
 */
int t3_check_tpsram_version(adapter_t *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;
	else {
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 *	t3_check_tpsram - check if provided protocol SRAM
 *			  is compatible with this driver
 *	@adapter: the adapter
 *	@tp_sram: the protocol SRAM image to check
 *	@size: image size
 *
 *	Checks if a protocol SRAM image is compatible with the driver.
 *	Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}

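/*
 * Illustrative sketch (not part of the original driver): how an image that
 * passes the 1's complement check above is laid out.  The final big-endian
 * word is chosen so the 32-bit sum of all words equals 0xffffffff.
 */
#if 0
static void example_append_csum(u32 *image, unsigned int nwords)
{
	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < nwords - 1; i++)
		sum += ntohl(image[i]);
	image[nwords - 1] = htonl(0xffffffff - sum);	/* closing checksum */
}
#endif
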
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash. Note that we had to move the version
 *	due to FW size. If we don't find a valid FW version in the new location
 *	we fall back and read the old location.
 */
int t3_get_fw_version(adapter_t *adapter, u32 *vers)
{
	int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);

	if (!ret && *vers != 0xffffffff)
		return 0;
	else
		return t3_read_flash(adapter, FW_VERS_ADDR_PRE8, 1, vers, 0);
}

/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(adapter_t *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
		        "driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
		        "driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
{
	u32 version, csum, fw_version_addr;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size - 8 > FW_MAX_SIZE)
		return -EFBIG;

	version = ntohl(*(const u32 *)(fw_data + size - 8));
	if (G_FW_VERSION_MAJOR(version) < 8) {
		fw_version_addr = FW_VERS_ADDR_PRE8;
		if (size - 8 > FW_MAX_SIZE_PRE8)
			return -EFBIG;
	} else
		fw_version_addr = FW_VERS_ADDR;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;  /* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, fw_version_addr, 4, fw_data, 1);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}

/**
 *	t3_load_boot - download boot flash
 *	@adapter: the adapter
 *	@boot_data: the boot image to write
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
{
	boot_header_t *header = (boot_header_t *)boot_data;
	int ret;
	unsigned int addr;
	unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
	unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adapter, "boot image too small/large\n");
		return -EFBIG;
	}
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adapter, "boot image missing signature\n");
		return -EINVAL;
	}
	if (header->length * BOOT_SIZE_INC != size) {
		CH_ERR(adapter, "boot image header length != image length\n");
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
	if (ret)
		goto out;

	for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
		if (ret)
			goto out;

		addr += chunk_size;
		boot_data += chunk_size;
		size -= chunk_size;
	}

out:
	if (ret)
		CH_ERR(adapter, "boot image download failed, error %d\n", ret);
	return ret;
}

#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
			unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG + mac->offset);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG + mac->offset,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH +
	    mac->offset);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW +
	    mac->offset);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG + mac->offset,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset,
	    rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset,
	    rx_hash_low);
}

static int t3_detect_link_fault(adapter_t *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	uint32_t rx_cfg, rx_hash_high, rx_hash_low;
	int link_fault;

	/* stop rx */
	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);

	/* clear status and make sure intr is enabled */
	(void) t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
	t3_xgm_intr_enable(adapter, port_id);

	/* restart rx */
	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
	return (link_fault & F_LINKFAULTCHANGE ? 1 : 0);
}

static void t3_clear_faults(adapter_t *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;

	if (adapter->params.nports <= 2) {
		t3_xgm_intr_disable(adapter, pi->port_id);
		t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, F_XGM_INT);
		t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adapter, pi->port_id);
	}
}

/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(adapter_t *adapter, int port_id)
{
	int link_ok, speed, duplex, fc, link_fault, link_state;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;
	link_fault = 0;

	phy->ops->get_link_status(phy, &link_state, &speed, &duplex, &fc);
	link_ok = (link_state == PHY_LINK_UP);
	if (link_state != PHY_LINK_PARTIAL)
		phy->rst = 0;
	else if (++phy->rst == 3) {
		phy->ops->reset(phy, 0);
		phy->rst = 0;
	}

	if (link_ok == 0)
		pi->link_fault = LF_NO;

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	/* Update mac speed before checking for link fault. */
	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE &&
	    (speed != lc->speed || duplex != lc->duplex || fc != lc->fc))
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);

	/*
	 * Check for link faults if any of these is true:
	 * a) A link fault is suspected, and PHY says link ok
	 * b) PHY link transitioned from down -> up
	 */
	if (adapter->params.nports <= 2 &&
	    ((pi->link_fault && link_ok) || (!lc->link_ok && link_ok))) {

		link_fault = t3_detect_link_fault(adapter, port_id);
		if (link_fault) {
			if (pi->link_fault != LF_YES) {
				mac->stats.link_faults++;
				pi->link_fault = LF_YES;
			}

			if (uses_xaui(adapter)) {
				if (adapter->params.rev >= T3_REV_C)
					t3c_pcs_force_los(mac);
				else
					t3b_pcs_reset(mac);
			}

			/* Don't report link up */
			link_ok = 0;
		} else {
			/* clear faults here if this was a false alarm. */
			if (pi->link_fault == LF_MAYBE &&
			    link_ok && lc->link_ok)
				t3_clear_faults(adapter, port_id);

			pi->link_fault = LF_NO;
		}
	}

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;                            /* nothing changed */

	lc->link_ok = (unsigned char)link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	lc->fc = fc;

	if (link_ok) {

		/* down -> up, or up -> up with changed settings */

		if (adapter->params.rev > 0 && uses_xaui(adapter)) {

			if (adapter->params.rev >= T3_REV_C)
				t3c_pcs_force_los(mac);
			else
				t3b_pcs_reset(mac);

			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);
		}

		/* disable TX FIFO drain */
		t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset,
				 F_ENDROPPKT, 0);

		t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
		t3_set_reg_field(adapter, A_XGM_STAT_CTRL + mac->offset,
				 F_CLRSTATS, 1);
		t3_clear_faults(adapter, port_id);

	} else {

		/* up -> down */

		if (adapter->params.rev > 0 && uses_xaui(adapter)) {
			t3_write_reg(adapter,
				     A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
		}

		t3_xgm_intr_disable(adapter, pi->port_id);
		if (adapter->params.nports <= 2) {
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + mac->offset,
					 F_XGM_INT, 0);

			t3_mac_disable(mac, MAC_DIRECTION_RX);

			/*
			 * Make sure Tx FIFO continues to drain, even as rxen is
			 * left high to help detect and indicate remote faults.
			 */
			t3_set_reg_field(adapter,
			    A_XGM_TXFIFO_CFG + mac->offset, 0, F_ENDROPPKT);
			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
			t3_write_reg(adapter,
			    A_XGM_TX_CTRL + mac->offset, F_TXEN);
			t3_write_reg(adapter,
			    A_XGM_RX_CTRL + mac->offset, F_RXEN);
		}
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc,
	    mac->was_reset);
	mac->was_reset = 0;
}

/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}

		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			/* PR 5666. Power phy up when doing an ifup */
			if (!is_10G(phy->adapter))
				phy->ops->power_down(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
1730 
1731 /**
1732  *	t3_handle_intr_status - table driven interrupt handler
1733  *	@adapter: the adapter that generated the interrupt
1734  *	@reg: the interrupt status register to process
1735  *	@mask: a mask to apply to the interrupt status
1736  *	@acts: table of interrupt actions
1737  *	@stats: statistics counters tracking interrupt occurrences
1738  *
1739  *	A table driven interrupt handler that applies a set of masks to an
1740  *	interrupt status word and performs the corresponding actions if the
1741  *	interrupts described by the mask have occurred.  The actions include
1742  *	optionally printing a warning or alert message, and optionally
1743  *	incrementing a stat counter.  The table is terminated by an entry
1744  *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1745  */
1746 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1747 				 unsigned int mask,
1748 				 const struct intr_info *acts,
1749 				 unsigned long *stats)
1750 {
1751 	int fatal = 0;
1752 	unsigned int status = t3_read_reg(adapter, reg) & mask;
1753 
1754 	for ( ; acts->mask; ++acts) {
1755 		if (!(status & acts->mask)) continue;
1756 		if (acts->fatal) {
1757 			fatal++;
1758 			CH_ALERT(adapter, "%s (0x%x)\n",
1759 				 acts->msg, status & acts->mask);
1760 			status &= ~acts->mask;
1761 		} else if (acts->msg)
1762 			CH_WARN(adapter, "%s (0x%x)\n",
1763 				acts->msg, status & acts->mask);
1764 		if (acts->stat_idx >= 0)
1765 			stats[acts->stat_idx]++;
1766 	}
1767 	if (status)                           /* clear processed interrupts */
1768 		t3_write_reg(adapter, reg, status);
1769 	return fatal;
1770 }
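
/*
 * Example (editor's sketch): a minimal action table and its use.  The names
 * F_SOMEERR and A_SOME_INT_CAUSE are hypothetical; an entry with mask 0
 * terminates the table and a stat_idx of -1 skips the counter update:
 *
 *	static struct intr_info example_intr_info[] = {
 *		{ F_SOMEERR, "example fatal error", -1, 1 },
 *		{ 0 }
 *	};
 *
 *	if (t3_handle_intr_status(adapter, A_SOME_INT_CAUSE, 0xffffffff,
 *				  example_intr_info, NULL))
 *		t3_fatal_err(adapter);
 */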
1771 
1772 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1773 		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1774 		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1775 		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1776 		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1777 		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1778 		       F_HIRCQPARITYERROR)
1779 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1780 		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1781 		       F_NFASRCHFAIL)
1782 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1783 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1784 		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1785 		       F_TXFIFO_UNDERRUN)
1786 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1787 			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1788 			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1789 			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1790 			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1791 			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1792 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1793 			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1794 			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1795 			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1796 			F_TXPARERR | V_BISTERR(M_BISTERR))
1797 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1798 			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1799 			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1800 #define ULPTX_INTR_MASK 0xfc
1801 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1802 			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1803 			 F_ZERO_SWITCH_ERROR)
1804 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1805 		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1806 		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1807 		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1808 		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1809 		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1810 		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1811 		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1812 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1813 			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1814 			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1815 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1816 			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1817 			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1818 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1819 		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1820 		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1821 		       V_MCAPARERRENB(M_MCAPARERRENB))
1822 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1823 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1824 		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1825 		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1826 		      F_MPS0 | F_CPL_SWITCH)
1827 /*
1828  * Interrupt handler for the PCIX1 module.
1829  */
1830 static void pci_intr_handler(adapter_t *adapter)
1831 {
1832 	static struct intr_info pcix1_intr_info[] = {
1833 		{ F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1834 		{ F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1835 		{ F_RCVTARABT, "PCI received target abort", -1, 1 },
1836 		{ F_RCVMSTABT, "PCI received master abort", -1, 1 },
1837 		{ F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1838 		{ F_DETPARERR, "PCI detected parity error", -1, 1 },
1839 		{ F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1840 		{ F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1841 		{ F_RCVSPLCMPERR, "PCI received split completion error", -1,
1842 		  1 },
1843 		{ F_DETCORECCERR, "PCI correctable ECC error",
1844 		  STAT_PCI_CORR_ECC, 0 },
1845 		{ F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1846 		{ F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1847 		{ V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1848 		  1 },
1849 		{ V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1850 		  1 },
1851 		{ V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1852 		  1 },
1853 		{ V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1854 		  "error", -1, 1 },
1855 		{ 0 }
1856 	};
1857 
1858 	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1859 				  pcix1_intr_info, adapter->irq_stats))
1860 		t3_fatal_err(adapter);
1861 }
1862 
1863 /*
1864  * Interrupt handler for the PCIE module.
1865  */
1866 static void pcie_intr_handler(adapter_t *adapter)
1867 {
1868 	static struct intr_info pcie_intr_info[] = {
1869 		{ F_PEXERR, "PCI PEX error", -1, 1 },
1870 		{ F_UNXSPLCPLERRR,
1871 		  "PCI unexpected split completion DMA read error", -1, 1 },
1872 		{ F_UNXSPLCPLERRC,
1873 		  "PCI unexpected split completion DMA command error", -1, 1 },
1874 		{ F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1875 		{ F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1876 		{ F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1877 		{ F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1878 		{ V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1879 		  "PCI MSI-X table/PBA parity error", -1, 1 },
1880 		{ F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1881 		{ F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1882 		{ F_RXPARERR, "PCI Rx parity error", -1, 1 },
1883 		{ F_TXPARERR, "PCI Tx parity error", -1, 1 },
1884 		{ V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1885 		{ 0 }
1886 	};
1887 
1888 	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1889 		CH_ALERT(adapter, "PEX error code 0x%x\n",
1890 			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1891 
1892 	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1893 				  pcie_intr_info, adapter->irq_stats))
1894 		t3_fatal_err(adapter);
1895 }
1896 
1897 /*
1898  * TP interrupt handler.
1899  */
1900 static void tp_intr_handler(adapter_t *adapter)
1901 {
1902 	static struct intr_info tp_intr_info[] = {
1903 		{ 0xffffff,  "TP parity error", -1, 1 },
1904 		{ 0x1000000, "TP out of Rx pages", -1, 1 },
1905 		{ 0x2000000, "TP out of Tx pages", -1, 1 },
1906 		{ 0 }
1907 	};
1908 	static struct intr_info tp_intr_info_t3c[] = {
1909 		{ 0x1fffffff,  "TP parity error", -1, 1 },
1910 		{ F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1911 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1912 		{ 0 }
1913 	};
1914 
1915 	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1916 				  adapter->params.rev < T3_REV_C ?
1917 					tp_intr_info : tp_intr_info_t3c, NULL))
1918 		t3_fatal_err(adapter);
1919 }
1920 
1921 /*
1922  * CIM interrupt handler.
1923  */
1924 static void cim_intr_handler(adapter_t *adapter)
1925 {
1926 	static struct intr_info cim_intr_info[] = {
1927 		{ F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1928 		{ F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1929 		{ F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1930 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1931 		{ F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1932 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1933 		{ F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1934 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1935 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1936 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1937 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1938 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1939 		{ F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1940 		{ F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1941 		{ F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1942 		{ F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1943 		{ F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1944 		{ F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1945 		{ F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1946 		{ F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1947 		{ F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1948 		{ F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1949 		{ F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1950 		{ F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1951 		{ 0 }
1952 	};
1953 
1954 	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1955 				  cim_intr_info, NULL))
1956 		t3_fatal_err(adapter);
1957 }
1958 
1959 /*
1960  * ULP RX interrupt handler.
1961  */
1962 static void ulprx_intr_handler(adapter_t *adapter)
1963 {
1964 	static struct intr_info ulprx_intr_info[] = {
1965 		{ F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1966 		{ F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1967 		{ F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1968 		{ F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1969 		{ F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1970 		{ F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1971 		{ F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1972 		{ F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1973 		{ 0 }
1974 	};
1975 
1976 	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1977 				  ulprx_intr_info, NULL))
1978 		t3_fatal_err(adapter);
1979 }
1980 
1981 /*
1982  * ULP TX interrupt handler.
1983  */
1984 static void ulptx_intr_handler(adapter_t *adapter)
1985 {
1986 	static struct intr_info ulptx_intr_info[] = {
1987 		{ F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1988 		  STAT_ULP_CH0_PBL_OOB, 0 },
1989 		{ F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1990 		  STAT_ULP_CH1_PBL_OOB, 0 },
1991 		{ 0xfc, "ULP TX parity error", -1, 1 },
1992 		{ 0 }
1993 	};
1994 
1995 	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1996 				  ulptx_intr_info, adapter->irq_stats))
1997 		t3_fatal_err(adapter);
1998 }
1999 
2000 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
2001 	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
2002 	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
2003 	F_ICSPI1_TX_FRAMING_ERROR)
2004 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
2005 	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
2006 	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
2007 	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
2008 
2009 /*
2010  * PM TX interrupt handler.
2011  */
2012 static void pmtx_intr_handler(adapter_t *adapter)
2013 {
2014 	static struct intr_info pmtx_intr_info[] = {
2015 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2016 		{ ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
2017 		{ OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
2018 		{ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
2019 		  "PMTX ispi parity error", -1, 1 },
2020 		{ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
2021 		  "PMTX ospi parity error", -1, 1 },
2022 		{ 0 }
2023 	};
2024 
2025 	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
2026 				  pmtx_intr_info, NULL))
2027 		t3_fatal_err(adapter);
2028 }
2029 
2030 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
2031 	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
2032 	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
2033 	F_IESPI1_TX_FRAMING_ERROR)
2034 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
2035 	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
2036 	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
2037 	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
2038 
2039 /*
2040  * PM RX interrupt handler.
2041  */
2042 static void pmrx_intr_handler(adapter_t *adapter)
2043 {
2044 	static struct intr_info pmrx_intr_info[] = {
2045 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2046 		{ IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
2047 		{ OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
2048 		{ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
2049 		  "PMRX ispi parity error", -1, 1 },
2050 		{ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
2051 		  "PMRX ospi parity error", -1, 1 },
2052 		{ 0 }
2053 	};
2054 
2055 	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
2056 				  pmrx_intr_info, NULL))
2057 		t3_fatal_err(adapter);
2058 }
2059 
2060 /*
2061  * CPL switch interrupt handler.
2062  */
2063 static void cplsw_intr_handler(adapter_t *adapter)
2064 {
2065 	static struct intr_info cplsw_intr_info[] = {
2066 		{ F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
2067 		{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
2068 		{ F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
2069 		{ F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
2070 		{ F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
2071 		{ F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
2072 		{ 0 }
2073 	};
2074 
2075 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
2076 				  cplsw_intr_info, NULL))
2077 		t3_fatal_err(adapter);
2078 }
2079 
2080 /*
2081  * MPS interrupt handler.
2082  */
2083 static void mps_intr_handler(adapter_t *adapter)
2084 {
2085 	static struct intr_info mps_intr_info[] = {
2086 		{ 0x1ff, "MPS parity error", -1, 1 },
2087 		{ 0 }
2088 	};
2089 
2090 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
2091 				  mps_intr_info, NULL))
2092 		t3_fatal_err(adapter);
2093 }
2094 
2095 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
2096 
2097 /*
2098  * MC7 interrupt handler.
2099  */
2100 static void mc7_intr_handler(struct mc7 *mc7)
2101 {
2102 	adapter_t *adapter = mc7->adapter;
2103 	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
2104 
2105 	if (cause & F_CE) {
2106 		mc7->stats.corr_err++;
2107 		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
2108 			"data 0x%x 0x%x 0x%x\n", mc7->name,
2109 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
2110 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
2111 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
2112 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
2113 	}
2114 
2115 	if (cause & F_UE) {
2116 		mc7->stats.uncorr_err++;
2117 		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
2118 			 "data 0x%x 0x%x 0x%x\n", mc7->name,
2119 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
2120 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
2121 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
2122 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
2123 	}
2124 
2125 	if (G_PE(cause)) {
2126 		mc7->stats.parity_err++;
2127 		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
2128 			 mc7->name, G_PE(cause));
2129 	}
2130 
2131 	if (cause & F_AE) {
2132 		u32 addr = 0;
2133 
2134 		if (adapter->params.rev > 0)
2135 			addr = t3_read_reg(adapter,
2136 					   mc7->offset + A_MC7_ERR_ADDR);
2137 		mc7->stats.addr_err++;
2138 		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
2139 			 mc7->name, addr);
2140 	}
2141 
2142 	if (cause & MC7_INTR_FATAL)
2143 		t3_fatal_err(adapter);
2144 
2145 	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
2146 }
2147 
2148 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
2149 			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
2150 /*
2151  * XGMAC interrupt handler.
2152  */
2153 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
2154 {
2155 	u32 cause;
2156 	struct port_info *pi;
2157 	struct cmac *mac;
2158 
2159 	idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
2160 	pi = adap2pinfo(adap, idx);
2161 	mac = &pi->mac;
2162 
2163 	/*
2164 	 * We mask out interrupt causes for which we're not taking interrupts.
2165 	 * This allows us to use polling logic to monitor some of the other
2166 	 * conditions when taking interrupts would impose too much load on the
2167 	 * system.
2168 	 */
2169 	cause = (t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset)
2170 		 & ~(F_RXFIFO_OVERFLOW));
2171 
2172 	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
2173 		mac->stats.tx_fifo_parity_err++;
2174 		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
2175 	}
2176 	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
2177 		mac->stats.rx_fifo_parity_err++;
2178 		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
2179 	}
2180 	if (cause & F_TXFIFO_UNDERRUN)
2181 		mac->stats.tx_fifo_urun++;
2182 	if (cause & F_RXFIFO_OVERFLOW)
2183 		mac->stats.rx_fifo_ovfl++;
2184 	if (cause & V_SERDES_LOS(M_SERDES_LOS))
2185 		mac->stats.serdes_signal_loss++;
2186 	if (cause & F_XAUIPCSCTCERR)
2187 		mac->stats.xaui_pcs_ctc_err++;
2188 	if (cause & F_XAUIPCSALIGNCHANGE)
2189 		mac->stats.xaui_pcs_align_change++;
2190 	if (cause & F_XGM_INT &
2191 	    t3_read_reg(adap, A_XGM_INT_ENABLE + mac->offset)) {
2192 		t3_set_reg_field(adap, A_XGM_INT_ENABLE + mac->offset,
2193 		    F_XGM_INT, 0);
2194 
2195 		/* link fault suspected */
2196 		pi->link_fault = LF_MAYBE;
2197 		t3_os_link_intr(pi);
2198 	}
2199 
2200 	if (cause & XGM_INTR_FATAL)
2201 		t3_fatal_err(adap);
2202 
2203 	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
2204 	return cause != 0;
2205 }
2206 
2207 /*
2208  * Interrupt handler for PHY events.
2209  */
2210 static int phy_intr_handler(adapter_t *adapter)
2211 {
2212 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
2213 
2214 	for_each_port(adapter, i) {
2215 		struct port_info *p = adap2pinfo(adapter, i);
2216 
2217 		if (!(p->phy.caps & SUPPORTED_IRQ))
2218 			continue;
2219 
2220 		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
2221 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
2222 
2223 			if (phy_cause & cphy_cause_link_change)
2224 				t3_os_link_intr(p);
2225 			if (phy_cause & cphy_cause_fifo_error)
2226 				p->phy.fifo_errors++;
2227 			if (phy_cause & cphy_cause_module_change)
2228 				t3_os_phymod_changed(adapter, i);
2229 			if (phy_cause & cphy_cause_alarm)
2230 				CH_WARN(adapter, "Operation affected due to "
2231 				    "adverse environment.  Check the spec "
2232 				    "sheet for corrective action.");
2233 		}
2234 	}
2235 
2236 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
2237 	return 0;
2238 }
2239 
2240 /**
2241  *	t3_slow_intr_handler - control path interrupt handler
2242  *	@adapter: the adapter
2243  *
2244  *	T3 interrupt handler for non-data interrupt events, e.g., errors.
2245  *	The designation 'slow' reflects that this path requires register
2246  *	reads, while data interrupts typically don't involve any MMIOs.
2247  */
2248 int t3_slow_intr_handler(adapter_t *adapter)
2249 {
2250 	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
2251 
2252 	cause &= adapter->slow_intr_mask;
2253 	if (!cause)
2254 		return 0;
2255 	if (cause & F_PCIM0) {
2256 		if (is_pcie(adapter))
2257 			pcie_intr_handler(adapter);
2258 		else
2259 			pci_intr_handler(adapter);
2260 	}
2261 	if (cause & F_SGE3)
2262 		t3_sge_err_intr_handler(adapter);
2263 	if (cause & F_MC7_PMRX)
2264 		mc7_intr_handler(&adapter->pmrx);
2265 	if (cause & F_MC7_PMTX)
2266 		mc7_intr_handler(&adapter->pmtx);
2267 	if (cause & F_MC7_CM)
2268 		mc7_intr_handler(&adapter->cm);
2269 	if (cause & F_CIM)
2270 		cim_intr_handler(adapter);
2271 	if (cause & F_TP1)
2272 		tp_intr_handler(adapter);
2273 	if (cause & F_ULP2_RX)
2274 		ulprx_intr_handler(adapter);
2275 	if (cause & F_ULP2_TX)
2276 		ulptx_intr_handler(adapter);
2277 	if (cause & F_PM1_RX)
2278 		pmrx_intr_handler(adapter);
2279 	if (cause & F_PM1_TX)
2280 		pmtx_intr_handler(adapter);
2281 	if (cause & F_CPL_SWITCH)
2282 		cplsw_intr_handler(adapter);
2283 	if (cause & F_MPS0)
2284 		mps_intr_handler(adapter);
2285 	if (cause & F_MC5A)
2286 		t3_mc5_intr_handler(&adapter->mc5);
2287 	if (cause & F_XGMAC0_0)
2288 		mac_intr_handler(adapter, 0);
2289 	if (cause & F_XGMAC0_1)
2290 		mac_intr_handler(adapter, 1);
2291 	if (cause & F_T3DBG)
2292 		phy_intr_handler(adapter);
2293 
2294 	/* Clear the interrupts just processed. */
2295 	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
2296 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2297 	return 1;
2298 }
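
/*
 * Example (editor's sketch): a zero return means no enabled slow-path
 * cause was pending, so an ISR can bail out early:
 *
 *	if (!t3_slow_intr_handler(adapter))
 *		return;
 */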
2299 
2300 static unsigned int calc_gpio_intr(adapter_t *adap)
2301 {
2302 	unsigned int i, gpi_intr = 0;
2303 
2304 	for_each_port(adap, i)
2305 		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
2306 		    adapter_info(adap)->gpio_intr[i])
2307 			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
2308 	return gpi_intr;
2309 }
2310 
2311 /**
2312  *	t3_intr_enable - enable interrupts
2313  *	@adapter: the adapter whose interrupts should be enabled
2314  *
2315  *	Enable interrupts by setting the interrupt enable registers of the
2316  *	various HW modules and then enabling the top-level interrupt
2317  *	concentrator.
2318  */
2319 void t3_intr_enable(adapter_t *adapter)
2320 {
2321 	static struct addr_val_pair intr_en_avp[] = {
2322 		{ A_MC7_INT_ENABLE, MC7_INTR_MASK },
2323 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2324 			MC7_INTR_MASK },
2325 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2326 			MC7_INTR_MASK },
2327 		{ A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
2328 		{ A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
2329 		{ A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
2330 		{ A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
2331 		{ A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
2332 		{ A_MPS_INT_ENABLE, MPS_INTR_MASK },
2333 	};
2334 
2335 	adapter->slow_intr_mask = PL_INTR_MASK;
2336 
2337 	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
2338 	t3_write_reg(adapter, A_TP_INT_ENABLE,
2339 		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
2340 	t3_write_reg(adapter, A_SG_INT_ENABLE, SGE_INTR_MASK);
2341 
2342 	if (adapter->params.rev > 0) {
2343 		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
2344 			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
2345 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
2346 			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
2347 			     F_PBL_BOUND_ERR_CH1);
2348 	} else {
2349 		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
2350 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
2351 	}
2352 
2353 	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
2354 
2355 	if (is_pcie(adapter))
2356 		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2357 	else
2358 		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2359 	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2360 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);          /* flush */
2361 }
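
/*
 * Example (editor's sketch): a typical bring-up sequence clears stale
 * causes before enabling the interrupt concentrator:
 *
 *	t3_intr_clear(adapter);
 *	t3_intr_enable(adapter);
 */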
2362 
2363 /**
2364  *	t3_intr_disable - disable a card's interrupts
2365  *	@adapter: the adapter whose interrupts should be disabled
2366  *
2367  *	Disable interrupts.  We only disable the top-level interrupt
2368  *	concentrator and the SGE data interrupts.
2369  */
2370 void t3_intr_disable(adapter_t *adapter)
2371 {
2372 	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2373 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);  /* flush */
2374 	adapter->slow_intr_mask = 0;
2375 }
2376 
2377 /**
2378  *	t3_intr_clear - clear all interrupts
2379  *	@adapter: the adapter whose interrupts should be cleared
2380  *
2381  *	Clears all interrupts.
2382  */
2383 void t3_intr_clear(adapter_t *adapter)
2384 {
2385 	static const unsigned int cause_reg_addr[] = {
2386 		A_SG_INT_CAUSE,
2387 		A_SG_RSPQ_FL_STATUS,
2388 		A_PCIX_INT_CAUSE,
2389 		A_MC7_INT_CAUSE,
2390 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2391 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2392 		A_CIM_HOST_INT_CAUSE,
2393 		A_TP_INT_CAUSE,
2394 		A_MC5_DB_INT_CAUSE,
2395 		A_ULPRX_INT_CAUSE,
2396 		A_ULPTX_INT_CAUSE,
2397 		A_CPL_INTR_CAUSE,
2398 		A_PM1_TX_INT_CAUSE,
2399 		A_PM1_RX_INT_CAUSE,
2400 		A_MPS_INT_CAUSE,
2401 		A_T3DBG_INT_CAUSE,
2402 	};
2403 	unsigned int i;
2404 
2405 	/* Clear PHY and MAC interrupts for each port. */
2406 	for_each_port(adapter, i)
2407 		t3_port_intr_clear(adapter, i);
2408 
2409 	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2410 		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2411 
2412 	if (is_pcie(adapter))
2413 		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2414 	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2415 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0);          /* flush */
2416 }
2417 
2418 void t3_xgm_intr_enable(adapter_t *adapter, int idx)
2419 {
2420 	struct port_info *pi = adap2pinfo(adapter, idx);
2421 
2422 	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2423 		     XGM_EXTRA_INTR_MASK);
2424 }
2425 
2426 void t3_xgm_intr_disable(adapter_t *adapter, int idx)
2427 {
2428 	struct port_info *pi = adap2pinfo(adapter, idx);
2429 
2430 	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2431 		     0x7ff);
2432 }
2433 
2434 /**
2435  *	t3_port_intr_enable - enable port-specific interrupts
2436  *	@adapter: associated adapter
2437  *	@idx: index of port whose interrupts should be enabled
2438  *
2439  *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2440  *	adapter port.
2441  */
2442 void t3_port_intr_enable(adapter_t *adapter, int idx)
2443 {
2444 	struct port_info *pi = adap2pinfo(adapter, idx);
2445 
2446 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2447 	pi->phy.ops->intr_enable(&pi->phy);
2448 }
2449 
2450 /**
2451  *	t3_port_intr_disable - disable port-specific interrupts
2452  *	@adapter: associated adapter
2453  *	@idx: index of port whose interrupts should be disabled
2454  *
2455  *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2456  *	adapter port.
2457  */
2458 void t3_port_intr_disable(adapter_t *adapter, int idx)
2459 {
2460 	struct port_info *pi = adap2pinfo(adapter, idx);
2461 
2462 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2463 	pi->phy.ops->intr_disable(&pi->phy);
2464 }
2465 
2466 /**
2467  *	t3_port_intr_clear - clear port-specific interrupts
2468  *	@adapter: associated adapter
2469  *	@idx: index of port whose interrupts to clear
2470  *
2471  *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2472  *	adapter port.
2473  */
2474 void t3_port_intr_clear(adapter_t *adapter, int idx)
2475 {
2476 	struct port_info *pi = adap2pinfo(adapter, idx);
2477 
2478 	t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2479 	pi->phy.ops->intr_clear(&pi->phy);
2480 }
2481 
2482 #define SG_CONTEXT_CMD_ATTEMPTS 100
2483 
2484 /**
2485  * 	t3_sge_write_context - write an SGE context
2486  * 	@adapter: the adapter
2487  * 	@id: the context id
2488  * 	@type: the context type
2489  *
2490  * 	Program an SGE context with the values already loaded in the
2491  * 	CONTEXT_DATA? registers.
2492  */
2493 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
2494 				unsigned int type)
2495 {
2496 	if (type == F_RESPONSEQ) {
2497 		/*
2498 		 * Can't write the Response Queue Context bits for
2499 		 * Interrupt Armed or the Reserve bits after the chip
2500 		 * has been initialized out of reset.  Writing to these
2501 		 * bits can confuse the hardware.
2502 		 */
2503 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2504 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2505 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2506 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2507 	} else {
2508 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2509 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2510 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2511 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2512 	}
2513 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2514 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2515 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2516 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2517 }
2518 
2519 /**
2520  *	clear_sge_ctxt - completely clear an SGE context
2521  *	@adapter: the adapter
2522  *	@id: the context id
2523  *	@type: the context type
2524  *
2525  *	Completely clear an SGE context.  Used predominantly at post-reset
2526  *	initialization.  Note in particular that we don't skip writing to any
2527  *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2528  *	does ...
2529  */
2530 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
2531 {
2532 	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2533 	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2534 	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2535 	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2536 	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2537 	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2538 	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2539 	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2540 	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2541 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2542 	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2543 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2544 }
2545 
2546 /**
2547  *	t3_sge_init_ecntxt - initialize an SGE egress context
2548  *	@adapter: the adapter to configure
2549  *	@id: the context id
2550  *	@gts_enable: whether to enable GTS for the context
2551  *	@type: the egress context type
2552  *	@respq: associated response queue
2553  *	@base_addr: base address of queue
2554  *	@size: number of queue entries
2555  *	@token: uP token
2556  *	@gen: initial generation value for the context
2557  *	@cidx: consumer pointer
2558  *
2559  *	Initialize an SGE egress context and make it ready for use.  If the
2560  *	platform allows concurrent context operations, the caller is
2561  *	responsible for appropriate locking.
2562  */
2563 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2564 		       enum sge_context_type type, int respq, u64 base_addr,
2565 		       unsigned int size, unsigned int token, int gen,
2566 		       unsigned int cidx)
2567 {
2568 	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2569 
2570 	if (base_addr & 0xfff)     /* must be 4K aligned */
2571 		return -EINVAL;
2572 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2573 		return -EBUSY;
2574 
2575 	base_addr >>= 12;
2576 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2577 		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2578 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2579 		     V_EC_BASE_LO((u32)base_addr & 0xffff));
2580 	base_addr >>= 16;
2581 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2582 	base_addr >>= 32;
2583 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2584 		     V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2585 		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2586 		     F_EC_VALID);
2587 	return t3_sge_write_context(adapter, id, F_EGRESS);
2588 }
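
/*
 * Example (editor's sketch): initializing an Ethernet egress context for a
 * 4KB-aligned ring.  qid, rspq_id, busaddr, nentries, and token are
 * hypothetical caller state:
 *
 *	if (t3_sge_init_ecntxt(adapter, qid, 1, SGE_CNTXT_ETH, rspq_id,
 *			       busaddr, nentries, token, 1, 0))
 *		goto err;
 */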
2589 
2590 /**
2591  *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2592  *	@adapter: the adapter to configure
2593  *	@id: the context id
2594  *	@gts_enable: whether to enable GTS for the context
2595  *	@base_addr: base address of queue
2596  *	@size: number of queue entries
2597  *	@bsize: size of each buffer for this queue
2598  *	@cong_thres: threshold to signal congestion to upstream producers
2599  *	@gen: initial generation value for the context
2600  *	@cidx: consumer pointer
2601  *
2602  *	Initialize an SGE free list context and make it ready for use.  The
2603  *	caller is responsible for ensuring only one context operation occurs
2604  *	at a time.
2605  */
2606 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2607 			u64 base_addr, unsigned int size, unsigned int bsize,
2608 			unsigned int cong_thres, int gen, unsigned int cidx)
2609 {
2610 	if (base_addr & 0xfff)     /* must be 4K aligned */
2611 		return -EINVAL;
2612 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2613 		return -EBUSY;
2614 
2615 	base_addr >>= 12;
2616 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2617 	base_addr >>= 32;
2618 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2619 		     V_FL_BASE_HI((u32)base_addr) |
2620 		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2621 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2622 		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2623 		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2624 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2625 		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2626 		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2627 	return t3_sge_write_context(adapter, id, F_FREELIST);
2628 }
2629 
2630 /**
2631  *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2632  *	@adapter: the adapter to configure
2633  *	@id: the context id
2634  *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2635  *	@base_addr: base address of queue
2636  *	@size: number of queue entries
2637  *	@fl_thres: threshold for selecting the normal or jumbo free list
2638  *	@gen: initial generation value for the context
2639  *	@cidx: consumer pointer
2640  *
2641  *	Initialize an SGE response queue context and make it ready for use.
2642  *	The caller is responsible for ensuring only one context operation
2643  *	occurs at a time.
2644  */
2645 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2646 			 u64 base_addr, unsigned int size,
2647 			 unsigned int fl_thres, int gen, unsigned int cidx)
2648 {
2649 	unsigned int ctrl, intr = 0;
2650 
2651 	if (base_addr & 0xfff)     /* must be 4K aligned */
2652 		return -EINVAL;
2653 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2654 		return -EBUSY;
2655 
2656 	base_addr >>= 12;
2657 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2658 		     V_CQ_INDEX(cidx));
2659 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2660 	base_addr >>= 32;
2661 	ctrl = t3_read_reg(adapter, A_SG_CONTROL);
2662 	if ((irq_vec_idx > 0) ||
2663 	    ((irq_vec_idx == 0) && !(ctrl & F_ONEINTMULTQ)))
2664 		intr = F_RQ_INTR_EN;
2665 	if (irq_vec_idx >= 0)
2666 		intr |= V_RQ_MSI_VEC(irq_vec_idx);
2667 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2668 		     V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2669 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2670 	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2671 }
2672 
2673 /**
2674  *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2675  *	@adapter: the adapter to configure
2676  *	@id: the context id
2677  *	@base_addr: base address of queue
2678  *	@size: number of queue entries
2679  *	@rspq: response queue for async notifications
2680  *	@ovfl_mode: CQ overflow mode
2681  *	@credits: completion queue credits
2682  *	@credit_thres: the credit threshold
2683  *
2684  *	Initialize an SGE completion queue context and make it ready for use.
2685  *	The caller is responsible for ensuring only one context operation
2686  *	occurs at a time.
2687  */
2688 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2689 			unsigned int size, int rspq, int ovfl_mode,
2690 			unsigned int credits, unsigned int credit_thres)
2691 {
2692 	if (base_addr & 0xfff)     /* must be 4K aligned */
2693 		return -EINVAL;
2694 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2695 		return -EBUSY;
2696 
2697 	base_addr >>= 12;
2698 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2699 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2700 	base_addr >>= 32;
2701 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2702 		     V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2703 		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2704 		     V_CQ_ERR(ovfl_mode));
2705 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2706 		     V_CQ_CREDIT_THRES(credit_thres));
2707 	return t3_sge_write_context(adapter, id, F_CQ);
2708 }
2709 
2710 /**
2711  *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2712  *	@adapter: the adapter
2713  *	@id: the egress context id
2714  *	@enable: enable (1) or disable (0) the context
2715  *
2716  *	Enable or disable an SGE egress context.  The caller is responsible for
2717  *	ensuring only one context operation occurs at a time.
2718  */
2719 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2720 {
2721 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2722 		return -EBUSY;
2723 
2724 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2725 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2726 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2727 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2728 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2729 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2730 		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2731 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2732 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2733 }
2734 
2735 /**
2736  *	t3_sge_disable_fl - disable an SGE free-buffer list
2737  *	@adapter: the adapter
2738  *	@id: the free list context id
2739  *
2740  *	Disable an SGE free-buffer list.  The caller is responsible for
2741  *	ensuring only one context operation occurs at a time.
2742  */
2743 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2744 {
2745 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2746 		return -EBUSY;
2747 
2748 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2749 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2750 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2751 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2752 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2753 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2754 		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2755 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2756 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2757 }
2758 
2759 /**
2760  *	t3_sge_disable_rspcntxt - disable an SGE response queue
2761  *	@adapter: the adapter
2762  *	@id: the response queue context id
2763  *
2764  *	Disable an SGE response queue.  The caller is responsible for
2765  *	ensuring only one context operation occurs at a time.
2766  */
2767 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2768 {
2769 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2770 		return -EBUSY;
2771 
2772 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2773 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2774 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2775 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2776 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2777 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2778 		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2779 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2780 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2781 }
2782 
2783 /**
2784  *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2785  *	@adapter: the adapter
2786  *	@id: the completion queue context id
2787  *
2788  *	Disable an SGE completion queue.  The caller is responsible for
2789  *	ensuring only one context operation occurs at a time.
2790  */
2791 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2792 {
2793 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2794 		return -EBUSY;
2795 
2796 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2797 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2798 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2799 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2800 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2801 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2802 		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2803 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2804 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2805 }
2806 
2807 /**
2808  *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2809  *	@adapter: the adapter
2810  *	@id: the context id
2811  *	@op: the operation to perform
2812  *	@credits: credits to return to the CQ
2813  *
2814  *	Perform the selected operation on an SGE completion queue context.
2815  *	The caller is responsible for ensuring only one context operation
2816  *	occurs at a time.
2817  *
2818  *	For most operations, the function returns the current HW position in
2819  *	the completion queue.
2820  */
2821 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2822 		      unsigned int credits)
2823 {
2824 	u32 val;
2825 
2826 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2827 		return -EBUSY;
2828 
2829 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2830 	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2831 		     V_CONTEXT(id) | F_CQ);
2832 	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2833 				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2834 		return -EIO;
2835 
2836 	if (op >= 2 && op < 7) {
2837 		if (adapter->params.rev > 0)
2838 			return G_CQ_INDEX(val);
2839 
2840 		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2841 			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2842 		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2843 				    F_CONTEXT_CMD_BUSY, 0,
2844 				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2845 			return -EIO;
2846 		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2847 	}
2848 	return 0;
2849 }
2850 
2851 /**
2852  * 	t3_sge_read_context - read an SGE context
2853  * 	@type: the context type
2854  * 	@adapter: the adapter
2855  * 	@id: the context id
2856  * 	@data: holds the retrieved context
2857  *
2858  * 	Read an SGE context.  The caller is responsible for ensuring
2859  * 	only one context operation occurs at a time.
2860  */
2861 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2862 			       unsigned int id, u32 data[4])
2863 {
2864 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2865 		return -EBUSY;
2866 
2867 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2868 		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2869 	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2870 			    SG_CONTEXT_CMD_ATTEMPTS, 1))
2871 		return -EIO;
2872 	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2873 	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2874 	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2875 	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2876 	return 0;
2877 }
2878 
2879 /**
2880  * 	t3_sge_read_ecntxt - read an SGE egress context
2881  * 	@adapter: the adapter
2882  * 	@id: the context id
2883  * 	@data: holds the retrieved context
2884  *
2885  * 	Read an SGE egress context.  The caller is responsible for ensuring
2886  * 	only one context operation occurs at a time.
2887  */
2888 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2889 {
2890 	if (id >= 65536)
2891 		return -EINVAL;
2892 	return t3_sge_read_context(F_EGRESS, adapter, id, data);
2893 }
2894 
2895 /**
2896  * 	t3_sge_read_cq - read an SGE CQ context
2897  * 	@adapter: the adapter
2898  * 	@id: the context id
2899  * 	@data: holds the retrieved context
2900  *
2901  * 	Read an SGE CQ context.  The caller is responsible for ensuring
2902  * 	only one context operation occurs at a time.
2903  */
2904 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2905 {
2906 	if (id >= 65536)
2907 		return -EINVAL;
2908 	return t3_sge_read_context(F_CQ, adapter, id, data);
2909 }
2910 
2911 /**
2912  * 	t3_sge_read_fl - read an SGE free-list context
2913  * 	@adapter: the adapter
2914  * 	@id: the context id
2915  * 	@data: holds the retrieved context
2916  *
2917  * 	Read an SGE free-list context.  The caller is responsible for ensuring
2918  * 	only one context operation occurs at a time.
2919  */
2920 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2921 {
2922 	if (id >= SGE_QSETS * 2)
2923 		return -EINVAL;
2924 	return t3_sge_read_context(F_FREELIST, adapter, id, data);
2925 }
2926 
2927 /**
2928  * 	t3_sge_read_rspq - read an SGE response queue context
2929  * 	@adapter: the adapter
2930  * 	@id: the context id
2931  * 	@data: holds the retrieved context
2932  *
2933  * 	Read an SGE response queue context.  The caller is responsible for
2934  * 	ensuring only one context operation occurs at a time.
2935  */
2936 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2937 {
2938 	if (id >= SGE_QSETS)
2939 		return -EINVAL;
2940 	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2941 }
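
/*
 * Example (editor's sketch): retrieving the four raw context words of
 * response queue 0:
 *
 *	u32 data[4];
 *
 *	if (t3_sge_read_rspq(adapter, 0, data) == 0)
 *		CH_WARN(adapter, "rspq0 ctx %08x %08x %08x %08x\n",
 *			data[0], data[1], data[2], data[3]);
 */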
2942 
2943 /**
2944  *	t3_config_rss - configure Rx packet steering
2945  *	@adapter: the adapter
2946  *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2947  *	@cpus: values for the CPU lookup table (0xff terminated)
2948  *	@rspq: values for the response queue lookup table (0xffff terminated)
2949  *
2950  *	Programs the receive packet steering logic.  @cpus and @rspq provide
2951  *	the values for the CPU and response queue lookup tables.  If they
2952  *	provide fewer values than the size of the tables the supplied values
2953  *	are used repeatedly until the tables are fully populated.
2954  */
2955 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2956 		   const u16 *rspq)
2957 {
2958 	int i, j, cpu_idx = 0, q_idx = 0;
2959 
2960 	if (cpus)
2961 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2962 			u32 val = i << 16;
2963 
2964 			for (j = 0; j < 2; ++j) {
2965 				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2966 				if (cpus[cpu_idx] == 0xff)
2967 					cpu_idx = 0;
2968 			}
2969 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2970 		}
2971 
2972 	if (rspq)
2973 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2974 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2975 				     (i << 16) | rspq[q_idx++]);
2976 			if (rspq[q_idx] == 0xffff)
2977 				q_idx = 0;
2978 		}
2979 
2980 	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2981 }
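
/*
 * Example (editor's sketch): steering to four CPUs and two response queues.
 * The 0xff/0xffff sentinels terminate the tables, and the short arrays are
 * replayed until all RSS_TABLE_SIZE entries are written.  rss_config stands
 * for a hypothetical set of TP_RSS_CONFIG flags:
 *
 *	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };
 *	static const u16 rspq[] = { 0, 1, 0xffff };
 *
 *	t3_config_rss(adapter, rss_config, cpus, rspq);
 */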
2982 
2983 /**
2984  *	t3_read_rss - read the contents of the RSS tables
2985  *	@adapter: the adapter
2986  *	@lkup: holds the contents of the RSS lookup table
2987  *	@map: holds the contents of the RSS map table
2988  *
2989  *	Reads the contents of the receive packet steering tables.
2990  */
2991 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2992 {
2993 	int i;
2994 	u32 val;
2995 
2996 	if (lkup)
2997 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2998 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2999 				     0xffff0000 | i);
3000 			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
3001 			if (!(val & 0x80000000))
3002 				return -EAGAIN;
3003 			*lkup++ = (u8)val;
3004 			*lkup++ = (u8)(val >> 8);
3005 		}
3006 
3007 	if (map)
3008 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
3009 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
3010 				     0xffff0000 | i);
3011 			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
3012 			if (!(val & 0x80000000))
3013 				return -EAGAIN;
3014 			*map++ = (u16)val;
3015 		}
3016 	return 0;
3017 }
3018 
3019 /**
3020  *	t3_tp_set_offload_mode - put TP in NIC/offload mode
3021  *	@adap: the adapter
3022  *	@enable: 1 to select offload mode, 0 for regular NIC
3023  *
3024  *	Switches TP to NIC/offload mode.
3025  */
3026 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
3027 {
3028 	if (is_offload(adap) || !enable)
3029 		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
3030 				 V_NICMODE(!enable));
3031 }
3032 
3033 /**
3034  *	tp_wr_bits_indirect - set/clear bits in an indirect TP register
3035  *	@adap: the adapter
3036  *	@addr: the indirect TP register address
3037  *	@mask: specifies the field within the register to modify
3038  *	@val: new value for the field
3039  *
3040  *	Sets a field of an indirect TP register to the given value.
3041  */
3042 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
3043 				unsigned int mask, unsigned int val)
3044 {
3045 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3046 	val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3047 	t3_write_reg(adap, A_TP_PIO_DATA, val);
3048 }
3049 
3050 /**
3051  *	t3_enable_filters - enable the HW filters
3052  *	@adap: the adapter
3053  *
3054  *	Enables the HW filters for NIC traffic.
3055  */
3056 void t3_enable_filters(adapter_t *adap)
3057 {
3058 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
3059 	t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
3060 	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
3061 	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
3062 }
3063 
3064 /**
3065  *	t3_disable_filters - disable the HW filters
3066  *	@adap: the adapter
3067  *
3068  *	Disables the HW filters for NIC traffic.
3069  */
3070 void t3_disable_filters(adapter_t *adap)
3071 {
3072 	/* note that we don't want to revert to NIC-only mode */
3073 	t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_FILTEREN, 0);
3074 	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG,
3075 			 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 0);
3076 	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, F_LOOKUPEVERYPKT, 0);
3077 }
3078 
3079 /**
3080  *	pm_num_pages - calculate the number of pages of the payload memory
3081  *	@mem_size: the size of the payload memory
3082  *	@pg_size: the size of each payload memory page
3083  *
3084  *	Calculate the number of pages, each of the given size, that fit in a
3085  *	memory of the specified size, respecting the HW requirement that the
3086  *	number of pages must be a multiple of 24.
3087  */
3088 static inline unsigned int pm_num_pages(unsigned int mem_size,
3089 					unsigned int pg_size)
3090 {
3091 	unsigned int n = mem_size / pg_size;
3092 
3093 	return n - n % 24;
3094 }
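
/*
 * Example: with 64MB of payload memory and 16KB pages, mem_size / pg_size
 * yields 4096 pages; 4096 % 24 == 16, so pm_num_pages() returns 4080, the
 * largest multiple of 24 that fits.
 */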
3095 
3096 #define mem_region(adap, start, size, reg) do { \
3097 	t3_write_reg((adap), A_ ## reg, (start)); \
3098 	(start) += (size); } while (0)
3099 
3100 /**
3101  *	partition_mem - partition memory and configure TP memory settings
3102  *	@adap: the adapter
3103  *	@p: the TP parameters
3104  *
3105  *	Partitions context and payload memory and configures TP's memory
3106  *	registers.
3107  */
3108 static void partition_mem(adapter_t *adap, const struct tp_params *p)
3109 {
3110 	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
3111 	unsigned int timers = 0, timers_shift = 22;
3112 
3113 	if (adap->params.rev > 0) {
3114 		if (tids <= 16 * 1024) {
3115 			timers = 1;
3116 			timers_shift = 16;
3117 		} else if (tids <= 64 * 1024) {
3118 			timers = 2;
3119 			timers_shift = 18;
3120 		} else if (tids <= 256 * 1024) {
3121 			timers = 3;
3122 			timers_shift = 20;
3123 		}
3124 	}
3125 
3126 	t3_write_reg(adap, A_TP_PMM_SIZE,
3127 		     p->chan_rx_size | (p->chan_tx_size >> 16));
3128 
3129 	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
3130 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
3131 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
3132 	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
3133 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
3134 
3135 	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
3136 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
3137 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
3138 
3139 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
3140 	/* Add a bit of headroom and make multiple of 24 */
3141 	pstructs += 48;
3142 	pstructs -= pstructs % 24;
3143 	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
3144 
3145 	m = tids * TCB_SIZE;
3146 	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
3147 	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
3148 	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
3149 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
3150 	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
3151 	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
3152 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
3153 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
3154 
3155 	m = (m + 4095) & ~0xfff;
3156 	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
3157 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
3158 
3159 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
3160 	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
3161 	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
3162 	if (tids < m)
3163 		adap->params.mc5.nservers += m - tids;
3164 }
3165 
3166 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
3167 {
3168 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3169 	t3_write_reg(adap, A_TP_PIO_DATA, val);
3170 }
3171 
3172 static inline u32 tp_rd_indirect(adapter_t *adap, unsigned int addr)
3173 {
3174 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3175 	return t3_read_reg(adap, A_TP_PIO_DATA);
3176 }
3177 
3178 static void tp_config(adapter_t *adap, const struct tp_params *p)
3179 {
3180 	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
3181 		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
3182 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
3183 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
3184 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
3185 		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
3186 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
3187 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
3188 		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
3189 		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
3190 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
3191 			 F_IPV6ENABLE | F_NICMODE);
3192 	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
3193 	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
3194 	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
3195 			 adap->params.rev > 0 ? F_ENABLEESND :
3196 			 			F_T3A_ENABLEESND);
3197 	t3_set_reg_field(adap, A_TP_PC_CONFIG,
3198 			 F_ENABLEEPCMDAFULL,
3199 			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
3200 			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
3201 	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
3202 			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
3203 			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
3204 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
3205 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
3206 
3207 	if (adap->params.rev > 0) {
3208 		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
3209 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
3210 				 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
3211 		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
3212 		tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
3213 		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
3214 		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
3215 	} else
3216 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
3217 
3218 	if (adap->params.rev == T3_REV_C)
3219 		t3_set_reg_field(adap, A_TP_PC_CONFIG,
3220 				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
3221 				 V_TABLELATENCYDELTA(4));
3222 
3223 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
3224 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
3225 	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
3226 	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
3227 
3228 	if (adap->params.nports > 2) {
3229 		t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
3230 				 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
3231 				 F_ENABLERXPORTFROMADDR);
3232 		tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
3233 				    V_RXMAPMODE(M_RXMAPMODE), 0);
3234 		tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
3235 			       V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
3236 			       F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
3237 			       F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
3238 		tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
3239 		tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
3240 		tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
3241 	}
3242 }
3243 
3244 /* TCP timer values in ms */
3245 #define TP_DACK_TIMER 50
3246 #define TP_RTO_MIN    250
3247 
3248 /**
3249  *	tp_set_timers - set TP timing parameters
3250  *	@adap: the adapter to set
3251  *	@core_clk: the core clock frequency in Hz
3252  *
3253  *	Set TP's timing parameters, such as the various timer resolutions and
3254  *	the TCP timer values.
3255  */
3256 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
3257 {
3258 	unsigned int tre = adap->params.tp.tre;
3259 	unsigned int dack_re = adap->params.tp.dack_re;
3260 	unsigned int tstamp_re = fls(core_clk / 1000);     /* 1ms, at least */
3261 	unsigned int tps = core_clk >> tre;
3262 
3263 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
3264 		     V_DELAYEDACKRESOLUTION(dack_re) |
3265 		     V_TIMESTAMPRESOLUTION(tstamp_re));
3266 	t3_write_reg(adap, A_TP_DACK_TIMER,
3267 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
3268 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
3269 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
3270 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
3271 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
3272 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
3273 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
3274 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
3275 		     V_KEEPALIVEMAX(9));
3276 
3277 #define SECONDS * tps
3278 
3279 	t3_write_reg(adap, A_TP_MSL,
3280 		     adap->params.rev > 0 ? 0 : 2 SECONDS);
3281 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
3282 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
3283 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
3284 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
3285 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
3286 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
3287 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
3288 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
3289 
3290 #undef SECONDS
3291 }
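/*
 * Illustration of the SECONDS macro above: with tps == core_clk >> tre
 * (TP timer ticks per second), "64 SECONDS" expands to "64 * tps", so the
 * timer registers are programmed in units of TP timer ticks.
 */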
3292 
3293 /**
3294  *	t3_tp_set_coalescing_size - set receive coalescing size
3295  *	@adap: the adapter
3296  *	@size: the receive coalescing size
3297  *	@psh: whether a set PSH bit should deliver coalesced data
3298  *
3299  *	Set the receive coalescing size and PSH bit handling.
3300  */
3301 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
3302 {
3303 	u32 val;
3304 
3305 	if (size > MAX_RX_COALESCING_LEN)
3306 		return -EINVAL;
3307 
3308 	val = t3_read_reg(adap, A_TP_PARA_REG3);
3309 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
3310 
3311 	if (size) {
3312 		val |= F_RXCOALESCEENABLE;
3313 		if (psh)
3314 			val |= F_RXCOALESCEPSHEN;
3315 		size = min(MAX_RX_COALESCING_LEN, size);
3316 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
3317 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
3318 	}
3319 	t3_write_reg(adap, A_TP_PARA_REG3, val);
3320 	return 0;
3321 }
3322 
3323 /**
3324  *	t3_tp_set_max_rxsize - set the max receive size
3325  *	@adap: the adapter
3326  *	@size: the max receive size
3327  *
3328  *	Set TP's max receive size.  This is the limit that applies when
3329  *	receive coalescing is disabled.
3330  */
3331 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
3332 {
3333 	t3_write_reg(adap, A_TP_PARA_REG7,
3334 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
3335 }
3336 
3337 static void __devinit init_mtus(unsigned short mtus[])
3338 {
3339 	/*
3340 	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
3341 	 * it can accommodate max size TCP/IP headers when SACK and timestamps
3342 	 * are enabled and still have at least 8 bytes of payload.
3343 	 */
3344 	mtus[0] = 88;
3345 	mtus[1] = 88;
3346 	mtus[2] = 256;
3347 	mtus[3] = 512;
3348 	mtus[4] = 576;
3349 	mtus[5] = 1024;
3350 	mtus[6] = 1280;
3351 	mtus[7] = 1492;
3352 	mtus[8] = 1500;
3353 	mtus[9] = 2002;
3354 	mtus[10] = 2048;
3355 	mtus[11] = 4096;
3356 	mtus[12] = 4352;
3357 	mtus[13] = 8192;
3358 	mtus[14] = 9000;
3359 	mtus[15] = 9600;
3360 }
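/*
 * Sanity check of the 88-byte minimum above (illustrative arithmetic):
 * 20 bytes of IP header plus a TCP header of up to 60 bytes (20 fixed +
 * 40 of options for SACK and timestamps) totals 80, leaving 88 - 80 == 8
 * bytes of payload.
 */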
3361 
3362 /**
3363  *	init_cong_ctrl - initialize congestion control parameters
3364  *	@a: the alpha values for congestion control
3365  *	@b: the beta values for congestion control
3366  *
3367  *	Initialize the congestion control parameters.
3368  */
3369 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3370 {
3371 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3372 	a[9] = 2;
3373 	a[10] = 3;
3374 	a[11] = 4;
3375 	a[12] = 5;
3376 	a[13] = 6;
3377 	a[14] = 7;
3378 	a[15] = 8;
3379 	a[16] = 9;
3380 	a[17] = 10;
3381 	a[18] = 14;
3382 	a[19] = 17;
3383 	a[20] = 21;
3384 	a[21] = 25;
3385 	a[22] = 30;
3386 	a[23] = 35;
3387 	a[24] = 45;
3388 	a[25] = 60;
3389 	a[26] = 80;
3390 	a[27] = 100;
3391 	a[28] = 200;
3392 	a[29] = 300;
3393 	a[30] = 400;
3394 	a[31] = 500;
3395 
3396 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3397 	b[9] = b[10] = 1;
3398 	b[11] = b[12] = 2;
3399 	b[13] = b[14] = b[15] = b[16] = 3;
3400 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3401 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3402 	b[28] = b[29] = 6;
3403 	b[30] = b[31] = 7;
3404 }
3405 
3406 /* The minimum additive increment value for the congestion control table */
3407 #define CC_MIN_INCR 2U
3408 
3409 /**
3410  *	t3_load_mtus - write the MTU and congestion control HW tables
3411  *	@adap: the adapter
3412  *	@mtus: the unrestricted values for the MTU table
3413  *	@alpha: the values for the congestion control alpha parameter
3414  *	@beta: the values for the congestion control beta parameter
3415  *	@mtu_cap: the maximum permitted effective MTU
3416  *
3417  *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
3418  *	Update the high-speed congestion control table with the supplied alpha,
3419  * 	beta, and MTUs.
3420  */
3421 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
3422 		  unsigned short alpha[NCCTRL_WIN],
3423 		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
3424 {
3425 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3426 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3427 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3428 		28672, 40960, 57344, 81920, 114688, 163840, 229376 };
3429 
3430 	unsigned int i, w;
3431 
3432 	for (i = 0; i < NMTUS; ++i) {
3433 		unsigned int mtu = min(mtus[i], mtu_cap);
3434 		unsigned int log2 = fls(mtu);
3435 
3436 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3437 			log2--;
3438 		t3_write_reg(adap, A_TP_MTU_TABLE,
3439 			     (i << 24) | (log2 << 16) | mtu);
3440 
3441 		for (w = 0; w < NCCTRL_WIN; ++w) {
3442 			unsigned int inc;
3443 
3444 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3445 				  CC_MIN_INCR);
3446 
3447 			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3448 				     (w << 16) | (beta[w] << 13) | inc);
3449 		}
3450 	}
3451 }
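/*
 * Worked example (illustrative): for mtu == 1500, fls(1500) == 11, and
 * since bit 9 (512) of 1500 is clear the rounding step drops log2 to 10.
 * For congestion window 0 with alpha[0] == 1 and avg_pkts[0] == 2, the
 * additive increment is max((1500 - 40) * 1 / 2, CC_MIN_INCR) == 730.
 */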
3452 
3453 /**
3454  *	t3_read_hw_mtus - returns the values in the HW MTU table
3455  *	@adap: the adapter
3456  *	@mtus: where to store the HW MTU values
3457  *
3458  *	Reads the HW MTU table.
3459  */
3460 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3461 {
3462 	int i;
3463 
3464 	for (i = 0; i < NMTUS; ++i) {
3465 		unsigned int val;
3466 
3467 		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3468 		val = t3_read_reg(adap, A_TP_MTU_TABLE);
3469 		mtus[i] = val & 0x3fff;
3470 	}
3471 }
3472 
3473 /**
3474  *	t3_get_cong_cntl_tab - reads the congestion control table
3475  *	@adap: the adapter
3476  *	@incr: where to store the alpha values
3477  *
3478  *	Reads the additive increments programmed into the HW congestion
3479  *	control table.
3480  */
3481 void t3_get_cong_cntl_tab(adapter_t *adap,
3482 			  unsigned short incr[NMTUS][NCCTRL_WIN])
3483 {
3484 	unsigned int mtu, w;
3485 
3486 	for (mtu = 0; mtu < NMTUS; ++mtu)
3487 		for (w = 0; w < NCCTRL_WIN; ++w) {
3488 			t3_write_reg(adap, A_TP_CCTRL_TABLE,
3489 				     0xffff0000 | (mtu << 5) | w);
3490 			incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3491 				        A_TP_CCTRL_TABLE) & 0x1fff;
3492 		}
3493 }
3494 
3495 /**
3496  *	t3_tp_get_mib_stats - read TP's MIB counters
3497  *	@adap: the adapter
3498  *	@tps: holds the returned counter values
3499  *
3500  *	Returns the values of TP's MIB counters.
3501  */
3502 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
3503 {
3504 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3505 			 sizeof(*tps) / sizeof(u32), 0);
3506 }
3507 
3508 /**
3509  *	t3_read_pace_tbl - read the pace table
3510  *	@adap: the adapter
3511  *	@pace_vals: holds the returned values
3512  *
3513  *	Returns the values of TP's pace table in nanoseconds.
3514  */
3515 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
3516 {
3517 	unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3518 
3519 	for (i = 0; i < NTX_SCHED; i++) {
3520 		t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3521 		pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3522 	}
3523 }
3524 
3525 /**
3526  *	t3_set_pace_tbl - set the pace table
3527  *	@adap: the adapter
3528  *	@pace_vals: the pace values in nanoseconds
3529  *	@start: index of the first entry in the HW pace table to set
3530  *	@n: how many entries to set
3531  *
3532  *	Sets (a subset of the) HW pace table.
3533  */
3534 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3535 		     unsigned int start, unsigned int n)
3536 {
3537 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3538 
3539 	for ( ; n; n--, start++, pace_vals++)
3540 		t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3541 			     ((*pace_vals + tick_ns / 2) / tick_ns));
3542 }
3543 
3544 #define ulp_region(adap, name, start, len) \
3545 	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3546 	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3547 		     (start) + (len) - 1); \
3548 	start += len
3549 
3550 #define ulptx_region(adap, name, start, len) \
3551 	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3552 	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3553 		     (start) + (len) - 1)
3554 
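/*
 * Illustrative expansion: ulp_region(adap, ISCSI, m, len) writes
 * A_ULPRX_ISCSI_LLIMIT = m and A_ULPRX_ISCSI_ULIMIT = m + len - 1, then
 * advances m by len, so successive uses in ulp_config() carve consecutive
 * regions out of the channel Rx memory.  ulptx_region() does not advance
 * m, so each ULP Tx region shares the range of the Rx region that follows
 * it.
 */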
3555 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3556 {
3557 	unsigned int m = p->chan_rx_size;
3558 
3559 	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3560 	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3561 	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3562 	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3563 	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3564 	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3565 	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3566 	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3567 }
3568 
3569 
3570 /**
3571  *	t3_set_proto_sram - set the contents of the protocol sram
3572  *	@adapter: the adapter
3573  *	@data: the protocol image
3574  *
3575  *	Write the contents of the protocol SRAM.
3576  */
3577 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3578 {
3579 	int i;
3580 	const u32 *buf = (const u32 *)data;
3581 
3582 	for (i = 0; i < PROTO_SRAM_LINES; i++) {
3583 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3584 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3585 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3586 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3587 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3588 
3589 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3590 		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3591 			return -EIO;
3592 	}
3593 	return 0;
3594 }
3595 
3596 /**
3597  *	t3_config_trace_filter - configure one of the tracing filters
3598  *	@adapter: the adapter
3599  *	@tp: the desired trace filter parameters
3600  *	@filter_index: which filter to configure
3601  *	@invert: if set, non-matching packets are traced instead of matching ones
3602  *	@enable: whether to enable or disable the filter
3603  *
3604  *	Configures one of the tracing filters available in HW.
3605  */
3606 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3607 			    int filter_index, int invert, int enable)
3608 {
3609 	u32 addr, key[4], mask[4];
3610 
3611 	key[0] = tp->sport | (tp->sip << 16);
3612 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
3613 	key[2] = tp->dip;
3614 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3615 
3616 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3617 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3618 	mask[2] = tp->dip_mask;
3619 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3620 
3621 	if (invert)
3622 		key[3] |= (1 << 29);
3623 	if (enable)
3624 		key[3] |= (1 << 28);
3625 
3626 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3627 	tp_wr_indirect(adapter, addr++, key[0]);
3628 	tp_wr_indirect(adapter, addr++, mask[0]);
3629 	tp_wr_indirect(adapter, addr++, key[1]);
3630 	tp_wr_indirect(adapter, addr++, mask[1]);
3631 	tp_wr_indirect(adapter, addr++, key[2]);
3632 	tp_wr_indirect(adapter, addr++, mask[2]);
3633 	tp_wr_indirect(adapter, addr++, key[3]);
3634 	tp_wr_indirect(adapter, addr,   mask[3]);
3635 	(void) t3_read_reg(adapter, A_TP_PIO_DATA);          /* flush */
3636 }
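/*
 * Usage sketch (illustrative, values hypothetical): trace received TCP
 * packets to destination IP 10.0.0.1 using the RX trace filter (any
 * non-zero filter_index selects A_TP_RX_TRC_KEY0 above).  This assumes
 * the usual key/mask convention, i.e. set mask bits select which key bits
 * are compared; fields left zero with a zero mask act as wildcards.
 */
#if 0
	struct trace_params tp;

	memset(&tp, 0, sizeof(tp));
	tp.dip = 0x0a000001;		/* 10.0.0.1 */
	tp.dip_mask = 0xffffffff;	/* compare every dip bit */
	tp.proto = 6;			/* IPPROTO_TCP */
	tp.proto_mask = 0xff;
	t3_config_trace_filter(adapter, &tp, 1, 0, 1);
#endif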
3637 
3638 /**
3639  *	t3_query_trace_filter - query a tracing filter
3640  *	@adapter: the adapter
3641  *	@tp: the current trace filter parameters
3642  *	@filter_index: which filter to query
3643  *	@inverted: non-zero if the filter is inverted
3644  *	@enabled: non-zero if the filter is enabled
3645  *
3646  *	Returns the current settings of the specified HW tracing filter.
3647  */
3648 void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp,
3649 			   int filter_index, int *inverted, int *enabled)
3650 {
3651 	u32 addr, key[4], mask[4];
3652 
3653 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3654 	key[0]  = tp_rd_indirect(adapter, addr++);
3655 	mask[0] = tp_rd_indirect(adapter, addr++);
3656 	key[1]  = tp_rd_indirect(adapter, addr++);
3657 	mask[1] = tp_rd_indirect(adapter, addr++);
3658 	key[2]  = tp_rd_indirect(adapter, addr++);
3659 	mask[2] = tp_rd_indirect(adapter, addr++);
3660 	key[3]  = tp_rd_indirect(adapter, addr++);
3661 	mask[3] = tp_rd_indirect(adapter, addr);
3662 
3663 	tp->sport = key[0] & 0xffff;
3664 	tp->sip   = (key[0] >> 16) | ((key[1] & 0xffff) << 16);
3665 	tp->dport = key[1] >> 16;
3666 	tp->dip   = key[2];
3667 	tp->proto = key[3] & 0xff;
3668 	tp->vlan  = key[3] >> 8;
3669 	tp->intf  = key[3] >> 20;
3670 
3671 	tp->sport_mask = mask[0] & 0xffff;
3672 	tp->sip_mask   = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16);
3673 	tp->dport_mask = mask[1] >> 16;
3674 	tp->dip_mask   = mask[2];
3675 	tp->proto_mask = mask[3] & 0xff;
3676 	tp->vlan_mask  = mask[3] >> 8;
3677 	tp->intf_mask  = mask[3] >> 20;
3678 
3679 	*inverted = key[3] & (1 << 29);
3680 	*enabled  = key[3] & (1 << 28);
3681 }
3682 
3683 /**
3684  *	t3_config_sched - configure a HW traffic scheduler
3685  *	@adap: the adapter
3686  *	@kbps: target rate in Kbps
3687  *	@sched: the scheduler index
3688  *
3689  *	Configure a Tx HW scheduler for the target rate.
3690  */
3691 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3692 {
3693 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3694 	unsigned int clk = adap->params.vpd.cclk * 1000;
3695 	unsigned int selected_cpt = 0, selected_bpt = 0;
3696 
3697 	if (kbps > 0) {
3698 		kbps *= 125;     /* -> bytes */
3699 		for (cpt = 1; cpt <= 255; cpt++) {
3700 			tps = clk / cpt;
3701 			bpt = (kbps + tps / 2) / tps;
3702 			if (bpt > 0 && bpt <= 255) {
3703 				v = bpt * tps;
3704 				delta = v >= kbps ? v - kbps : kbps - v;
3705 				if (delta < mindelta) {
3706 					mindelta = delta;
3707 					selected_cpt = cpt;
3708 					selected_bpt = bpt;
3709 				}
3710 			} else if (selected_cpt)
3711 				break;
3712 		}
3713 		if (!selected_cpt)
3714 			return -EINVAL;
3715 	}
3716 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3717 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3718 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3719 	if (sched & 1)
3720 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3721 	else
3722 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3723 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3724 	return 0;
3725 }
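/*
 * Worked example (assuming a 200 MHz core clock, clk == 200000000): a
 * 10 Mbps target gives kbps == 10000 * 125 == 1250000 bytes/sec.  The
 * search settles on cpt == 160, bpt == 1: tps == 200000000 / 160 ==
 * 1250000 ticks/sec, and one byte per tick hits the target exactly
 * (delta == 0).
 */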
3726 
3727 /**
3728  *	t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3729  *	@adap: the adapter
3730  *	@sched: the scheduler index
3731  *	@ipg: the interpacket delay in tenths of nanoseconds
3732  *
3733  *	Set the interpacket delay for a HW packet rate scheduler.
3734  */
3735 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3736 {
3737 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3738 
3739 	/* convert ipg to nearest number of core clocks */
3740 	ipg *= core_ticks_per_usec(adap);
3741 	ipg = (ipg + 5000) / 10000;
3742 	if (ipg > 0xffff)
3743 		return -EINVAL;
3744 
3745 	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3746 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3747 	if (sched & 1)
3748 		v = (v & 0xffff) | (ipg << 16);
3749 	else
3750 		v = (v & 0xffff0000) | ipg;
3751 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3752 	t3_read_reg(adap, A_TP_TM_PIO_DATA);
3753 	return 0;
3754 }
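/*
 * Example of the conversion above (assuming core_ticks_per_usec() == 200,
 * i.e. a 200 MHz core clock): ipg == 1000 (100 ns) becomes
 * (1000 * 200 + 5000) / 10000 == 20 core clocks; the +5000 rounds to the
 * nearest clock.
 */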
3755 
3756 /**
3757  *	t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3758  *	@adap: the adapter
3759  *	@sched: the scheduler index
3760  *	@kbps: where to store the rate in Kbps
3761  *	@ipg: where to store the interpacket delay in tenths of nanoseconds
3762  *
3763  *	Return the current configuration of a HW Tx scheduler.
3764  */
3765 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3766 		     unsigned int *ipg)
3767 {
3768 	unsigned int v, addr, bpt, cpt;
3769 
3770 	if (kbps) {
3771 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3772 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3773 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3774 		if (sched & 1)
3775 			v >>= 16;
3776 		bpt = (v >> 8) & 0xff;
3777 		cpt = v & 0xff;
3778 		if (!cpt)
3779 			*kbps = 0;        /* scheduler disabled */
3780 		else {
3781 			v = (adap->params.vpd.cclk * 1000) / cpt;
3782 			*kbps = (v * bpt) / 125;
3783 		}
3784 	}
3785 	if (ipg) {
3786 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3787 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3788 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3789 		if (sched & 1)
3790 			v >>= 16;
3791 		v &= 0xffff;
3792 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3793 	}
3794 }
3795 
3796 /**
3797  *	tp_init - configure TP
3798  *	@adap: the adapter
3799  *	@p: TP configuration parameters
3800  *
3801  *	Initializes the TP HW module.
3802  */
3803 static int tp_init(adapter_t *adap, const struct tp_params *p)
3804 {
3805 	int busy = 0;
3806 
3807 	tp_config(adap, p);
3808 	t3_set_vlan_accel(adap, 3, 0);
3809 
3810 	if (is_offload(adap)) {
3811 		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3812 		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3813 		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3814 				       0, 1000, 5);
3815 		if (busy)
3816 			CH_ERR(adap, "TP initialization timed out\n");
3817 	}
3818 
3819 	if (!busy)
3820 		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3821 	return busy;
3822 }
3823 
3824 /**
3825  *	t3_mps_set_active_ports - configure port failover
3826  *	@adap: the adapter
3827  *	@port_mask: bitmap of active ports
3828  *
3829  *	Sets the active ports according to the supplied bitmap.
3830  */
3831 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3832 {
3833 	if (port_mask & ~((1 << adap->params.nports) - 1))
3834 		return -EINVAL;
3835 	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3836 			 port_mask << S_PORT0ACTIVE);
3837 	return 0;
3838 }
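/*
 * Example: on a two-port adapter, t3_mps_set_active_ports(adap, 1) leaves
 * only port 0 active (F_PORT0ACTIVE), while a mask of 3 re-activates both
 * ports.
 */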
3839 
3840 /**
3841  * 	chan_init_hw - channel-dependent HW initialization
3842  *	@adap: the adapter
3843  *	@chan_map: bitmap of Tx channels being used
3844  *
3845  *	Perform the bits of HW initialization that are dependent on the Tx
3846  *	channels being used.
3847  */
3848 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3849 {
3850 	int i;
3851 
3852 	if (chan_map != 3) {                                 /* one channel */
3853 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3854 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3855 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3856 			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3857 					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3858 		t3_write_reg(adap, A_PM1_TX_CFG,
3859 			     chan_map == 1 ? 0xffffffff : 0);
3860 		if (chan_map == 2)
3861 			t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3862 				     V_TX_MOD_QUEUE_REQ_MAP(0xff));
3863 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3864 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3865 	} else {                                             /* two channels */
3866 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3867 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3868 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3869 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3870 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3871 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3872 			     F_ENFORCEPKT);
3873 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3874 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3875 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3876 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3877 		for (i = 0; i < 16; i++)
3878 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3879 				     (i << 16) | 0x1010);
3880 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3881 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
3882 	}
3883 }
3884 
3885 static int calibrate_xgm(adapter_t *adapter)
3886 {
3887 	if (uses_xaui(adapter)) {
3888 		unsigned int v, i;
3889 
3890 		for (i = 0; i < 5; ++i) {
3891 			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3892 			(void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3893 			msleep(1);
3894 			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3895 			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3896 				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3897 					     V_XAUIIMP(G_CALIMP(v) >> 2));
3898 				return 0;
3899 			}
3900 		}
3901 		CH_ERR(adapter, "MAC calibration failed\n");
3902 		return -1;
3903 	} else {
3904 		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3905 			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3906 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3907 				 F_XGM_IMPSETUPDATE);
3908 	}
3909 	return 0;
3910 }
3911 
3912 static void calibrate_xgm_t3b(adapter_t *adapter)
3913 {
3914 	if (!uses_xaui(adapter)) {
3915 		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3916 			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3917 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3918 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3919 				 F_XGM_IMPSETUPDATE);
3920 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3921 				 0);
3922 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3923 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3924 	}
3925 }
3926 
3927 struct mc7_timing_params {
3928 	unsigned char ActToPreDly;
3929 	unsigned char ActToRdWrDly;
3930 	unsigned char PreCyc;
3931 	unsigned char RefCyc[5];
3932 	unsigned char BkCyc;
3933 	unsigned char WrToRdDly;
3934 	unsigned char RdToWrDly;
3935 };
3936 
3937 /*
3938  * Write a value to a register and check that the write completed.  These
3939  * writes normally complete in a cycle or two, so one read should suffice.
3940  * The very first read exists to flush the posted write to the device.
3941  */
3942 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3943 {
3944 	t3_write_reg(adapter,	addr, val);
3945 	(void) t3_read_reg(adapter, addr);                   /* flush */
3946 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3947 		return 0;
3948 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3949 	return -EIO;
3950 }
3951 
3952 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3953 {
3954 	static const unsigned int mc7_mode[] = {
3955 		0x632, 0x642, 0x652, 0x432, 0x442
3956 	};
3957 	static const struct mc7_timing_params mc7_timings[] = {
3958 		{ 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3959 		{ 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3960 		{ 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3961 		{ 9,  3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3962 		{ 9,  4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3963 	};
3964 
3965 	u32 val;
3966 	unsigned int width, density, slow, attempts;
3967 	adapter_t *adapter = mc7->adapter;
3968 	const struct mc7_timing_params *p = &mc7_timings[mem_type];
3969 
3970 	if (!mc7->size)
3971 		return 0;
3972 
3973 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3974 	slow = val & F_SLOW;
3975 	width = G_WIDTH(val);
3976 	density = G_DEN(val);
3977 
3978 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3979 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */
3980 	msleep(1);
3981 
3982 	if (!slow) {
3983 		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3984 		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3985 		msleep(1);
3986 		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3987 		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3988 			CH_ERR(adapter, "%s MC7 calibration timed out\n",
3989 			       mc7->name);
3990 			goto out_fail;
3991 		}
3992 	}
3993 
3994 	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3995 		     V_ACTTOPREDLY(p->ActToPreDly) |
3996 		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3997 		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3998 		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3999 
4000 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
4001 		     val | F_CLKEN | F_TERM150);
4002 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
4003 
4004 	if (!slow)
4005 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
4006 				 F_DLLENB);
4007 	udelay(1);
4008 
4009 	val = slow ? 3 : 6;
4010 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
4011 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
4012 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
4013 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
4014 		goto out_fail;
4015 
4016 	if (!slow) {
4017 		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
4018 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
4019 				 F_DLLRST, 0);
4020 		udelay(5);
4021 	}
4022 
4023 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
4024 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
4025 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
4026 	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
4027 		       mc7_mode[mem_type]) ||
4028 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
4029 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
4030 		goto out_fail;
4031 
4032 	/* clock value is in KHz; compute clock cycles per 7.8125us refresh */
4033 	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;  /* ns */
4034 	mc7_clock /= 1000000;                          /* KHz->MHz, ns->us */
4035 
4036 	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
4037 		     F_PERREFEN | V_PREREFDIV(mc7_clock));
4038 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
4039 
4040 	t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
4041 		     F_ECCGENEN | F_ECCCHKEN);
4042 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
4043 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
4044 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
4045 		     (mc7->size << width) - 1);
4046 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
4047 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
4048 
4049 	attempts = 50;
4050 	do {
4051 		msleep(250);
4052 		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
4053 	} while ((val & F_BUSY) && --attempts);
4054 	if (val & F_BUSY) {
4055 		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
4056 		goto out_fail;
4057 	}
4058 
4059 	/* Enable normal memory accesses. */
4060 	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
4061 	return 0;
4062 
4063  out_fail:
4064 	return -1;
4065 }
4066 
4067 static void config_pcie(adapter_t *adap)
4068 {
4069 	static const u16 ack_lat[4][6] = {
4070 		{ 237, 416, 559, 1071, 2095, 4143 },
4071 		{ 128, 217, 289, 545, 1057, 2081 },
4072 		{ 73, 118, 154, 282, 538, 1050 },
4073 		{ 67, 107, 86, 150, 278, 534 }
4074 	};
4075 	static const u16 rpl_tmr[4][6] = {
4076 		{ 711, 1248, 1677, 3213, 6285, 12429 },
4077 		{ 384, 651, 867, 1635, 3171, 6243 },
4078 		{ 219, 354, 462, 846, 1614, 3150 },
4079 		{ 201, 321, 258, 450, 834, 1602 }
4080 	};
4081 
4082 	u16 val, devid;
4083 	unsigned int log2_width, pldsize;
4084 	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
4085 
4086 	t3_os_pci_read_config_2(adap,
4087 				adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4088 				&val);
4089 	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
4090 
4091 	/*
4092 	 * Gen2 adapter pcie bridge compatibility requires minimum
4093 	 * Max_Read_Request_size
4094 	 */
4095 	t3_os_pci_read_config_2(adap, 0x2, &devid);
4096 	if (devid == 0x37) {
4097 		t3_os_pci_write_config_2(adap,
4098 		    adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4099 		    val & ~PCI_EXP_DEVCTL_READRQ & ~PCI_EXP_DEVCTL_PAYLOAD);
4100 		pldsize = 0;
4101 	}
4102 
4103 	t3_os_pci_read_config_2(adap,
4104 				adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
4105 			       	&val);
4106 
4107 	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
4108 	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
4109 			G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
4110 	log2_width = fls(adap->params.pci.width) - 1;
4111 	acklat = ack_lat[log2_width][pldsize];
4112 	if (val & 1)                            /* check LOsEnable */
4113 		acklat += fst_trn_tx * 4;
4114 	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
4115 
4116 	if (adap->params.rev == 0)
4117 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
4118 				 V_T3A_ACKLAT(M_T3A_ACKLAT),
4119 				 V_T3A_ACKLAT(acklat));
4120 	else
4121 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
4122 				 V_ACKLAT(acklat));
4123 
4124 	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
4125 			 V_REPLAYLMT(rpllmt));
4126 
4127 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
4128 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
4129 			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
4130 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
4131 }
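/*
 * Example of the table lookup above: an x8 link (log2_width == fls(8) - 1
 * == 3) with a 256-byte max payload (pldsize == 1) selects
 * ack_lat[3][1] == 107 and rpl_tmr[3][1] == 321, before the fst_trn
 * adjustments.
 */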
4132 
4133 /**
4134  * 	t3_init_hw - initialize and configure T3 HW modules
4135  * 	@adapter: the adapter
4136  * 	@fw_params: initial parameters to pass to firmware (optional)
4137  *
4138  *	Initialize and configure T3 HW modules.  This performs the
4139  *	initialization steps that need to be done once after a card is reset.
4140  *	MAC and PHY initialization is handled separately whenever a port is
4141  *	enabled.
4142  *
4143  *	@fw_params is passed to FW and its value is platform dependent.
4144  *	Only the top 8 bits are available for use, the rest must be 0.
4145  */
4146 int t3_init_hw(adapter_t *adapter, u32 fw_params)
4147 {
4148 	int err = -EIO, attempts, i;
4149 	const struct vpd_params *vpd = &adapter->params.vpd;
4150 
4151 	if (adapter->params.rev > 0)
4152 		calibrate_xgm_t3b(adapter);
4153 	else if (calibrate_xgm(adapter))
4154 		goto out_err;
4155 
4156 	if (adapter->params.nports > 2)
4157 		t3_mac_init(&adap2pinfo(adapter, 0)->mac);
4158 
4159 	if (vpd->mclk) {
4160 		partition_mem(adapter, &adapter->params.tp);
4161 
4162 		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
4163 		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
4164 		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
4165 		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
4166 			        adapter->params.mc5.nfilters,
4167 			       	adapter->params.mc5.nroutes))
4168 			goto out_err;
4169 
4170 		for (i = 0; i < 32; i++)
4171 			if (clear_sge_ctxt(adapter, i, F_CQ))
4172 				goto out_err;
4173 	}
4174 
4175 	if (tp_init(adapter, &adapter->params.tp))
4176 		goto out_err;
4177 
4178 	t3_tp_set_coalescing_size(adapter,
4179 				  min(adapter->params.sge.max_pkt_size,
4180 				      MAX_RX_COALESCING_LEN), 1);
4181 	t3_tp_set_max_rxsize(adapter,
4182 			     min(adapter->params.sge.max_pkt_size, 16384U));
4183 	ulp_config(adapter, &adapter->params.tp);
4184 	if (is_pcie(adapter))
4185 		config_pcie(adapter);
4186 	else
4187 		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
4188 				 F_DMASTOPEN | F_CLIDECEN);
4189 
4190 	if (adapter->params.rev == T3_REV_C)
4191 		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
4192 				 F_CFG_CQE_SOP_MASK);
4193 
4194 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
4195 	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
4196 	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
4197 	chan_init_hw(adapter, adapter->params.chan_map);
4198 	t3_sge_init(adapter, &adapter->params.sge);
4199 	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
4200 
4201 	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
4202 
4203 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
4204 	t3_write_reg(adapter, A_CIM_BOOT_CFG,
4205 		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
4206 	(void) t3_read_reg(adapter, A_CIM_BOOT_CFG);    /* flush */
4207 
4208 	attempts = 100;
4209 	do {                          /* wait for uP to initialize */
4210 		msleep(20);
4211 	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
4212 	if (!attempts) {
4213 		CH_ERR(adapter, "uP initialization timed out\n");
4214 		goto out_err;
4215 	}
4216 
4217 	err = 0;
4218  out_err:
4219 	return err;
4220 }
4221 
4222 /**
4223  *	get_pci_mode - determine a card's PCI mode
4224  *	@adapter: the adapter
4225  *	@p: where to store the PCI settings
4226  *
4227  *	Determines a card's PCI mode and associated parameters, such as speed
4228  *	and width.
4229  */
4230 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
4231 {
4232 	static unsigned short speed_map[] = { 33, 66, 100, 133 };
4233 	u32 pci_mode, pcie_cap;
4234 
4235 	pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4236 	if (pcie_cap) {
4237 		u16 val;
4238 
4239 		p->variant = PCI_VARIANT_PCIE;
4240 		p->pcie_cap_addr = pcie_cap;
4241 		t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
4242 					&val);
4243 		p->width = (val >> 4) & 0x3f;
4244 		return;
4245 	}
4246 
4247 	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
4248 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
4249 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
4250 	pci_mode = G_PCIXINITPAT(pci_mode);
4251 	if (pci_mode == 0)
4252 		p->variant = PCI_VARIANT_PCI;
4253 	else if (pci_mode < 4)
4254 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
4255 	else if (pci_mode < 8)
4256 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
4257 	else
4258 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
4259 }
4260 
4261 /**
4262  *	init_link_config - initialize a link's SW state
4263  *	@lc: structure holding the link state
4264  *	@caps: link capabilities
4265  *
4266  *	Initializes the SW state maintained for each link, including the link's
4267  *	capabilities and default speed/duplex/flow-control/autonegotiation
4268  *	settings.
4269  */
4270 static void __devinit init_link_config(struct link_config *lc,
4271 				       unsigned int caps)
4272 {
4273 	lc->supported = caps;
4274 	lc->requested_speed = lc->speed = SPEED_INVALID;
4275 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
4276 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4277 	if (lc->supported & SUPPORTED_Autoneg) {
4278 		lc->advertising = lc->supported;
4279 		lc->autoneg = AUTONEG_ENABLE;
4280 		lc->requested_fc |= PAUSE_AUTONEG;
4281 	} else {
4282 		lc->advertising = 0;
4283 		lc->autoneg = AUTONEG_DISABLE;
4284 	}
4285 }
4286 
4287 /**
4288  *	mc7_calc_size - calculate MC7 memory size
4289  *	@cfg: the MC7 configuration
4290  *
4291  *	Calculates the size of an MC7 memory in bytes from the value of its
4292  *	configuration register.
4293  */
4294 static unsigned int __devinit mc7_calc_size(u32 cfg)
4295 {
4296 	unsigned int width = G_WIDTH(cfg);
4297 	unsigned int banks = !!(cfg & F_BKS) + 1;
4298 	unsigned int org = !!(cfg & F_ORG) + 1;
4299 	unsigned int density = G_DEN(cfg);
4300 	unsigned int MBs = ((256 << density) * banks) / (org << width);
4301 
4302 	return MBs << 20;
4303 }
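/*
 * Worked example: a configuration decoding to width == 1, two banks
 * (F_BKS set), org == 2 (F_ORG set) and density == 1 gives
 * MBs == ((256 << 1) * 2) / (2 << 1) == 256, i.e. a 256 MB part.
 */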
4304 
4305 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
4306 			       unsigned int base_addr, const char *name)
4307 {
4308 	u32 cfg;
4309 
4310 	mc7->adapter = adapter;
4311 	mc7->name = name;
4312 	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
4313 	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
4314 	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4315 	mc7->width = G_WIDTH(cfg);
4316 }
4317 
4318 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
4319 {
4320 	u16 devid;
4321 
4322 	mac->adapter = adapter;
4323 	mac->multiport = adapter->params.nports > 2;
4324 	if (mac->multiport) {
4325 		mac->ext_port = (unsigned char)index;
4326 		mac->nucast = 8;
4327 	} else
4328 		mac->nucast = 1;
4329 
4330 	/* A Gen2 adapter uses VPD xauicfg[] to notify the driver which MAC
4331 	   is connected to each port; it is supposed to use xgmac0 for both
4332 	   ports. */
4333 	t3_os_pci_read_config_2(adapter, 0x2, &devid);
4334 
4335 	if (mac->multiport ||
4336 		(!adapter->params.vpd.xauicfg[1] && (devid==0x37)))
4337 			index  = 0;
4338 
4339 	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
4340 
4341 	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
4342 		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
4343 			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
4344 		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
4345 				 F_ENRGMII, 0);
4346 	}
4347 }
4348 
4349 /**
4350  *	early_hw_init - HW initialization done at card detection time
4351  *	@adapter: the adapter
4352  *	@ai: contains information about the adapter type and properties
4353  *
4354  *	Performs the part of HW initialization that is done early on when the
4355  *	driver first detects the card.  Most of the HW state is initialized
4356  *	lazily later on when a port or an offload function are first used.
4357  */
4358 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
4359 {
4360 	u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
4361 			      3 : 2);
4362 	u32 gpio_out = ai->gpio_out;
4363 
4364 	mi1_init(adapter, ai);
4365 	t3_write_reg(adapter, A_I2C_CFG,                  /* set for 80KHz */
4366 		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
4367 	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
4368 		     gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
4369 	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
4370 	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
4371 
4372 	if (adapter->params.rev == 0 || !uses_xaui(adapter))
4373 		val |= F_ENRGMII;
4374 
4375 	/* Enable MAC clocks so we can access the registers */
4376 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4377 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4378 
4379 	val |= F_CLKDIVRESET_;
4380 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4381 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4382 	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
4383 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4384 }
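/*
 * Example of the I2C divider above (assuming cclk == 200000, i.e. a
 * 200 MHz core clock): V_I2C_CLKDIV(200000 / 80 - 1) programs 2499; the
 * -1 suggests the hardware divides by N + 1, so 200 MHz / 2500 == 80 KHz,
 * matching the comment.
 */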
4385 
4386 /**
4387  *	t3_reset_adapter - reset the adapter
4388  *	@adapter: the adapter
4389  *
4390  * 	Reset the adapter.
4391  */
4392 int t3_reset_adapter(adapter_t *adapter)
4393 {
4394 	int i, save_and_restore_pcie =
4395 	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
4396 	uint16_t devid = 0;
4397 
4398 	if (save_and_restore_pcie)
4399 		t3_os_pci_save_state(adapter);
4400 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
4401 
4402 	/*
4403 	 * Delay: give the device some time to reset fully.
4404 	 * XXX The delay time should be tuned.
4405 	 */
4406 	for (i = 0; i < 10; i++) {
4407 		msleep(50);
4408 		t3_os_pci_read_config_2(adapter, 0x00, &devid);
4409 		if (devid == 0x1425)
4410 			break;
4411 	}
4412 
4413 	if (devid != 0x1425)
4414 		return -1;
4415 
4416 	if (save_and_restore_pcie)
4417 		t3_os_pci_restore_state(adapter);
4418 	return 0;
4419 }
4420 
4421 static int init_parity(adapter_t *adap)
4422 {
4423 	int i, err, addr;
4424 
4425 	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
4426 		return -EBUSY;
4427 
4428 	for (err = i = 0; !err && i < 16; i++)
4429 		err = clear_sge_ctxt(adap, i, F_EGRESS);
4430 	for (i = 0xfff0; !err && i <= 0xffff; i++)
4431 		err = clear_sge_ctxt(adap, i, F_EGRESS);
4432 	for (i = 0; !err && i < SGE_QSETS; i++)
4433 		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
4434 	if (err)
4435 		return err;
4436 
4437 	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
4438 	for (i = 0; i < 4; i++)
4439 		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
4440 			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
4441 				     F_IBQDBGWR | V_IBQDBGQID(i) |
4442 				     V_IBQDBGADDR(addr));
4443 			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
4444 					      F_IBQDBGBUSY, 0, 2, 1);
4445 			if (err)
4446 				return err;
4447 		}
4448 	return 0;
4449 }
4450 
4451 /**
4452  *	t3_prep_adapter - prepare SW and HW for operation
4453  *	@adapter: the adapter
4454  *	@ai: contains information about the adapter type and properties
4455  *
4456  *	Initialize adapter SW state for the various HW modules, set initial
4457  *	values for some adapter tunables, take PHYs out of reset, and
4458  *	initialize the MDIO interface.
4459  */
4460 int __devinit t3_prep_adapter(adapter_t *adapter,
4461 			      const struct adapter_info *ai, int reset)
4462 {
4463 	int ret;
4464 	unsigned int i, j = 0;
4465 
4466 	get_pci_mode(adapter, &adapter->params.pci);
4467 
4468 	adapter->params.info = ai;
4469 	adapter->params.nports = ai->nports0 + ai->nports1;
4470 	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
4471 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
4472 
4473 	/*
4474 	 * We used to only run the "adapter check task" once a second if
4475 	 * we had PHYs which didn't support interrupts (we would check
4476 	 * their link status once a second).  Now we check other conditions
4477 	 * in that routine which would [potentially] impose a very high
4478 	 * interrupt load on the system.  As such, we now always scan the
4479 	 * adapter state once a second ...
4480 	 */
4481 	adapter->params.linkpoll_period = 10;	/* 1 second, in 0.1s units */
4482 
4483 	if (adapter->params.nports > 2)
4484 		adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
4485 	else
4486 		adapter->params.stats_update_period = is_10G(adapter) ?
4487 			MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
4488 	adapter->params.pci.vpd_cap_addr =
4489 		t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4490 
4491 	ret = get_vpd_params(adapter, &adapter->params.vpd);
4492 	if (ret < 0)
4493 		return ret;
4494 
4495 	if (reset && t3_reset_adapter(adapter))
4496 		return -1;
4497 
4498 	if (adapter->params.vpd.mclk) {
4499 		struct tp_params *p = &adapter->params.tp;
4500 
4501 		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
4502 		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
4503 		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
4504 
4505 		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
4506 		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
4507 		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
4508 		p->cm_size = t3_mc7_size(&adapter->cm);
4509 		p->chan_rx_size = p->pmrx_size / 2;     /* only 1 Rx channel */
4510 		p->chan_tx_size = p->pmtx_size / p->nchan;
4511 		p->rx_pg_size = 64 * 1024;
4512 		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
4513 		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
4514 		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
4515 		p->ntimer_qs = p->cm_size >= (128 << 20) ||
4516 			       adapter->params.rev > 0 ? 12 : 6;
4517 		p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
4518 			 1;
4519 		p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
4520 	}
4521 
4522 	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
4523 				  t3_mc7_size(&adapter->pmtx) &&
4524 				  t3_mc7_size(&adapter->cm);
4525 
4526 	t3_sge_prep(adapter, &adapter->params.sge);
4527 
4528 	if (is_offload(adapter)) {
4529 		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
4530 		/* PR 6487. TOE and filtering are mutually exclusive */
4531 		adapter->params.mc5.nfilters = 0;
4532 		adapter->params.mc5.nroutes = 0;
4533 		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
4534 
4535 		init_mtus(adapter->params.mtus);
4536 		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4537 	}
4538 
4539 	early_hw_init(adapter, ai);
4540 	ret = init_parity(adapter);
4541 	if (ret)
4542 		return ret;
4543 
4544 	if (adapter->params.nports > 2 &&
4545 	    (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
4546 		return ret;
4547 
4548 	for_each_port(adapter, i) {
4549 		u8 hw_addr[6];
4550 		const struct port_type_info *pti;
4551 		struct port_info *p = adap2pinfo(adapter, i);
4552 
4553 		for (;;) {
4554 			unsigned port_type = adapter->params.vpd.port_type[j];
4555 			if (port_type) {
4556 				if (port_type < ARRAY_SIZE(port_types)) {
4557 					pti = &port_types[port_type];
4558 					break;
4559 				} else
4560 					return -EINVAL;
4561 			}
4562 			j++;
4563 			if (j >= ARRAY_SIZE(adapter->params.vpd.port_type))
4564 				return -EINVAL;
4565 		}
4566 		ret = pti->phy_prep(p, ai->phy_base_addr + j,
4567 				    ai->mdio_ops);
4568 		if (ret)
4569 			return ret;
4570 		mac_prep(&p->mac, adapter, j);
4571 		++j;
4572 
4573 		/*
4574 		 * The VPD EEPROM stores the base Ethernet address for the
4575 		 * card.  A port's address is derived from the base by adding
4576 		 * the port's index to the base's low octet.
4577 		 */
4578 		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
4579 		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
4580 
4581 		t3_os_set_hw_addr(adapter, i, hw_addr);
4582 		init_link_config(&p->link_config, p->phy.caps);
4583 		p->phy.ops->power_down(&p->phy, 1);
4584 
4585 		/*
4586 		 * If the PHY doesn't support interrupts for link status
4587 		 * changes, schedule a scan of the adapter links at least
4588 		 * once a second.
4589 		 */
4590 		if (!(p->phy.caps & SUPPORTED_IRQ) &&
4591 		    adapter->params.linkpoll_period > 10)
4592 			adapter->params.linkpoll_period = 10;
4593 	}
4594 
4595 	return 0;
4596 }
4597 
4598 /**
4599  *	t3_reinit_adapter - prepare HW for operation again
4600  *	@adapter: the adapter
4601  *
4602  *	Put HW in the same state as @t3_prep_adapter without any changes to
4603  *	SW state.  This is a cut down version of @t3_prep_adapter intended
4604  *	to be used after events that wipe out HW state but preserve SW state,
4605  *	e.g., EEH.  The device must be reset before calling this.
4606  */
4607 int t3_reinit_adapter(adapter_t *adap)
4608 {
4609 	unsigned int i;
4610 	int ret, j = 0;
4611 
4612 	early_hw_init(adap, adap->params.info);
4613 	ret = init_parity(adap);
4614 	if (ret)
4615 		return ret;
4616 
4617 	if (adap->params.nports > 2 &&
4618 	    (ret = t3_vsc7323_init(adap, adap->params.nports)))
4619 		return ret;
4620 
4621 	for_each_port(adap, i) {
4622 		const struct port_type_info *pti;
4623 		struct port_info *p = adap2pinfo(adap, i);
4624 
4625 		for (;;) {
4626 			unsigned port_type = adap->params.vpd.port_type[j];
4627 			if (port_type) {
4628 				if (port_type < ARRAY_SIZE(port_types)) {
4629 					pti = &port_types[port_type];
4630 					break;
4631 				} else
4632 					return -EINVAL;
4633 			}
4634 			j++;
4635 			if (j >= ARRAY_SIZE(adap->params.vpd.port_type))
4636 				return -EINVAL;
4637 		}
4638 		ret = pti->phy_prep(p, p->phy.addr, NULL);
4639 		if (ret)
4640 			return ret;
4641 		p->phy.ops->power_down(&p->phy, 1);
4642 	}
4643 	return 0;
4644 }
4645 
4646 void t3_led_ready(adapter_t *adapter)
4647 {
4648 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
4649 			 F_GPIO0_OUT_VAL);
4650 }
4651 
4652 void t3_port_failover(adapter_t *adapter, int port)
4653 {
4654 	u32 val;
4655 
4656 	val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4657 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4658 			 val);
4659 }
4660 
4661 void t3_failover_done(adapter_t *adapter, int port)
4662 {
4663 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4664 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
4665 }
4666 
4667 void t3_failover_clear(adapter_t *adapter)
4668 {
4669 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4670 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
4671 }
4672 
4673 static int t3_cim_hac_read(adapter_t *adapter, u32 addr, u32 *val)
4674 {
4675 	u32 v;
4676 
4677 	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4678 	if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4679 				F_HOSTBUSY, 0, 10, 10, &v))
4680 		return -EIO;
4681 
4682 	*val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);
4683 
4684 	return 0;
4685 }
4686 
4687 static int t3_cim_hac_write(adapter_t *adapter, u32 addr, u32 val)
4688 {
4689 	u32 v;
4690 
4691 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, val);
4692 
4693 	addr |= F_HOSTWRITE;
4694 	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4695 
4696 	if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4697 				F_HOSTBUSY, 0, 10, 5, &v))
4698 		return -EIO;
4699 	return 0;
4700 }
4701 
4702 int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index,
4703 		 u32 *size, void *data)
4704 {
4705 	u32 v, *buf = data;
4706 	int i, cnt,  ret;
4707 
4708 	if (*size < LA_ENTRIES * 4)
4709 		return -EINVAL;
4710 
4711 	ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4712 	if (ret)
4713 		goto out;
4714 
4715 	*stopped = !(v & 1);
4716 
4717 	/* Freeze LA */
4718 	if (!*stopped) {
4719 		ret = t3_cim_hac_write(adapter, LA_CTRL, 0);
4720 		if (ret)
4721 			goto out;
4722 	}
4723 
4724 	for (i = 0; i < LA_ENTRIES; i++) {
4725 		v = (i << 2) | (1 << 1);
4726 		ret = t3_cim_hac_write(adapter, LA_CTRL, v);
4727 		if (ret)
4728 			goto out;
4729 
4730 		ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4731 		if (ret)
4732 			goto out;
4733 
4734 		cnt = 20;
4735 		while ((v & (1 << 1)) && cnt) {
4736 			udelay(5);
4737 			--cnt;
4738 			ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4739 			if (ret)
4740 				goto out;
4741 		}
4742 
4743 		if (v & (1 << 1))
4744 			return -EIO;
4745 
4746 		ret = t3_cim_hac_read(adapter, LA_DATA, &v);
4747 		if (ret)
4748 			goto out;
4749 
4750 		*buf++ = v;
4751 	}
4752 
4753 	ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4754 	if (ret)
4755 		goto out;
4756 
4757 	*index = (v >> 16) + 4;
4758 	*size = LA_ENTRIES * 4;
4759 out:
4760 	/* Unfreeze LA */
4761 	t3_cim_hac_write(adapter, LA_CTRL, 1);
4762 	return ret;
4763 }
4764 
4765 int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data)
4766 {
4767 	u32 v, *buf = data;
4768 	int i, j, ret;
4769 
4770 	if (*size < IOQ_ENTRIES * sizeof(struct t3_ioq_entry))
4771 		return -EINVAL;
4772 
4773 	for (i = 0; i < 4; i++) {
4774 		ret = t3_cim_hac_read(adapter, (4 * i), &v);
4775 		if (ret)
4776 			goto out;
4777 
4778 		*buf++ = v;
4779 	}
4780 
4781 	for (i = 0; i < IOQ_ENTRIES; i++) {
4782 		u32 base_addr = 0x10 * (i + 1);
4783 
4784 		for (j = 0; j < 4; j++) {
4785 			ret = t3_cim_hac_read(adapter, base_addr + 4 * j, &v);
4786 			if (ret)
4787 				goto out;
4788 
4789 			*buf++ = v;
4790 		}
4791 	}
4792 
4793 	*size = IOQ_ENTRIES * sizeof(struct t3_ioq_entry);
4794 
4795 out:
4796 	return ret;
4797 }
4798 
4799