xref: /freebsd/sys/dev/cxgb/common/cxgb_t3_hw.c (revision 2c32b50248b3c92a1bbd538dc97b249c4b932c3a)
1 /**************************************************************************
2 
3 Copyright (c) 2007-2009, Chelsio Inc.
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27 
28 ***************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 
34 #include <cxgb_include.h>
35 
36 #undef msleep
37 #define msleep t3_os_sleep
38 
39 /**
40  *	t3_wait_op_done_val - wait until an operation is completed
41  *	@adapter: the adapter performing the operation
42  *	@reg: the register to check for completion
43  *	@mask: a single-bit field within @reg that indicates completion
44  *	@polarity: the value of the field when the operation is completed
45  *	@attempts: number of check iterations
46  *	@delay: delay in usecs between iterations
47  *	@valp: where to store the value of the register at completion time
48  *
49  *	Wait until an operation is completed by checking a bit in a register
50  *	up to @attempts times.  If @valp is not NULL the value of the register
51  *	at the time it indicated completion is stored there.  Returns 0 if the
52  *	operation completes and	-EAGAIN	otherwise.
53  */
54 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
55 			int attempts, int delay, u32 *valp)
56 {
57 	while (1) {
58 		u32 val = t3_read_reg(adapter, reg);
59 
60 		if (!!(val & mask) == polarity) {
61 			if (valp)
62 				*valp = val;
63 			return 0;
64 		}
65 		if (--attempts == 0)
66 			return -EAGAIN;
67 		if (delay)
68 			udelay(delay);
69 	}
70 }
71 
72 /**
73  *	t3_write_regs - write a bunch of registers
74  *	@adapter: the adapter to program
75  *	@p: an array of register address/register value pairs
76  *	@n: the number of address/value pairs
77  *	@offset: register address offset
78  *
79  *	Takes an array of register address/register value pairs and writes each
80  *	value to the corresponding register.  Register addresses are adjusted
81  *	by the supplied offset.
82  */
83 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
84 		   unsigned int offset)
85 {
86 	while (n--) {
87 		t3_write_reg(adapter, p->reg_addr + offset, p->val);
88 		p++;
89 	}
90 }
91 
92 /**
93  *	t3_set_reg_field - set a register field to a value
94  *	@adapter: the adapter to program
95  *	@addr: the register address
96  *	@mask: specifies the portion of the register to modify
97  *	@val: the new value for the register field
98  *
99  *	Sets a register field specified by the supplied mask to the
100  *	given value.
101  */
102 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
103 {
104 	u32 v = t3_read_reg(adapter, addr) & ~mask;
105 
106 	t3_write_reg(adapter, addr, v | val);
107 	(void) t3_read_reg(adapter, addr);      /* flush */
108 }
109 
/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		/* Select the indirect register, then fetch its value. */
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}
132 
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.  Returns 0 on success, -EINVAL for an out-of-range request,
 *	or -EIO if the backdoor engine stays busy.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
	/*
	 * Per-width tables, indexed by mc7->width: how far each partial
	 * read is shifted and how many bits of the 64-bit result each
	 * backdoor access contributes.
	 */
	static int shift[] = { 0, 0, 16, 24 };
	static int step[]  = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;  /* # of 64-bit words */
	adapter_t *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* Scale the 64-bit word index into backdoor address units. */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		/* One 64-bit word takes 2^width backdoor accesses. */
		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			/* Issue the backdoor read and poll for completion. */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
				       start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* Full width: DATA0 is the low half, DATA1 the high. */
				val64 = t3_read_reg(adap,
						mc7->offset + A_MC7_BD_DATA0);
				val64 |= (u64)val << 32;
			} else {
				/* Narrower widths return partial words in DATA1. */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64)val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190 
191 /*
192  * Low-level I2C read and write routines.  These simply read and write a
193  * single byte with the option of indicating a "continue" if another operation
194  * is to be chained.  Generally most code will use higher-level routines to
195  * read and write to I2C Slave Devices.
196  */
197 #define I2C_ATTEMPTS 100
198 
199 /*
200  * Read an 8-bit value from the I2C bus.  If the "chained" parameter is
201  * non-zero then a STOP bit will not be written after the read command.  On
202  * error (the read timed out, etc.), a negative errno will be returned (e.g.
203  * -EAGAIN, etc.).  On success, the 8-bit value read from the I2C bus is
204  * stored into the buffer *valp and the value of the I2C ACK bit is returned
205  * as a 0/1 value.
206  */
207 int t3_i2c_read8(adapter_t *adapter, int chained, u8 *valp)
208 {
209 	int ret;
210 	u32 opval;
211 	MDIO_LOCK(adapter);
212 	t3_write_reg(adapter, A_I2C_OP,
213 		     F_I2C_READ | (chained ? F_I2C_CONT : 0));
214 	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
215 				  I2C_ATTEMPTS, 10, &opval);
216 	if (ret >= 0) {
217 		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
218 		*valp = G_I2C_DATA(t3_read_reg(adapter, A_I2C_DATA));
219 	}
220 	MDIO_UNLOCK(adapter);
221 	return ret;
222 }
223 
224 /*
225  * Write an 8-bit value to the I2C bus.  If the "chained" parameter is
226  * non-zero, then a STOP bit will not be written after the write command.  On
227  * error (the write timed out, etc.), a negative errno will be returned (e.g.
228  * -EAGAIN, etc.).  On success, the value of the I2C ACK bit is returned as a
229  * 0/1 value.
230  */
231 int t3_i2c_write8(adapter_t *adapter, int chained, u8 val)
232 {
233 	int ret;
234 	u32 opval;
235 	MDIO_LOCK(adapter);
236 	t3_write_reg(adapter, A_I2C_DATA, V_I2C_DATA(val));
237 	t3_write_reg(adapter, A_I2C_OP,
238 		     F_I2C_WRITE | (chained ? F_I2C_CONT : 0));
239 	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
240 				  I2C_ATTEMPTS, 10, &opval);
241 	if (ret >= 0)
242 		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
243 	MDIO_UNLOCK(adapter);
244 	return ret;
245 }
246 
247 /*
248  * Initialize MI1.
249  */
250 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
251 {
252         u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
253         u32 val = F_PREEN | V_CLKDIV(clkdiv);
254 
255         t3_write_reg(adap, A_MI1_CFG, val);
256 }
257 
258 #define MDIO_ATTEMPTS 20
259 
260 /*
261  * MI1 read/write operations for clause 22 PHYs.
262  */
263 int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
264 		int reg_addr, unsigned int *valp)
265 {
266 	int ret;
267 	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
268 
269 	if (mmd_addr)
270 		return -EINVAL;
271 
272 	MDIO_LOCK(adapter);
273 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
274 	t3_write_reg(adapter, A_MI1_ADDR, addr);
275 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
276 	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
277 	if (!ret)
278 		*valp = t3_read_reg(adapter, A_MI1_DATA);
279 	MDIO_UNLOCK(adapter);
280 	return ret;
281 }
282 
283 int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
284 		 int reg_addr, unsigned int val)
285 {
286 	int ret;
287 	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
288 
289 	if (mmd_addr)
290 		return -EINVAL;
291 
292 	MDIO_LOCK(adapter);
293 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
294 	t3_write_reg(adapter, A_MI1_ADDR, addr);
295 	t3_write_reg(adapter, A_MI1_DATA, val);
296 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
297 	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
298 	MDIO_UNLOCK(adapter);
299 	return ret;
300 }
301 
/* MDIO operation vector for clause 22 PHYs. */
static struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};
306 
/*
 * MI1 read/write operations for clause 45 PHYs.  Clause 45 uses two bus
 * cycles: an address cycle that loads the register address, then the
 * read/write cycle proper.
 */
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	/* ST = 0 selects clause 45 framing (clause 22 uses V_ST(1)). */
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	/* Address cycle (op 0): load the target register address. */
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		/* Read cycle (op 3): fetch the register contents. */
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}
332 
/*
 * Clause 45 MDIO write: address cycle (op 0) to load the register address,
 * then a write cycle (op 1) with the data.
 */
static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	/* ST = 0 selects clause 45 framing. */
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		/* Address cycle done; now perform the data write. */
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}
354 
/* MDIO operation vector for clause 45 PHYs. */
static struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
359 
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int regval;
	int err;

	err = mdio_read(phy, mmd, reg, &regval);
	if (err)
		return err;

	regval = (regval & ~clear) | set;
	return mdio_write(phy, mmd, reg, regval);
}
384 
385 /**
386  *	t3_phy_reset - reset a PHY block
387  *	@phy: the PHY to operate on
388  *	@mmd: the device address of the PHY block to reset
389  *	@wait: how long to wait for the reset to complete in 1ms increments
390  *
391  *	Resets a PHY block and optionally waits for the reset to complete.
392  *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
393  *	for 10G PHYs.
394  */
395 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
396 {
397 	int err;
398 	unsigned int ctl;
399 
400 	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
401 	if (err || !wait)
402 		return err;
403 
404 	do {
405 		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
406 		if (err)
407 			return err;
408 		ctl &= BMCR_RESET;
409 		if (ctl)
410 			msleep(1);
411 	} while (ctl && --wait);
412 
413 	return ctl ? -1 : 0;
414 }
415 
416 /**
417  *	t3_phy_advertise - set the PHY advertisement registers for autoneg
418  *	@phy: the PHY to operate on
419  *	@advert: bitmap of capabilities the PHY should advertise
420  *
421  *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
422  *	requested capabilities.
423  */
424 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
425 {
426 	int err;
427 	unsigned int val = 0;
428 
429 	err = mdio_read(phy, 0, MII_CTRL1000, &val);
430 	if (err)
431 		return err;
432 
433 	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
434 	if (advert & ADVERTISED_1000baseT_Half)
435 		val |= ADVERTISE_1000HALF;
436 	if (advert & ADVERTISED_1000baseT_Full)
437 		val |= ADVERTISE_1000FULL;
438 
439 	err = mdio_write(phy, 0, MII_CTRL1000, val);
440 	if (err)
441 		return err;
442 
443 	val = 1;
444 	if (advert & ADVERTISED_10baseT_Half)
445 		val |= ADVERTISE_10HALF;
446 	if (advert & ADVERTISED_10baseT_Full)
447 		val |= ADVERTISE_10FULL;
448 	if (advert & ADVERTISED_100baseT_Half)
449 		val |= ADVERTISE_100HALF;
450 	if (advert & ADVERTISED_100baseT_Full)
451 		val |= ADVERTISE_100FULL;
452 	if (advert & ADVERTISED_Pause)
453 		val |= ADVERTISE_PAUSE_CAP;
454 	if (advert & ADVERTISED_Asym_Pause)
455 		val |= ADVERTISE_PAUSE_ASYM;
456 	return mdio_write(phy, 0, MII_ADVERTISE, val);
457 }
458 
459 /**
460  *	t3_phy_advertise_fiber - set fiber PHY advertisement register
461  *	@phy: the PHY to operate on
462  *	@advert: bitmap of capabilities the PHY should advertise
463  *
464  *	Sets a fiber PHY's advertisement register to advertise the
465  *	requested capabilities.
466  */
467 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
468 {
469 	unsigned int val = 0;
470 
471 	if (advert & ADVERTISED_1000baseT_Half)
472 		val |= ADVERTISE_1000XHALF;
473 	if (advert & ADVERTISED_1000baseT_Full)
474 		val |= ADVERTISE_1000XFULL;
475 	if (advert & ADVERTISED_Pause)
476 		val |= ADVERTISE_1000XPAUSE;
477 	if (advert & ADVERTISED_Asym_Pause)
478 		val |= ADVERTISE_1000XPSE_ASYM;
479 	return mdio_write(phy, 0, MII_ADVERTISE, val);
480 }
481 
482 /**
483  *	t3_set_phy_speed_duplex - force PHY speed and duplex
484  *	@phy: the PHY to operate on
485  *	@speed: requested PHY speed
486  *	@duplex: requested PHY duplex
487  *
488  *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
489  *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
490  */
491 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
492 {
493 	int err;
494 	unsigned int ctl;
495 
496 	err = mdio_read(phy, 0, MII_BMCR, &ctl);
497 	if (err)
498 		return err;
499 
500 	if (speed >= 0) {
501 		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
502 		if (speed == SPEED_100)
503 			ctl |= BMCR_SPEED100;
504 		else if (speed == SPEED_1000)
505 			ctl |= BMCR_SPEED1000;
506 	}
507 	if (duplex >= 0) {
508 		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
509 		if (duplex == DUPLEX_FULL)
510 			ctl |= BMCR_FULLDPLX;
511 	}
512 	if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
513 		ctl |= BMCR_ANENABLE;
514 	return mdio_write(phy, 0, MII_BMCR, ctl);
515 }
516 
/* Enable the PHY's LASI (link alarm) interrupt by writing 1 to LASI_CTRL. */
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}
521 
/* Disable the PHY's LASI (link alarm) interrupt by writing 0 to LASI_CTRL. */
int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}
526 
/*
 * Clear any pending LASI interrupt.  LASI_STAT is presumably read-to-clear;
 * the value read is deliberately discarded.
 */
int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}
533 
534 int t3_phy_lasi_intr_handler(struct cphy *phy)
535 {
536 	unsigned int status;
537 	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
538 
539 	if (err)
540 		return err;
541 	return (status & 1) ?  cphy_cause_link_change : 0;
542 }
543 
/*
 * Static properties of the supported adapter types, indexed by adapter id
 * (see t3_get_adapter_info()).  Entries use positional initializers for
 * struct adapter_info; the final member is the adapter's name.  The { 0 }
 * entry is a placeholder for an unused id.
 */
static struct adapter_info t3_adap_info[] = {
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio PE9000" },
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio T302" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	  F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 1, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	  F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	  F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T320" },
	{ 4, 0, 0,
	  F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
	  F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
	  { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
	  &mi1_mdio_ops, "Chelsio T304" },
	{ 0 },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};
581 
582 /*
583  * Return the adapter_info structure with a given index.  Out-of-range indices
584  * return NULL.
585  */
586 const struct adapter_info *t3_get_adapter_info(unsigned int id)
587 {
588 	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
589 }
590 
/*
 * Per-port-type information: the routine used to prepare the cphy for a
 * PHY at the given MDIO address using the supplied MDIO operations.
 */
struct port_type_info {
	int (*phy_prep)(pinfo_t *pinfo, int phy_addr,
			const struct mdio_ops *ops);
};
595 
/*
 * PHY preparation handlers, presumably indexed by the port type read from
 * the VPD (see get_vpd_params()).  Index 0 has no PHY handler.
 */
static struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ t3_mv88e1xxx_phy_prep },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ t3_tn1010_phy_prep },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};
609 
/*
 * VPD_ENTRY(name, len) expands to the three fields making up one VPD-R
 * keyword entry: the two-character keyword, a length byte, and 'len' data
 * bytes.
 */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[16];
	u8  vpdr_tag;
	u8  vpdr_len[2];
	VPD_ENTRY(pn, 16);                     /* part number */
	VPD_ENTRY(ec, ECNUM_LEN);              /* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);             /* serial number */
	VPD_ENTRY(na, 12);                     /* MAC address base */
	VPD_ENTRY(cclk, 6);                    /* core clock */
	VPD_ENTRY(mclk, 6);                    /* mem clock */
	VPD_ENTRY(uclk, 6);                    /* uP clk */
	VPD_ENTRY(mdc, 6);                     /* MDIO clk */
	VPD_ENTRY(mt, 2);                      /* mem timing */
	VPD_ENTRY(xaui0cfg, 6);                /* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);                /* XAUI1 config */
	VPD_ENTRY(port0, 2);                   /* PHY0 complex */
	VPD_ENTRY(port1, 2);                   /* PHY1 complex */
	VPD_ENTRY(port2, 2);                   /* PHY2 complex */
	VPD_ENTRY(port3, 2);                   /* PHY3 complex */
	VPD_ENTRY(rv, 1);                      /* csum */
	u32 pad;                  /* for multiple-of-4 sizing and alignment */
};
641 
642 #define EEPROM_MAX_POLL   40
643 #define EEPROM_STAT_ADDR  0x4000
644 #define VPD_BASE          0xc00
645 
/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 *	Returns 0 on success, -EINVAL for a bad address, or -EIO on timeout.
 */
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* Addresses must be 32-bit aligned and within the EEPROM. */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	/* Poll until the hardware sets the flag bit to signal completion. */
	do {
		udelay(10);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
	/* VPD data is stored little-endian. */
	*data = le32_to_cpu(*data);
	return 0;
}
680 
/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  Returns 0 on success, -EINVAL for a bad address,
 *	or -EIO on timeout.
 */
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* Addresses must be 32-bit aligned and within the EEPROM. */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	/* Setting the flag bit in the address register starts the write. */
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	/* Poll until the hardware clears the flag bit on completion. */
	do {
		msleep(1);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
714 
715 /**
716  *	t3_seeprom_wp - enable/disable EEPROM write protection
717  *	@adapter: the adapter
718  *	@enable: 1 to enable write protection, 0 to disable it
719  *
720  *	Enables or disables write protection on the serial EEPROM.
721  */
722 int t3_seeprom_wp(adapter_t *adapter, int enable)
723 {
724 	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
725 }
726 
/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	return toupper(c) - 'A' + 10;
}
734 
/**
 * 	get_desc_len - get the length of a vpd descriptor.
 *	@adapter: the adapter
 *	@offset: first byte offset of the vpd descriptor
 *
 *	Retrieves the length of the small/large resource
 *	data type starting at offset.  Returns the total size of the
 *	descriptor (header bytes included) or a negative error code.
 */
static int get_desc_len(adapter_t *adapter, u32 offset)
{
	u32 read_offset, tmp, shift, len = 0;
	u8 tag, buf[8];
	int ret;

	/* EEPROM reads are 32-bit aligned; 'shift' selects the byte. */
	read_offset = offset & 0xfffffffc;
	shift = offset & 0x03;

	ret = t3_seeprom_read(adapter, read_offset, &tmp);
	if (ret < 0)
		return ret;

	*((u32 *)buf) = cpu_to_le32(tmp);

	tag = buf[shift];
	if (tag & 0x80) {
		/*
		 * Large resource: a 16-bit little-endian length follows the
		 * tag byte; add 3 for the tag and two length bytes.
		 */
		ret = t3_seeprom_read(adapter, read_offset + 4, &tmp);
		if (ret < 0)
			return ret;

		*((u32 *)(&buf[4])) = cpu_to_le32(tmp);
		len = (buf[shift + 1] & 0xff) +
		      ((buf[shift+2] << 8) & 0xff00) + 3;
	} else
		/* Small resource: length is the low 3 bits, plus the tag byte. */
		len = (tag & 0x07) + 1;

	return len;
}
772 
773 /**
774  *	is_end_tag - Check if a vpd tag is the end tag.
775  *	@adapter: the adapter
776  *	@offset: first byte offset of the tag
777  *
778  *	Checks if the tag located at offset is the end tag.
779  */
780 static int is_end_tag(adapter_t * adapter, u32 offset)
781 {
782 	u32 read_offset, shift, ret, tmp;
783 	u8 buf[4];
784 
785 	read_offset = offset & 0xfffffffc;
786 	shift = offset & 0x03;
787 
788 	ret = t3_seeprom_read(adapter, read_offset, &tmp);
789 	if (ret)
790 		return ret;
791 	*((u32 *)buf) = cpu_to_le32(tmp);
792 
793 	if (buf[shift] == 0x78)
794 		return 1;
795 	else
796 		return 0;
797 }
798 
/**
 *	t3_get_vpd_len - computes the length of a vpd structure
 *	@adapter: the adapter
 *	@vpd: contains the offset of first byte of vpd
 *
 *	Computes the length of the vpd structure starting at vpd->offset.
 *	Walks the resource descriptors until the end tag is found (or the
 *	MAX_VPD_BYTES window is exhausted) and returns the accumulated
 *	length plus one, or a negative error code on EEPROM read failure.
 */

int t3_get_vpd_len(adapter_t * adapter, struct generic_vpd *vpd)
{
	u32 len=0, offset;
	int inc, ret;

	offset = vpd->offset;

	while (offset < (vpd->offset + MAX_VPD_BYTES)) {
		/* is_end_tag returns 1 at the end tag, 0 otherwise, <0 on error */
		ret = is_end_tag(adapter, offset);
		if (ret < 0)
			return ret;
		else if (ret == 1)
			break;

		/* Skip over this descriptor (header included). */
		inc = get_desc_len(adapter, offset);
		if (inc < 0)
			return inc;
		len += inc;
		offset += inc;
	}
	/* +1 presumably accounts for the end tag byte -- TODO confirm */
	return (len + 1);
}
829 
830 /**
831  *	t3_read_vpd - reads the stream of bytes containing a vpd structure
832  *	@adapter: the adapter
833  *	@vpd: contains a buffer that would hold the stream of bytes
834  *
835  *	Reads the vpd structure starting at vpd->offset into vpd->data,
836  *	the length of the byte stream to read is vpd->len.
837  */
838 
839 int t3_read_vpd(adapter_t *adapter, struct generic_vpd *vpd)
840 {
841 	u32 i, ret;
842 
843 	for (i = 0; i < vpd->len; i += 4) {
844 		ret = t3_seeprom_read(adapter, vpd->offset + i,
845 				      (u32 *) &(vpd->data[i]));
846 		if (ret)
847 			return ret;
848 	}
849 
850 	return 0;
851 }
852 
853 
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.  Returns 0 on success
 *	or a negative error code if an EEPROM read fails.
 */
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	/* 0x82 is the VPD ID string tag; its presence selects the base. */
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* Read the whole t3_vpd structure, 32 bits at a time. */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* Clock and timing parameters are stored as decimal strings. */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
	memcpy(p->ec, vpd.ec_data, ECNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		/* Port types are stored as single hex digits. */
		p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
		p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
		p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
		p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* The MAC address base is stored as 12 hex digits. */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
908 
/*
 * BIOS boot header.  Presumably the start of a BIOS expansion ROM image
 * (see the 0xaa55 BOOT_SIGNATURE below) -- TODO confirm against the spec.
 */
typedef struct boot_header_s {
	u8	signature[2];	/* signature */
	u8	length;		/* image length (include header) */
	u8	offset[4];	/* initialization vector */
	u8	reserved[19];	/* reserved */
	u8	exheader[2];	/* offset to expansion header */
} boot_header_t;
917 
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,           /* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,   /* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */

	/* flash command opcodes (presumably the standard SPI NOR command set) */
	SF_PROG_PAGE    = 2,       /* program page */
	SF_WR_DISABLE   = 4,       /* disable writes */
	SF_RD_STATUS    = 5,       /* read status register */
	SF_WR_ENABLE    = 6,       /* enable writes */
	SF_RD_DATA_FAST = 0xb,     /* read flash */
	SF_ERASE_SECTOR = 0xd8,    /* erase sector */

	/* firmware image layout within the flash */
	FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
	FW_VERS_ADDR_PRE8 = 0x77ffc,/* flash address holding FW version pre8 */
	FW_MIN_SIZE = 8,           /* at least version and csum */
	FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
	FW_MAX_SIZE_PRE8 = FW_VERS_ADDR_PRE8 - FW_FLASH_BOOT_ADDR,

	/* BIOS boot image layout within the flash */
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC /* 1 byte * length increment  */
};
945 
946 /**
947  *	sf1_read - read data from the serial flash
948  *	@adapter: the adapter
949  *	@byte_cnt: number of bytes to read
950  *	@cont: whether another operation will be chained
951  *	@valp: where to store the read data
952  *
953  *	Reads up to 4 bytes of data from the serial flash.  The location of
954  *	the read needs to be specified prior to calling this by issuing the
955  *	appropriate commands to the serial flash.
956  */
957 static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
958 		    u32 *valp)
959 {
960 	int ret;
961 
962 	if (!byte_cnt || byte_cnt > 4)
963 		return -EINVAL;
964 	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
965 		return -EBUSY;
966 	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
967 	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
968 	if (!ret)
969 		*valp = t3_read_reg(adapter, A_SF_DATA);
970 	return ret;
971 }
972 
973 /**
974  *	sf1_write - write data to the serial flash
975  *	@adapter: the adapter
976  *	@byte_cnt: number of bytes to write
977  *	@cont: whether another operation will be chained
978  *	@val: value to write
979  *
980  *	Writes up to 4 bytes of data to the serial flash.  The location of
981  *	the write needs to be specified prior to calling this by issuing the
982  *	appropriate commands to the serial flash.
983  */
984 static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
985 		     u32 val)
986 {
987 	if (!byte_cnt || byte_cnt > 4)
988 		return -EINVAL;
989 	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
990 		return -EBUSY;
991 	t3_write_reg(adapter, A_SF_DATA, val);
992 	t3_write_reg(adapter, A_SF_OP,
993 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
994 	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
995 }
996 
997 /**
998  *	flash_wait_op - wait for a flash operation to complete
999  *	@adapter: the adapter
1000  *	@attempts: max number of polls of the status register
1001  *	@delay: delay between polls in ms
1002  *
1003  *	Wait for a flash operation to complete by polling the status register.
1004  */
1005 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
1006 {
1007 	int ret;
1008 	u32 status;
1009 
1010 	while (1) {
1011 		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
1012 		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
1013 			return ret;
1014 		if (!(status & 1))
1015 			return 0;
1016 		if (--attempts == 0)
1017 			return -EAGAIN;
1018 		if (delay)
1019 			msleep(delay);
1020 	}
1021 }
1022 
1023 /**
1024  *	t3_read_flash - read words from serial flash
1025  *	@adapter: the adapter
1026  *	@addr: the start address for the read
1027  *	@nwords: how many 32-bit words to read
1028  *	@data: where to store the read data
1029  *	@byte_oriented: whether to store data as bytes or as words
1030  *
1031  *	Read the specified number of 32-bit words from the serial flash.
1032  *	If @byte_oriented is set the read data is stored as a byte array
1033  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
1034  *	natural endianess.
1035  */
1036 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
1037 		  u32 *data, int byte_oriented)
1038 {
1039 	int ret;
1040 
1041 	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
1042 		return -EINVAL;
1043 
1044 	addr = swab32(addr) | SF_RD_DATA_FAST;
1045 
1046 	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
1047 	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
1048 		return ret;
1049 
1050 	for ( ; nwords; nwords--, data++) {
1051 		ret = sf1_read(adapter, 4, nwords > 1, data);
1052 		if (ret)
1053 			return ret;
1054 		if (byte_oriented)
1055 			*data = htonl(*data);
1056 	}
1057 	return 0;
1058 }
1059 
1060 /**
1061  *	t3_write_flash - write up to a page of data to the serial flash
1062  *	@adapter: the adapter
1063  *	@addr: the start address to write
1064  *	@n: length of data to write
1065  *	@data: the data to write
1066  *	@byte_oriented: whether to store data as bytes or as words
1067  *
1068  *	Writes up to a page of data (256 bytes) to the serial flash starting
1069  *	at the given address.
1070  *	If @byte_oriented is set the write data is stored as a 32-bit
1071  *	big-endian array, otherwise in the processor's native endianess.
1072  *
1073  */
1074 static int t3_write_flash(adapter_t *adapter, unsigned int addr,
1075 			  unsigned int n, const u8 *data,
1076 			  int byte_oriented)
1077 {
1078 	int ret;
1079 	u32 buf[64];
1080 	unsigned int c, left, val, offset = addr & 0xff;
1081 
1082 	if (addr + n > SF_SIZE || offset + n > 256)
1083 		return -EINVAL;
1084 
1085 	val = swab32(addr) | SF_PROG_PAGE;
1086 
1087 	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1088 	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
1089 		return ret;
1090 
1091 	for (left = n; left; left -= c) {
1092 		c = min(left, 4U);
1093 		val = *(const u32*)data;
1094 		data += c;
1095 		if (byte_oriented)
1096 			val = htonl(val);
1097 
1098 		ret = sf1_write(adapter, c, c != left, val);
1099 		if (ret)
1100 			return ret;
1101 	}
1102 	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
1103 		return ret;
1104 
1105 	/* Read the page to verify the write succeeded */
1106 	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
1107 			    byte_oriented);
1108 	if (ret)
1109 		return ret;
1110 
1111 	if (memcmp(data - n, (u8 *)buf + offset, n))
1112 		return -EIO;
1113 	return 0;
1114 }
1115 
1116 /**
1117  *	t3_get_tp_version - read the tp sram version
1118  *	@adapter: the adapter
1119  *	@vers: where to place the version
1120  *
1121  *	Reads the protocol sram version from sram.
1122  */
1123 int t3_get_tp_version(adapter_t *adapter, u32 *vers)
1124 {
1125 	int ret;
1126 
1127 	/* Get version loaded in SRAM */
1128 	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
1129 	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
1130 			      1, 1, 5, 1);
1131 	if (ret)
1132 		return ret;
1133 
1134 	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
1135 
1136 	return 0;
1137 }
1138 
1139 /**
1140  *	t3_check_tpsram_version - read the tp sram version
1141  *	@adapter: the adapter
1142  *
1143  */
1144 int t3_check_tpsram_version(adapter_t *adapter)
1145 {
1146 	int ret;
1147 	u32 vers;
1148 	unsigned int major, minor;
1149 
1150 	if (adapter->params.rev == T3_REV_A)
1151 		return 0;
1152 
1153 
1154 	ret = t3_get_tp_version(adapter, &vers);
1155 	if (ret)
1156 		return ret;
1157 
1158 	vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
1159 
1160 	major = G_TP_VERSION_MAJOR(vers);
1161 	minor = G_TP_VERSION_MINOR(vers);
1162 
1163 	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
1164 		return 0;
1165 	else {
1166 		CH_ERR(adapter, "found wrong TP version (%u.%u), "
1167 		       "driver compiled for version %d.%d\n", major, minor,
1168 		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
1169 	}
1170 	return -EINVAL;
1171 }
1172 
1173 /**
1174  *	t3_check_tpsram - check if provided protocol SRAM
1175  *			  is compatible with this driver
1176  *	@adapter: the adapter
1177  *	@tp_sram: the firmware image to write
1178  *	@size: image size
1179  *
1180  *	Checks if an adapter's tp sram is compatible with the driver.
1181  *	Returns 0 if the versions are compatible, a negative error otherwise.
1182  */
1183 int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
1184 {
1185 	u32 csum;
1186 	unsigned int i;
1187 	const u32 *p = (const u32 *)tp_sram;
1188 
1189 	/* Verify checksum */
1190 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1191 		csum += ntohl(p[i]);
1192 	if (csum != 0xffffffff) {
1193 		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1194 		       csum);
1195 		return -EINVAL;
1196 	}
1197 
1198 	return 0;
1199 }
1200 
/* FW image types, as encoded in the version word's type field
 * (extracted with G_FW_VERSION_TYPE). */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
1205 
1206 /**
1207  *	t3_get_fw_version - read the firmware version
1208  *	@adapter: the adapter
1209  *	@vers: where to place the version
1210  *
1211  *	Reads the FW version from flash. Note that we had to move the version
1212  *	due to FW size. If we don't find a valid FW version in the new location
1213  *	we fall back and read the old location.
1214  */
1215 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
1216 {
1217 	int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1218 	if (!ret && *vers != 0xffffffff)
1219 		return 0;
1220 	else
1221 		return t3_read_flash(adapter, FW_VERS_ADDR_PRE8, 1, vers, 0);
1222 }
1223 
1224 /**
1225  *	t3_check_fw_version - check if the FW is compatible with this driver
1226  *	@adapter: the adapter
1227  *
1228  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
1229  *	if the versions are compatible, a negative error otherwise.
1230  */
1231 int t3_check_fw_version(adapter_t *adapter)
1232 {
1233 	int ret;
1234 	u32 vers;
1235 	unsigned int type, major, minor;
1236 
1237 	ret = t3_get_fw_version(adapter, &vers);
1238 	if (ret)
1239 		return ret;
1240 
1241 	type = G_FW_VERSION_TYPE(vers);
1242 	major = G_FW_VERSION_MAJOR(vers);
1243 	minor = G_FW_VERSION_MINOR(vers);
1244 
1245 	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1246 	    minor == FW_VERSION_MINOR)
1247 		return 0;
1248 
1249 	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1250 		CH_WARN(adapter, "found old FW minor version(%u.%u), "
1251 		        "driver compiled for version %u.%u\n", major, minor,
1252 			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1253 	else {
1254 		CH_WARN(adapter, "found newer FW version(%u.%u), "
1255 		        "driver compiled for version %u.%u\n", major, minor,
1256 			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1257 			return 0;
1258 	}
1259 	return -EINVAL;
1260 }
1261 
1262 /**
1263  *	t3_flash_erase_sectors - erase a range of flash sectors
1264  *	@adapter: the adapter
1265  *	@start: the first sector to erase
1266  *	@end: the last sector to erase
1267  *
1268  *	Erases the sectors in the given range.
1269  */
1270 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
1271 {
1272 	while (start <= end) {
1273 		int ret;
1274 
1275 		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1276 		    (ret = sf1_write(adapter, 4, 0,
1277 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1278 		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
1279 			return ret;
1280 		start++;
1281 	}
1282 	return 0;
1283 }
1284 
1285 /*
1286  *	t3_load_fw - download firmware
1287  *	@adapter: the adapter
1288  *	@fw_data: the firmware image to write
1289  *	@size: image size
1290  *
1291  *	Write the supplied firmware image to the card's serial flash.
1292  *	The FW image has the following sections: @size - 8 bytes of code and
1293  *	data, followed by 4 bytes of FW version, followed by the 32-bit
1294  *	1's complement checksum of the whole image.
1295  */
1296 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
1297 {
1298 	u32 version, csum, fw_version_addr;
1299 	unsigned int i;
1300 	const u32 *p = (const u32 *)fw_data;
1301 	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1302 
1303 	if ((size & 3) || size < FW_MIN_SIZE)
1304 		return -EINVAL;
1305 	if (size - 8 > FW_MAX_SIZE)
1306 		return -EFBIG;
1307 
1308 	version = ntohl(*(const u32 *)(fw_data + size - 8));
1309 	if (G_FW_VERSION_MAJOR(version) < 8) {
1310 
1311 		fw_version_addr = FW_VERS_ADDR_PRE8;
1312 
1313 		if (size - 8 > FW_MAX_SIZE_PRE8)
1314 			return -EFBIG;
1315 	} else
1316 		fw_version_addr = FW_VERS_ADDR;
1317 
1318 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1319 		csum += ntohl(p[i]);
1320 	if (csum != 0xffffffff) {
1321 		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1322 		       csum);
1323 		return -EINVAL;
1324 	}
1325 
1326 	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1327 	if (ret)
1328 		goto out;
1329 
1330 	size -= 8;  /* trim off version and checksum */
1331 	for (addr = FW_FLASH_BOOT_ADDR; size; ) {
1332 		unsigned int chunk_size = min(size, 256U);
1333 
1334 		ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
1335 		if (ret)
1336 			goto out;
1337 
1338 		addr += chunk_size;
1339 		fw_data += chunk_size;
1340 		size -= chunk_size;
1341 	}
1342 
1343 	ret = t3_write_flash(adapter, fw_version_addr, 4, fw_data, 1);
1344 out:
1345 	if (ret)
1346 		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1347 	return ret;
1348 }
1349 
1350 /*
1351  *	t3_load_boot - download boot flash
1352  *	@adapter: the adapter
1353  *	@boot_data: the boot image to write
1354  *	@size: image size
1355  *
1356  *	Write the supplied boot image to the card's serial flash.
1357  *	The boot image has the following sections: a 28-byte header and the
1358  *	boot image.
1359  */
1360 int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
1361 {
1362 	boot_header_t *header = (boot_header_t *)boot_data;
1363 	int ret;
1364 	unsigned int addr;
1365 	unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
1366 	unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;
1367 
1368 	/*
1369 	 * Perform some primitive sanity testing to avoid accidentally
1370 	 * writing garbage over the boot sectors.  We ought to check for
1371 	 * more but it's not worth it for now ...
1372 	 */
1373 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1374 		CH_ERR(adapter, "boot image too small/large\n");
1375 		return -EFBIG;
1376 	}
1377 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1378 		CH_ERR(adapter, "boot image missing signature\n");
1379 		return -EINVAL;
1380 	}
1381 	if (header->length * BOOT_SIZE_INC != size) {
1382 		CH_ERR(adapter, "boot image header length != image length\n");
1383 		return -EINVAL;
1384 	}
1385 
1386 	ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
1387 	if (ret)
1388 		goto out;
1389 
1390 	for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
1391 		unsigned int chunk_size = min(size, 256U);
1392 
1393 		ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
1394 		if (ret)
1395 			goto out;
1396 
1397 		addr += chunk_size;
1398 		boot_data += chunk_size;
1399 		size -= chunk_size;
1400 	}
1401 
1402 out:
1403 	if (ret)
1404 		CH_ERR(adapter, "boot image download failed, error %d\n", ret);
1405 	return ret;
1406 }
1407 
#define CIM_CTL_BASE 0x2000	/* base of the CIM control region, added to
				 * caller-relative addresses in
				 * t3_cim_ctl_blk_read() */
1409 
1410 /**
1411  *	t3_cim_ctl_blk_read - read a block from CIM control region
1412  *	@adap: the adapter
1413  *	@addr: the start address within the CIM control region
1414  *	@n: number of words to read
1415  *	@valp: where to store the result
1416  *
1417  *	Reads a block of 4-byte words from the CIM control region.
1418  */
1419 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
1420 			unsigned int *valp)
1421 {
1422 	int ret = 0;
1423 
1424 	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1425 		return -EBUSY;
1426 
1427 	for ( ; !ret && n--; addr += 4) {
1428 		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1429 		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1430 				      0, 5, 2);
1431 		if (!ret)
1432 			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1433 	}
1434 	return ret;
1435 }
1436 
/*
 * t3_gate_rx_traffic - temporarily shut off all Rx traffic on a MAC.
 *
 * Saves the current Rx config and hash-filter registers through @rx_cfg,
 * @rx_hash_high and @rx_hash_low so that t3_open_rx_traffic() can restore
 * them later.
 */
static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	/* clear the hash filters so hashed multicast is dropped too */
	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}
1458 
/*
 * t3_open_rx_traffic - re-enable Rx traffic on a MAC.
 *
 * Restores the Rx config and hash-filter registers previously saved by
 * t3_gate_rx_traffic() and re-enables the exact-match filters.
 */
static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}
1469 
/*
 * t3_detect_link_fault - probe a port for a link fault.
 *
 * Gates Rx traffic, clears and re-arms the XGMAC interrupt, restarts Rx,
 * and then samples A_XGM_INT_STATUS.  Returns 1 if F_LINKFAULTCHANGE is
 * set afterwards (a fault is present), 0 otherwise.
 */
static int t3_detect_link_fault(adapter_t *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	uint32_t rx_cfg, rx_hash_high, rx_hash_low;
	int link_fault;

	/* stop rx */
	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);

	/* clear status and make sure intr is enabled */
	(void) t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
	t3_xgm_intr_enable(adapter, port_id);

	/* restart rx */
	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
	return (link_fault & F_LINKFAULTCHANGE ? 1 : 0);
}
1492 
/*
 * t3_clear_faults - clear and re-arm a port's XGMAC fault interrupt.
 *
 * Reads A_XGM_INT_STATUS to discard stale status, acks the cause bit,
 * re-enables F_XGM_INT and the per-port interrupt.  Only done on <= 2-port
 * adapters (presumably the only configurations with this XGMAC interrupt
 * wiring — TODO confirm).
 */
static void t3_clear_faults(adapter_t *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;

	if (adapter->params.nports <= 2) {
		t3_xgm_intr_disable(adapter, pi->port_id);
		t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, F_XGM_INT);
		t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adapter, pi->port_id);
	}
}
1507 
1508 /**
1509  *	t3_link_changed - handle interface link changes
1510  *	@adapter: the adapter
1511  *	@port_id: the port index that changed link state
1512  *
1513  *	Called when a port's link settings change to propagate the new values
1514  *	to the associated PHY and MAC.  After performing the common tasks it
1515  *	invokes an OS-specific handler.
1516  */
1517 void t3_link_changed(adapter_t *adapter, int port_id)
1518 {
1519 	int link_ok, speed, duplex, fc, link_fault;
1520 	struct port_info *pi = adap2pinfo(adapter, port_id);
1521 	struct cphy *phy = &pi->phy;
1522 	struct cmac *mac = &pi->mac;
1523 	struct link_config *lc = &pi->link_config;
1524 
1525 	link_ok = lc->link_ok;
1526 	speed = lc->speed;
1527 	duplex = lc->duplex;
1528 	fc = lc->fc;
1529 	link_fault = 0;
1530 
1531 	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1532 
1533 	if (lc->requested_fc & PAUSE_AUTONEG)
1534 		fc &= lc->requested_fc;
1535 	else
1536 		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1537 
1538 	/* Update mac speed before checking for link fault. */
1539 	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE &&
1540 	    (speed != lc->speed || duplex != lc->duplex || fc != lc->fc))
1541 		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1542 
1543 	/*
1544 	 * Check for link faults if any of these is true:
1545 	 * a) A link fault is suspected, and PHY says link ok
1546 	 * b) PHY link transitioned from down -> up
1547 	 */
1548 	if (adapter->params.nports <= 2 &&
1549 	    ((pi->link_fault && link_ok) || (!lc->link_ok && link_ok))) {
1550 
1551 		link_fault = t3_detect_link_fault(adapter, port_id);
1552 		if (link_fault) {
1553 			if (pi->link_fault != LF_YES) {
1554 				mac->stats.link_faults++;
1555 				pi->link_fault = LF_YES;
1556 			}
1557 
1558 			/* Don't report link up */
1559 			link_ok = 0;
1560 		} else {
1561 			/* clear faults here if this was a false alarm. */
1562 			if (pi->link_fault == LF_MAYBE &&
1563 			    link_ok && lc->link_ok)
1564 				t3_clear_faults(adapter, port_id);
1565 
1566 			pi->link_fault = LF_NO;
1567 		}
1568 	}
1569 
1570 	if (link_ok == lc->link_ok && speed == lc->speed &&
1571 	    duplex == lc->duplex && fc == lc->fc)
1572 		return;                            /* nothing changed */
1573 
1574 	lc->link_ok = (unsigned char)link_ok;
1575 	lc->speed = speed < 0 ? SPEED_INVALID : speed;
1576 	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1577 	lc->fc = fc;
1578 
1579 	if (link_ok) {
1580 
1581 		/* down -> up, or up -> up with changed settings */
1582 
1583 		if (adapter->params.rev > 0 && uses_xaui(adapter)) {
1584 			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1585 				     F_TXACTENABLE | F_RXEN);
1586 		}
1587 
1588 		t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset,
1589 				 F_ENDROPPKT, 0);
1590 		t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1591 		t3_set_reg_field(adapter, A_XGM_STAT_CTRL + mac->offset,
1592 				 F_CLRSTATS, 1);
1593 		t3_clear_faults(adapter, port_id);
1594 
1595 	} else {
1596 
1597 		/* up -> down */
1598 
1599 		if (adapter->params.rev > 0 && uses_xaui(adapter)) {
1600 			t3_write_reg(adapter,
1601 				     A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1602 		}
1603 
1604 		t3_xgm_intr_disable(adapter, pi->port_id);
1605 		if (adapter->params.nports <= 2) {
1606 			t3_set_reg_field(adapter,
1607 					 A_XGM_INT_ENABLE + mac->offset,
1608 					 F_XGM_INT, 0);
1609 		}
1610 
1611 		if (!link_fault) {
1612 			if (is_10G(adapter))
1613 				pi->phy.ops->power_down(&pi->phy, 1);
1614 			t3_mac_disable(mac, MAC_DIRECTION_RX);
1615 			t3_link_start(phy, mac, lc);
1616 		}
1617 
1618 		/*
1619 		 * Make sure Tx FIFO continues to drain, even as rxen is left
1620 		 * high to help detect and indicate remote faults.
1621 		 */
1622 		t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset, 0,
1623 				 F_ENDROPPKT);
1624 		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1625 		t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset, F_TXEN);
1626 		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
1627 	}
1628 
1629 	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc,
1630 	    mac->was_reset);
1631 	mac->was_reset = 0;
1632 }
1633 
1634 /**
1635  *	t3_link_start - apply link configuration to MAC/PHY
1636  *	@phy: the PHY to setup
1637  *	@mac: the MAC to setup
1638  *	@lc: the requested link configuration
1639  *
1640  *	Set up a port's MAC and PHY according to a desired link configuration.
1641  *	- If the PHY can auto-negotiate first decide what to advertise, then
1642  *	  enable/disable auto-negotiation as desired, and reset.
1643  *	- If the PHY does not auto-negotiate just reset it.
1644  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1645  *	  otherwise do it later based on the outcome of auto-negotiation.
1646  */
1647 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1648 {
1649 	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1650 
1651 	lc->link_ok = 0;
1652 	if (lc->supported & SUPPORTED_Autoneg) {
1653 		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1654 		if (fc) {
1655 			lc->advertising |= ADVERTISED_Asym_Pause;
1656 			if (fc & PAUSE_RX)
1657 				lc->advertising |= ADVERTISED_Pause;
1658 		}
1659 
1660 		phy->ops->advertise(phy, lc->advertising);
1661 
1662 		if (lc->autoneg == AUTONEG_DISABLE) {
1663 			lc->speed = lc->requested_speed;
1664 			lc->duplex = lc->requested_duplex;
1665 			lc->fc = (unsigned char)fc;
1666 			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1667 						   fc);
1668 			/* Also disables autoneg */
1669 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1670 			/* PR 5666. Power phy up when doing an ifup */
1671 			if (!is_10G(phy->adapter))
1672 				phy->ops->power_down(phy, 0);
1673 		} else
1674 			phy->ops->autoneg_enable(phy);
1675 	} else {
1676 		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1677 		lc->fc = (unsigned char)fc;
1678 		phy->ops->reset(phy, 0);
1679 	}
1680 	return 0;
1681 }
1682 
1683 /**
1684  *	t3_set_vlan_accel - control HW VLAN extraction
1685  *	@adapter: the adapter
1686  *	@ports: bitmap of adapter ports to operate on
1687  *	@on: enable (1) or disable (0) HW VLAN extraction
1688  *
1689  *	Enables or disables HW extraction of VLAN tags for the given port.
1690  */
1691 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1692 {
1693 	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1694 			 ports << S_VLANEXTRACTIONENABLE,
1695 			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1696 }
1697 
/* One entry of a table describing interrupt-cause conditions; tables of
 * these drive t3_handle_intr_status(). */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
1704 
1705 /**
1706  *	t3_handle_intr_status - table driven interrupt handler
1707  *	@adapter: the adapter that generated the interrupt
1708  *	@reg: the interrupt status register to process
1709  *	@mask: a mask to apply to the interrupt status
1710  *	@acts: table of interrupt actions
1711  *	@stats: statistics counters tracking interrupt occurences
1712  *
1713  *	A table driven interrupt handler that applies a set of masks to an
1714  *	interrupt status word and performs the corresponding actions if the
1715  *	interrupts described by the mask have occured.  The actions include
1716  *	optionally printing a warning or alert message, and optionally
1717  *	incrementing a stat counter.  The table is terminated by an entry
1718  *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1719  */
1720 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1721 				 unsigned int mask,
1722 				 const struct intr_info *acts,
1723 				 unsigned long *stats)
1724 {
1725 	int fatal = 0;
1726 	unsigned int status = t3_read_reg(adapter, reg) & mask;
1727 
1728 	for ( ; acts->mask; ++acts) {
1729 		if (!(status & acts->mask)) continue;
1730 		if (acts->fatal) {
1731 			fatal++;
1732 			CH_ALERT(adapter, "%s (0x%x)\n",
1733 				 acts->msg, status & acts->mask);
1734 		} else if (acts->msg)
1735 			CH_WARN(adapter, "%s (0x%x)\n",
1736 				acts->msg, status & acts->mask);
1737 		if (acts->stat_idx >= 0)
1738 			stats[acts->stat_idx]++;
1739 	}
1740 	if (status)                           /* clear processed interrupts */
1741 		t3_write_reg(adapter, reg, status);
1742 	return fatal;
1743 }
1744 
/*
 * Per-module interrupt masks.  Each *_INTR_MASK selects the conditions the
 * driver services in that module's interrupt registers; MSI-X parity errors
 * are deliberately excluded from the PCI-X/PCIe masks (see the commented-out
 * terms below).
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
	 	       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1800 /*
1801  * Interrupt handler for the PCIX1 module.
1802  */
1803 static void pci_intr_handler(adapter_t *adapter)
1804 {
1805 	static struct intr_info pcix1_intr_info[] = {
1806 		{ F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1807 		{ F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1808 		{ F_RCVTARABT, "PCI received target abort", -1, 1 },
1809 		{ F_RCVMSTABT, "PCI received master abort", -1, 1 },
1810 		{ F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1811 		{ F_DETPARERR, "PCI detected parity error", -1, 1 },
1812 		{ F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1813 		{ F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1814 		{ F_RCVSPLCMPERR, "PCI received split completion error", -1,
1815 		  1 },
1816 		{ F_DETCORECCERR, "PCI correctable ECC error",
1817 		  STAT_PCI_CORR_ECC, 0 },
1818 		{ F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1819 		{ F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1820 		{ V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1821 		  1 },
1822 		{ V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1823 		  1 },
1824 		{ V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1825 		  1 },
1826 		{ V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1827 		  "error", -1, 1 },
1828 		{ 0 }
1829 	};
1830 
1831 	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1832 				  pcix1_intr_info, adapter->irq_stats))
1833 		t3_fatal_err(adapter);
1834 }
1835 
1836 /*
1837  * Interrupt handler for the PCIE module.
1838  */
1839 static void pcie_intr_handler(adapter_t *adapter)
1840 {
1841 	static struct intr_info pcie_intr_info[] = {
1842 		{ F_PEXERR, "PCI PEX error", -1, 1 },
1843 		{ F_UNXSPLCPLERRR,
1844 		  "PCI unexpected split completion DMA read error", -1, 1 },
1845 		{ F_UNXSPLCPLERRC,
1846 		  "PCI unexpected split completion DMA command error", -1, 1 },
1847 		{ F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1848 		{ F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1849 		{ F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1850 		{ F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1851 		{ V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1852 		  "PCI MSI-X table/PBA parity error", -1, 1 },
1853 		{ F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1854 		{ F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1855 		{ F_RXPARERR, "PCI Rx parity error", -1, 1 },
1856 		{ F_TXPARERR, "PCI Tx parity error", -1, 1 },
1857 		{ V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1858 		{ 0 }
1859 	};
1860 
1861 	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1862 		CH_ALERT(adapter, "PEX error code 0x%x\n",
1863 			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1864 
1865 	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1866 				  pcie_intr_info, adapter->irq_stats))
1867 		t3_fatal_err(adapter);
1868 }
1869 
1870 /*
1871  * TP interrupt handler.
1872  */
1873 static void tp_intr_handler(adapter_t *adapter)
1874 {
1875 	static struct intr_info tp_intr_info[] = {
1876 		{ 0xffffff,  "TP parity error", -1, 1 },
1877 		{ 0x1000000, "TP out of Rx pages", -1, 1 },
1878 		{ 0x2000000, "TP out of Tx pages", -1, 1 },
1879 		{ 0 }
1880 	};
1881 	static struct intr_info tp_intr_info_t3c[] = {
1882 		{ 0x1fffffff,  "TP parity error", -1, 1 },
1883 		{ F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1884 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1885 		{ 0 }
1886 	};
1887 
1888 	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1889 				  adapter->params.rev < T3_REV_C ?
1890 					tp_intr_info : tp_intr_info_t3c, NULL))
1891 		t3_fatal_err(adapter);
1892 }
1893 
1894 /*
1895  * CIM interrupt handler.
1896  */
1897 static void cim_intr_handler(adapter_t *adapter)
1898 {
1899 	static struct intr_info cim_intr_info[] = {
1900 		{ F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1901 		{ F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1902 		{ F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1903 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1904 		{ F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1905 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1906 		{ F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1907 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1908 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1909 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1910 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1911 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1912 		{ F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1913 		{ F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1914 		{ F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1915 		{ F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1916 		{ F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1917 		{ F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1918 		{ F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1919 		{ F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1920 		{ F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1921 		{ F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1922 		{ F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1923 		{ F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1924 		{ 0 }
1925         };
1926 
1927 	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1928 				  cim_intr_info, NULL))
1929 		t3_fatal_err(adapter);
1930 }
1931 
1932 /*
1933  * ULP RX interrupt handler.
1934  */
1935 static void ulprx_intr_handler(adapter_t *adapter)
1936 {
1937 	static struct intr_info ulprx_intr_info[] = {
1938 		{ F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1939 		{ F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1940 		{ F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1941 		{ F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1942 		{ F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1943 		{ F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1944 		{ F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1945 		{ F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1946 		{ 0 }
1947         };
1948 
1949 	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1950 				  ulprx_intr_info, NULL))
1951 		t3_fatal_err(adapter);
1952 }
1953 
1954 /*
1955  * ULP TX interrupt handler.
1956  */
1957 static void ulptx_intr_handler(adapter_t *adapter)
1958 {
1959 	static struct intr_info ulptx_intr_info[] = {
1960 		{ F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1961 		  STAT_ULP_CH0_PBL_OOB, 0 },
1962 		{ F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1963 		  STAT_ULP_CH1_PBL_OOB, 0 },
1964 		{ 0xfc, "ULP TX parity error", -1, 1 },
1965 		{ 0 }
1966         };
1967 
1968 	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1969 				  ulptx_intr_info, adapter->irq_stats))
1970 		t3_fatal_err(adapter);
1971 }
1972 
/* Aggregate masks of PM TX SPI framing-error cause bits. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.  All listed causes are treated as fatal.
 */
static void pmtx_intr_handler(adapter_t *adapter)
{
	static struct intr_info pmtx_intr_info[] = {
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
		{ OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
		{ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		  "PMTX ispi parity error", -1, 1 },
		{ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		  "PMTX ospi parity error", -1, 1 },
		{ 0 }
        };

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
2002 
/* Aggregate masks of PM RX SPI framing-error cause bits. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.  All listed causes are treated as fatal.
 */
static void pmrx_intr_handler(adapter_t *adapter)
{
	static struct intr_info pmrx_intr_info[] = {
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
		{ OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
		{ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		  "PMRX ispi parity error", -1, 1 },
		{ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		  "PMRX ospi parity error", -1, 1 },
		{ 0 }
        };

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
2032 
2033 /*
2034  * CPL switch interrupt handler.
2035  */
2036 static void cplsw_intr_handler(adapter_t *adapter)
2037 {
2038 	static struct intr_info cplsw_intr_info[] = {
2039 		{ F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
2040 		{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
2041 		{ F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
2042 		{ F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
2043 		{ F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
2044 		{ F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
2045 		{ 0 }
2046         };
2047 
2048 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
2049 				  cplsw_intr_info, NULL))
2050 		t3_fatal_err(adapter);
2051 }
2052 
2053 /*
2054  * MPS interrupt handler.
2055  */
2056 static void mps_intr_handler(adapter_t *adapter)
2057 {
2058 	static struct intr_info mps_intr_info[] = {
2059 		{ 0x1ff, "MPS parity error", -1, 1 },
2060 		{ 0 }
2061 	};
2062 
2063 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
2064 				  mps_intr_info, NULL))
2065 		t3_fatal_err(adapter);
2066 }
2067 
/* MC7 (memory controller) causes treated as fatal: uncorrectable (UE),
 * parity (PE), and address (AE) errors.  Correctable errors are not. */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
2073 static void mc7_intr_handler(struct mc7 *mc7)
2074 {
2075 	adapter_t *adapter = mc7->adapter;
2076 	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
2077 
2078 	if (cause & F_CE) {
2079 		mc7->stats.corr_err++;
2080 		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
2081 			"data 0x%x 0x%x 0x%x\n", mc7->name,
2082 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
2083 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
2084 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
2085 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
2086 	}
2087 
2088 	if (cause & F_UE) {
2089 		mc7->stats.uncorr_err++;
2090 		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
2091 			 "data 0x%x 0x%x 0x%x\n", mc7->name,
2092 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
2093 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
2094 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
2095 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
2096 	}
2097 
2098 	if (G_PE(cause)) {
2099 		mc7->stats.parity_err++;
2100 		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
2101 			 mc7->name, G_PE(cause));
2102 	}
2103 
2104 	if (cause & F_AE) {
2105 		u32 addr = 0;
2106 
2107 		if (adapter->params.rev > 0)
2108 			addr = t3_read_reg(adapter,
2109 					   mc7->offset + A_MC7_ERR_ADDR);
2110 		mc7->stats.addr_err++;
2111 		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
2112 			 mc7->name, addr);
2113 	}
2114 
2115 	if (cause & MC7_INTR_FATAL)
2116 		t3_fatal_err(adapter);
2117 
2118 	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
2119 }
2120 
/* XGMAC causes treated as fatal: TX/RX FIFO parity errors. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.
 */
2126 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
2127 {
2128 	u32 cause;
2129 	struct port_info *pi;
2130 	struct cmac *mac;
2131 
2132 	idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
2133 	pi = adap2pinfo(adap, idx);
2134 	mac = &pi->mac;
2135 
2136 	/*
2137 	 * We mask out interrupt causes for which we're not taking interrupts.
2138 	 * This allows us to use polling logic to monitor some of the other
2139 	 * conditions when taking interrupts would impose too much load on the
2140 	 * system.
2141 	 */
2142 	cause = (t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset)
2143 		 & ~(F_RXFIFO_OVERFLOW));
2144 
2145 	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
2146 		mac->stats.tx_fifo_parity_err++;
2147 		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
2148 	}
2149 	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
2150 		mac->stats.rx_fifo_parity_err++;
2151 		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
2152 	}
2153 	if (cause & F_TXFIFO_UNDERRUN)
2154 		mac->stats.tx_fifo_urun++;
2155 	if (cause & F_RXFIFO_OVERFLOW)
2156 		mac->stats.rx_fifo_ovfl++;
2157 	if (cause & V_SERDES_LOS(M_SERDES_LOS))
2158 		mac->stats.serdes_signal_loss++;
2159 	if (cause & F_XAUIPCSCTCERR)
2160 		mac->stats.xaui_pcs_ctc_err++;
2161 	if (cause & F_XAUIPCSALIGNCHANGE)
2162 		mac->stats.xaui_pcs_align_change++;
2163 	if (cause & F_XGM_INT) {
2164 		t3_set_reg_field(adap,
2165 				 A_XGM_INT_ENABLE + mac->offset,
2166 				 F_XGM_INT, 0);
2167 
2168 		/* link fault suspected */
2169 		pi->link_fault = LF_MAYBE;
2170 	}
2171 
2172 	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
2173 
2174 	if (cause & XGM_INTR_FATAL)
2175 		t3_fatal_err(adap);
2176 
2177 	return cause != 0;
2178 }
2179 
2180 /*
2181  * Interrupt handler for PHY events.
2182  */
2183 static int phy_intr_handler(adapter_t *adapter)
2184 {
2185 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
2186 
2187 	for_each_port(adapter, i) {
2188 		struct port_info *p = adap2pinfo(adapter, i);
2189 
2190 		if (!(p->phy.caps & SUPPORTED_IRQ))
2191 			continue;
2192 
2193 		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
2194 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
2195 
2196 			if (phy_cause & cphy_cause_link_change)
2197 				t3_link_changed(adapter, i);
2198 			if (phy_cause & cphy_cause_fifo_error)
2199 				p->phy.fifo_errors++;
2200 			if (phy_cause & cphy_cause_module_change)
2201 				t3_os_phymod_changed(adapter, i);
2202 			if (phy_cause & cphy_cause_alarm)
2203 				CH_WARN(adapter, "Operation affected due to "
2204 				    "adverse environment.  Check the spec "
2205 				    "sheet for corrective action.");
2206 		}
2207 	}
2208 
2209 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
2210 	return 0;
2211 }
2212 
2213 /**
2214  *	t3_slow_intr_handler - control path interrupt handler
2215  *	@adapter: the adapter
2216  *
2217  *	T3 interrupt handler for non-data interrupt events, e.g., errors.
2218  *	The designation 'slow' is because it involves register reads, while
2219  *	data interrupts typically don't involve any MMIOs.
2220  */
int t3_slow_intr_handler(adapter_t *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	/* Only service the causes we have enabled. */
	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;	/* not our interrupt */
	if (cause & F_PCIM0) {
		/* Bus-specific dispatch: PCIe vs. PCI-X. */
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		phy_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
	return 1;	/* interrupt was ours and has been handled */
}
2272 
2273 static unsigned int calc_gpio_intr(adapter_t *adap)
2274 {
2275 	unsigned int i, gpi_intr = 0;
2276 
2277 	for_each_port(adap, i)
2278 		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
2279 		    adapter_info(adap)->gpio_intr[i])
2280 			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
2281 	return gpi_intr;
2282 }
2283 
2284 /**
2285  *	t3_intr_enable - enable interrupts
2286  *	@adapter: the adapter whose interrupts should be enabled
2287  *
2288  *	Enable interrupts by setting the interrupt enable registers of the
2289  *	various HW modules and then enabling the top-level interrupt
2290  *	concentrator.
2291  */
void t3_intr_enable(adapter_t *adapter)
{
	/* Per-module interrupt-enable registers and masks, batch-written
	 * below via t3_write_regs(). */
	static struct addr_val_pair intr_en_avp[] = {
		{ A_MC7_INT_ENABLE, MC7_INTR_MASK },
		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
			MC7_INTR_MASK },
		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
			MC7_INTR_MASK },
		{ A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
		{ A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
		{ A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
		{ A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
		{ A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
		{ A_MPS_INT_ENABLE, MPS_INTR_MASK },
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	/* TP enable mask differs for T3C and later revisions. */
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
	t3_write_reg(adapter, A_SG_INT_ENABLE, SGE_INTR_MASK);

	if (adapter->params.rev > 0) {
		/* Later revisions support extra CPL/ULPTX error reporting. */
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	/* Enable only the GPIO lines actually used for PHY interrupts. */
	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	/* Finally unmask the top-level concentrator. */
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);          /* flush */
}
2335 
2336 /**
2337  *	t3_intr_disable - disable a card's interrupts
2338  *	@adapter: the adapter whose interrupts should be disabled
2339  *
2340  *	Disable interrupts.  We only disable the top-level interrupt
2341  *	concentrator and the SGE data interrupts.
2342  */
void t3_intr_disable(adapter_t *adapter)
{
	/* Mask everything at the top-level concentrator and flush the
	 * write before recording that no slow causes are enabled. */
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);  /* flush */
	adapter->slow_intr_mask = 0;
}
2349 
2350 /**
2351  *	t3_intr_clear - clear all interrupts
2352  *	@adapter: the adapter whose interrupts should be cleared
2353  *
2354  *	Clears all interrupts.
2355  */
void t3_intr_clear(adapter_t *adapter)
{
	/* Cause registers acknowledged with an all-ones write below. */
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	/* PCIe parts have an extra PEX error register to clear. */
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0);          /* flush */
}
2390 
2391 void t3_xgm_intr_enable(adapter_t *adapter, int idx)
2392 {
2393 	struct port_info *pi = adap2pinfo(adapter, idx);
2394 
2395 	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2396 		     XGM_EXTRA_INTR_MASK);
2397 }
2398 
2399 void t3_xgm_intr_disable(adapter_t *adapter, int idx)
2400 {
2401 	struct port_info *pi = adap2pinfo(adapter, idx);
2402 
2403 	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2404 		     0x7ff);
2405 }
2406 
2407 /**
2408  *	t3_port_intr_enable - enable port-specific interrupts
2409  *	@adapter: associated adapter
2410  *	@idx: index of port whose interrupts should be enabled
2411  *
2412  *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2413  *	adapter port.
2414  */
2415 void t3_port_intr_enable(adapter_t *adapter, int idx)
2416 {
2417 	struct port_info *pi = adap2pinfo(adapter, idx);
2418 
2419 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2420 	pi->phy.ops->intr_enable(&pi->phy);
2421 }
2422 
2423 /**
2424  *	t3_port_intr_disable - disable port-specific interrupts
2425  *	@adapter: associated adapter
2426  *	@idx: index of port whose interrupts should be disabled
2427  *
2428  *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2429  *	adapter port.
2430  */
2431 void t3_port_intr_disable(adapter_t *adapter, int idx)
2432 {
2433 	struct port_info *pi = adap2pinfo(adapter, idx);
2434 
2435 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2436 	pi->phy.ops->intr_disable(&pi->phy);
2437 }
2438 
2439 /**
2440  *	t3_port_intr_clear - clear port-specific interrupts
2441  *	@adapter: associated adapter
2442  *	@idx: index of port whose interrupts to clear
2443  *
2444  *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2445  *	adapter port.
2446  */
2447 void t3_port_intr_clear(adapter_t *adapter, int idx)
2448 {
2449 	struct port_info *pi = adap2pinfo(adapter, idx);
2450 
2451 	t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2452 	pi->phy.ops->intr_clear(&pi->phy);
2453 }
2454 
/* Max polls of the SG_CONTEXT_CMD busy bit before giving up. */
#define SG_CONTEXT_CMD_ATTEMPTS 100
2456 
2457 /**
2458  * 	t3_sge_write_context - write an SGE context
2459  * 	@adapter: the adapter
2460  * 	@id: the context id
2461  * 	@type: the context type
2462  *
2463  * 	Program an SGE context with the values already loaded in the
2464  * 	CONTEXT_DATA? registers.
2465  */
2466 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
2467 				unsigned int type)
2468 {
2469 	if (type == F_RESPONSEQ) {
2470 		/*
2471 		 * Can't write the Response Queue Context bits for
2472 		 * Interrupt Armed or the Reserve bits after the chip
2473 		 * has been initialized out of reset.  Writing to these
2474 		 * bits can confuse the hardware.
2475 		 */
2476 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2477 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2478 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2479 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2480 	} else {
2481 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2482 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2483 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2484 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2485 	}
2486 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2487 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2488 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2489 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2490 }
2491 
2492 /**
2493  *	clear_sge_ctxt - completely clear an SGE context
2494  *	@adapter: the adapter
2495  *	@id: the context id
2496  *	@type: the context type
2497  *
2498  *	Completely clear an SGE context.  Used predominantly at post-reset
2499  *	initialization.  Note in particular that we don't skip writing to any
2500  *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2501  *	does ...
2502  */
static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
{
	/* Zero all four data words ... */
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	/* ... and write every bit, including the "sensitive" ones that
	 * t3_sge_write_context() would mask off for response queues. */
	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2518 
2519 /**
2520  *	t3_sge_init_ecntxt - initialize an SGE egress context
2521  *	@adapter: the adapter to configure
2522  *	@id: the context id
2523  *	@gts_enable: whether to enable GTS for the context
2524  *	@type: the egress context type
2525  *	@respq: associated response queue
2526  *	@base_addr: base address of queue
2527  *	@size: number of queue entries
2528  *	@token: uP token
2529  *	@gen: initial generation value for the context
2530  *	@cidx: consumer pointer
2531  *
2532  *	Initialize an SGE egress context and make it ready for use.  If the
2533  *	platform allows concurrent context operations, the caller is
2534  *	responsible for appropriate locking.
2535  */
int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* Offload queues start with 0 credits; others with FW_WR_NUM. */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* The context stores the base address as a 4K-page number, split
	 * across the DATA1 (low 16), DATA2 (next 32) and DATA3 (top 4)
	 * words. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO((u32)base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
2562 
2563 /**
2564  *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2565  *	@adapter: the adapter to configure
2566  *	@id: the context id
2567  *	@gts_enable: whether to enable GTS for the context
2568  *	@base_addr: base address of queue
2569  *	@size: number of queue entries
2570  *	@bsize: size of each buffer for this queue
2571  *	@cong_thres: threshold to signal congestion to upstream producers
2572  *	@gen: initial generation value for the context
2573  *	@cidx: consumer pointer
2574  *
2575  *	Initialize an SGE free list context and make it ready for use.  The
2576  *	caller is responsible for ensuring only one context operation occurs
2577  *	at a time.
2578  */
int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
			u64 base_addr, unsigned int size, unsigned int bsize,
			unsigned int cong_thres, int gen, unsigned int cidx)
{
	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Base address is stored as a 4K-page number; the consumer index
	 * and buffer size are each split across two context words. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32)base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
2602 
2603 /**
2604  *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2605  *	@adapter: the adapter to configure
2606  *	@id: the context id
2607  *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2608  *	@base_addr: base address of queue
2609  *	@size: number of queue entries
2610  *	@fl_thres: threshold for selecting the normal or jumbo free list
2611  *	@gen: initial generation value for the context
2612  *	@cidx: consumer pointer
2613  *
2614  *	Initialize an SGE response queue context and make it ready for use.
2615  *	The caller is responsible for ensuring only one context operation
2616  *	occurs at a time.
2617  */
2618 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2619 			 u64 base_addr, unsigned int size,
2620 			 unsigned int fl_thres, int gen, unsigned int cidx)
2621 {
2622 	unsigned int ctrl, intr = 0;
2623 
2624 	if (base_addr & 0xfff)     /* must be 4K aligned */
2625 		return -EINVAL;
2626 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2627 		return -EBUSY;
2628 
2629 	base_addr >>= 12;
2630 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2631 		     V_CQ_INDEX(cidx));
2632 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2633 	base_addr >>= 32;
2634         ctrl = t3_read_reg(adapter, A_SG_CONTROL);
2635         if ((irq_vec_idx > 0) ||
2636 		((irq_vec_idx == 0) && !(ctrl & F_ONEINTMULTQ)))
2637                 	intr = F_RQ_INTR_EN;
2638         if (irq_vec_idx >= 0)
2639                 intr |= V_RQ_MSI_VEC(irq_vec_idx);
2640 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2641 		     V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2642 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2643 	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2644 }
2645 
2646 /**
2647  *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2648  *	@adapter: the adapter to configure
2649  *	@id: the context id
2650  *	@base_addr: base address of queue
2651  *	@size: number of queue entries
2652  *	@rspq: response queue for async notifications
2653  *	@ovfl_mode: CQ overflow mode
2654  *	@credits: completion queue credits
2655  *	@credit_thres: the credit threshold
2656  *
2657  *	Initialize an SGE completion queue context and make it ready for use.
2658  *	The caller is responsible for ensuring only one context operation
2659  *	occurs at a time.
2660  */
int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Base address is stored as a 4K-page number. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
	base_addr >>= 32;
	/* Generation starts at 1; ovfl_mode also seeds the CQ_ERR bit. */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
2682 
2683 /**
2684  *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2685  *	@adapter: the adapter
2686  *	@id: the egress context id
2687  *	@enable: enable (1) or disable (0) the context
2688  *
2689  *	Enable or disable an SGE egress context.  The caller is responsible for
2690  *	ensuring only one context operation occurs at a time.
2691  */
int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Update only the EC_VALID bit; all other context bits are masked. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2707 
2708 /**
2709  *	t3_sge_disable_fl - disable an SGE free-buffer list
2710  *	@adapter: the adapter
2711  *	@id: the free list context id
2712  *
2713  *	Disable an SGE free-buffer list.  The caller is responsible for
2714  *	ensuring only one context operation occurs at a time.
2715  */
int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Zero only the FL_SIZE field; everything else is masked off. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2731 
2732 /**
2733  *	t3_sge_disable_rspcntxt - disable an SGE response queue
2734  *	@adapter: the adapter
2735  *	@id: the response queue context id
2736  *
2737  *	Disable an SGE response queue.  The caller is responsible for
2738  *	ensuring only one context operation occurs at a time.
2739  */
2740 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2741 {
2742 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2743 		return -EBUSY;
2744 
2745 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2746 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2747 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2748 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2749 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2750 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2751 		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2752 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2753 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2754 }
2755 
2756 /**
2757  *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2758  *	@adapter: the adapter
2759  *	@id: the completion queue context id
2760  *
2761  *	Disable an SGE completion queue.  The caller is responsible for
2762  *	ensuring only one context operation occurs at a time.
2763  */
2764 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2765 {
2766 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2767 		return -EBUSY;
2768 
2769 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2770 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2771 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2772 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2773 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2774 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2775 		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2776 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2777 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2778 }
2779 
2780 /**
2781  *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2782  *	@adapter: the adapter
2783  *	@id: the context id
2784  *	@op: the operation to perform
2785  *	@credits: credits to return to the CQ
2786  *
2787  *	Perform the selected operation on an SGE completion queue context.
2788  *	The caller is responsible for ensuring only one context operation
2789  *	occurs at a time.
2790  *
2791  *	For most operations the function returns the current HW position in
2792  *	the completion queue.
2793  */
2794 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2795 		      unsigned int credits)
2796 {
2797 	u32 val;
2798 
2799 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2800 		return -EBUSY;
2801 
2802 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2803 	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2804 		     V_CONTEXT(id) | F_CQ);
2805 	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2806 				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2807 		return -EIO;
2808 
2809 	if (op >= 2 && op < 7) {
2810 		if (adapter->params.rev > 0)
2811 			return G_CQ_INDEX(val);
2812 
2813 		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2814 			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2815 		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2816 				    F_CONTEXT_CMD_BUSY, 0,
2817 				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2818 			return -EIO;
2819 		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2820 	}
2821 	return 0;
2822 }
2823 
2824 /**
2825  * 	t3_sge_read_context - read an SGE context
2826  * 	@type: the context type
2827  * 	@adapter: the adapter
2828  * 	@id: the context id
2829  * 	@data: holds the retrieved context
2830  *
2831  * 	Read an SGE egress context.  The caller is responsible for ensuring
2832  * 	only one context operation occurs at a time.
2833  */
2834 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2835 			       unsigned int id, u32 data[4])
2836 {
2837 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2838 		return -EBUSY;
2839 
2840 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2841 		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2842 	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2843 			    SG_CONTEXT_CMD_ATTEMPTS, 1))
2844 		return -EIO;
2845 	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2846 	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2847 	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2848 	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2849 	return 0;
2850 }
2851 
2852 /**
2853  * 	t3_sge_read_ecntxt - read an SGE egress context
2854  * 	@adapter: the adapter
2855  * 	@id: the context id
2856  * 	@data: holds the retrieved context
2857  *
2858  * 	Read an SGE egress context.  The caller is responsible for ensuring
2859  * 	only one context operation occurs at a time.
2860  */
2861 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2862 {
2863 	if (id >= 65536)
2864 		return -EINVAL;
2865 	return t3_sge_read_context(F_EGRESS, adapter, id, data);
2866 }
2867 
2868 /**
2869  * 	t3_sge_read_cq - read an SGE CQ context
2870  * 	@adapter: the adapter
2871  * 	@id: the context id
2872  * 	@data: holds the retrieved context
2873  *
2874  * 	Read an SGE CQ context.  The caller is responsible for ensuring
2875  * 	only one context operation occurs at a time.
2876  */
2877 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2878 {
2879 	if (id >= 65536)
2880 		return -EINVAL;
2881 	return t3_sge_read_context(F_CQ, adapter, id, data);
2882 }
2883 
2884 /**
2885  * 	t3_sge_read_fl - read an SGE free-list context
2886  * 	@adapter: the adapter
2887  * 	@id: the context id
2888  * 	@data: holds the retrieved context
2889  *
2890  * 	Read an SGE free-list context.  The caller is responsible for ensuring
2891  * 	only one context operation occurs at a time.
2892  */
2893 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2894 {
2895 	if (id >= SGE_QSETS * 2)
2896 		return -EINVAL;
2897 	return t3_sge_read_context(F_FREELIST, adapter, id, data);
2898 }
2899 
2900 /**
2901  * 	t3_sge_read_rspq - read an SGE response queue context
2902  * 	@adapter: the adapter
2903  * 	@id: the context id
2904  * 	@data: holds the retrieved context
2905  *
2906  * 	Read an SGE response queue context.  The caller is responsible for
2907  * 	ensuring only one context operation occurs at a time.
2908  */
2909 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2910 {
2911 	if (id >= SGE_QSETS)
2912 		return -EINVAL;
2913 	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2914 }
2915 
2916 /**
2917  *	t3_config_rss - configure Rx packet steering
2918  *	@adapter: the adapter
2919  *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2920  *	@cpus: values for the CPU lookup table (0xff terminated)
2921  *	@rspq: values for the response queue lookup table (0xffff terminated)
2922  *
2923  *	Programs the receive packet steering logic.  @cpus and @rspq provide
2924  *	the values for the CPU and response queue lookup tables.  If they
2925  *	provide fewer values than the size of the tables the supplied values
2926  *	are used repeatedly until the tables are fully populated.
2927  */
2928 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2929 		   const u16 *rspq)
2930 {
2931 	int i, j, cpu_idx = 0, q_idx = 0;
2932 
2933 	if (cpus)
2934 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2935 			u32 val = i << 16;
2936 
2937 			for (j = 0; j < 2; ++j) {
2938 				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2939 				if (cpus[cpu_idx] == 0xff)
2940 					cpu_idx = 0;
2941 			}
2942 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2943 		}
2944 
2945 	if (rspq)
2946 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2947 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2948 				     (i << 16) | rspq[q_idx++]);
2949 			if (rspq[q_idx] == 0xffff)
2950 				q_idx = 0;
2951 		}
2952 
2953 	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2954 }
2955 
2956 /**
2957  *	t3_read_rss - read the contents of the RSS tables
2958  *	@adapter: the adapter
2959  *	@lkup: holds the contents of the RSS lookup table
2960  *	@map: holds the contents of the RSS map table
2961  *
2962  *	Reads the contents of the receive packet steering tables.
2963  */
2964 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2965 {
2966 	int i;
2967 	u32 val;
2968 
2969 	if (lkup)
2970 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2971 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2972 				     0xffff0000 | i);
2973 			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2974 			if (!(val & 0x80000000))
2975 				return -EAGAIN;
2976 			*lkup++ = (u8)val;
2977 			*lkup++ = (u8)(val >> 8);
2978 		}
2979 
2980 	if (map)
2981 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2982 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2983 				     0xffff0000 | i);
2984 			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2985 			if (!(val & 0x80000000))
2986 				return -EAGAIN;
2987 			*map++ = (u16)val;
2988 		}
2989 	return 0;
2990 }
2991 
2992 /**
2993  *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2994  *	@adap: the adapter
2995  *	@enable: 1 to select offload mode, 0 for regular NIC
2996  *
2997  *	Switches TP to NIC/offload mode.
2998  */
2999 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
3000 {
3001 	if (is_offload(adap) || !enable)
3002 		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
3003 				 V_NICMODE(!enable));
3004 }
3005 
3006 /**
3007  *	tp_wr_bits_indirect - set/clear bits in an indirect TP register
3008  *	@adap: the adapter
3009  *	@addr: the indirect TP register address
3010  *	@mask: specifies the field within the register to modify
3011  *	@val: new value for the field
3012  *
3013  *	Sets a field of an indirect TP register to the given value.
3014  */
3015 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
3016 				unsigned int mask, unsigned int val)
3017 {
3018 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3019 	val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3020 	t3_write_reg(adap, A_TP_PIO_DATA, val);
3021 }
3022 
3023 /**
3024  *	t3_enable_filters - enable the HW filters
3025  *	@adap: the adapter
3026  *
3027  *	Enables the HW filters for NIC traffic.
3028  */
3029 void t3_enable_filters(adapter_t *adap)
3030 {
3031 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
3032 	t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
3033 	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
3034 	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
3035 }
3036 
3037 /**
3038  *	t3_disable_filters - disable the HW filters
3039  *	@adap: the adapter
3040  *
3041  *	Disables the HW filters for NIC traffic.
3042  */
3043 void t3_disable_filters(adapter_t *adap)
3044 {
3045 	/* note that we don't want to revert to NIC-only mode */
3046 	t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_FILTEREN, 0);
3047 	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG,
3048 			 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 0);
3049 	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, F_LOOKUPEVERYPKT, 0);
3050 }
3051 
3052 /**
3053  *	pm_num_pages - calculate the number of pages of the payload memory
3054  *	@mem_size: the size of the payload memory
3055  *	@pg_size: the size of each payload memory page
3056  *
3057  *	Calculate the number of pages, each of the given size, that fit in a
3058  *	memory of the specified size, respecting the HW requirement that the
3059  *	number of pages must be a multiple of 24.
3060  */
3061 static inline unsigned int pm_num_pages(unsigned int mem_size,
3062 					unsigned int pg_size)
3063 {
3064 	unsigned int n = mem_size / pg_size;
3065 
3066 	return n - n % 24;
3067 }
3068 
/*
 * Program a memory-region base-address register and advance the running
 * offset @start by @size.  Wrapped in do/while(0) so the two statements
 * behave as a single statement (safe inside an unbraced if/else), with
 * the mutable arguments parenthesized against precedence surprises.
 */
#define mem_region(adap, start, size, reg) do { \
	t3_write_reg((adap), A_ ## reg, (start)); \
	(start) += (size); \
} while (0)
3072 
3073 /**
3074  *	partition_mem - partition memory and configure TP memory settings
3075  *	@adap: the adapter
3076  *	@p: the TP parameters
3077  *
3078  *	Partitions context and payload memory and configures TP's memory
3079  *	registers.
3080  */
3081 static void partition_mem(adapter_t *adap, const struct tp_params *p)
3082 {
3083 	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
3084 	unsigned int timers = 0, timers_shift = 22;
3085 
3086 	if (adap->params.rev > 0) {
3087 		if (tids <= 16 * 1024) {
3088 			timers = 1;
3089 			timers_shift = 16;
3090 		} else if (tids <= 64 * 1024) {
3091 			timers = 2;
3092 			timers_shift = 18;
3093 		} else if (tids <= 256 * 1024) {
3094 			timers = 3;
3095 			timers_shift = 20;
3096 		}
3097 	}
3098 
3099 	t3_write_reg(adap, A_TP_PMM_SIZE,
3100 		     p->chan_rx_size | (p->chan_tx_size >> 16));
3101 
3102 	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
3103 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
3104 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
3105 	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
3106 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
3107 
3108 	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
3109 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
3110 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
3111 
3112 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
3113 	/* Add a bit of headroom and make multiple of 24 */
3114 	pstructs += 48;
3115 	pstructs -= pstructs % 24;
3116 	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
3117 
3118 	m = tids * TCB_SIZE;
3119 	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
3120 	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
3121 	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
3122 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
3123 	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
3124 	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
3125 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
3126 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
3127 
3128 	m = (m + 4095) & ~0xfff;
3129 	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
3130 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
3131 
3132 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
3133 	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
3134 	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
3135 	if (tids < m)
3136 		adap->params.mc5.nservers += m - tids;
3137 }
3138 
/* Write an indirect TP register via the TP_PIO_ADDR/TP_PIO_DATA pair;
 * the address write must precede the data write. */
static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
3144 
/* Read an indirect TP register via the TP_PIO_ADDR/TP_PIO_DATA pair;
 * the address write must precede the data read. */
static inline u32 tp_rd_indirect(adapter_t *adap, unsigned int addr)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	return t3_read_reg(adap, A_TP_PIO_DATA);
}
3150 
/* One-time TP block initialization: global offload options, TCP option
 * defaults, delayed-ACK policy, pacing, and revision-specific tweaks. */
static void tp_config(adapter_t *adap, const struct tp_params *p)
{
	/* Checksum offload, path-MTU discovery, and a default IP TTL. */
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	/* TCP options: window scaling, timestamps, SACK. */
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	/* Rev 0 silicon uses a different bit position for ESND. */
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 			F_T3A_ENABLEESND);
	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
				 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
	} else
		/* Rev 0 cannot auto-pace; fall back to fixed pacing. */
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);

	/* Extra steering setup for 4-port adapters. */
	if (adap->params.nports > 2) {
		t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
				 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
				 F_ENABLERXPORTFROMADDR);
		tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
				    V_RXMAPMODE(M_RXMAPMODE), 0);
		tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
			       V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
			       F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
			       F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
		tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
		tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
		tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
	}
}
3216 
/* TCP timer values in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN    250

/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.
 */
static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
{
	unsigned int tre = adap->params.tp.tre;
	unsigned int dack_re = adap->params.tp.dack_re;
	unsigned int tstamp_re = fls(core_clk / 1000);     /* 1ms, at least */
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* Exponential backoff multipliers 0..15, packed 4 per register. */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* Expresses timer values in seconds as timer ticks (see tps above). */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL,
		     adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
3265 
3266 /**
3267  *	t3_tp_set_coalescing_size - set receive coalescing size
3268  *	@adap: the adapter
3269  *	@size: the receive coalescing size
3270  *	@psh: whether a set PSH bit should deliver coalesced data
3271  *
3272  *	Set the receive coalescing size and PSH bit handling.
3273  */
3274 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
3275 {
3276 	u32 val;
3277 
3278 	if (size > MAX_RX_COALESCING_LEN)
3279 		return -EINVAL;
3280 
3281 	val = t3_read_reg(adap, A_TP_PARA_REG3);
3282 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
3283 
3284 	if (size) {
3285 		val |= F_RXCOALESCEENABLE;
3286 		if (psh)
3287 			val |= F_RXCOALESCEPSHEN;
3288 		size = min(MAX_RX_COALESCING_LEN, size);
3289 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
3290 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
3291 	}
3292 	t3_write_reg(adap, A_TP_PARA_REG3, val);
3293 	return 0;
3294 }
3295 
3296 /**
3297  *	t3_tp_set_max_rxsize - set the max receive size
3298  *	@adap: the adapter
3299  *	@size: the max receive size
3300  *
3301  *	Set TP's max receive size.  This is the limit that applies when
3302  *	receive coalescing is disabled.
3303  */
3304 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
3305 {
3306 	t3_write_reg(adap, A_TP_PARA_REG7,
3307 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
3308 }
3309 
3310 static void __devinit init_mtus(unsigned short mtus[])
3311 {
3312 	/*
3313 	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
3314 	 * it can accomodate max size TCP/IP headers when SACK and timestamps
3315 	 * are enabled and still have at least 8 bytes of payload.
3316 	 */
3317 	mtus[0] = 88;
3318 	mtus[1] = 88;
3319 	mtus[2] = 256;
3320 	mtus[3] = 512;
3321 	mtus[4] = 576;
3322 	mtus[5] = 1024;
3323 	mtus[6] = 1280;
3324 	mtus[7] = 1492;
3325 	mtus[8] = 1500;
3326 	mtus[9] = 2002;
3327 	mtus[10] = 2048;
3328 	mtus[11] = 4096;
3329 	mtus[12] = 4352;
3330 	mtus[13] = 8192;
3331 	mtus[14] = 9000;
3332 	mtus[15] = 9600;
3333 }
3334 
3335 /**
3336  *	init_cong_ctrl - initialize congestion control parameters
3337  *	@a: the alpha values for congestion control
3338  *	@b: the beta values for congestion control
3339  *
3340  *	Initialize the congestion control parameters.
3341  */
3342 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3343 {
3344 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3345 	a[9] = 2;
3346 	a[10] = 3;
3347 	a[11] = 4;
3348 	a[12] = 5;
3349 	a[13] = 6;
3350 	a[14] = 7;
3351 	a[15] = 8;
3352 	a[16] = 9;
3353 	a[17] = 10;
3354 	a[18] = 14;
3355 	a[19] = 17;
3356 	a[20] = 21;
3357 	a[21] = 25;
3358 	a[22] = 30;
3359 	a[23] = 35;
3360 	a[24] = 45;
3361 	a[25] = 60;
3362 	a[26] = 80;
3363 	a[27] = 100;
3364 	a[28] = 200;
3365 	a[29] = 300;
3366 	a[30] = 400;
3367 	a[31] = 500;
3368 
3369 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3370 	b[9] = b[10] = 1;
3371 	b[11] = b[12] = 2;
3372 	b[13] = b[14] = b[15] = b[16] = 3;
3373 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3374 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3375 	b[28] = b[29] = 6;
3376 	b[30] = b[31] = 7;
3377 }
3378 
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 * 	beta, and MTUs.
 */
void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/* Average packets per congestion window bucket, used to scale the
	 * additive increment per window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376 };

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		/* MTU entry: index in bits 24+, log2 in 16+, value below. */
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* mtu - 40 approximates the TCP payload per packet
			 * (IP + TCP header overhead removed). */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
3425 
3426 /**
3427  *	t3_read_hw_mtus - returns the values in the HW MTU table
3428  *	@adap: the adapter
3429  *	@mtus: where to store the HW MTU values
3430  *
3431  *	Reads the HW MTU table.
3432  */
3433 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3434 {
3435 	int i;
3436 
3437 	for (i = 0; i < NMTUS; ++i) {
3438 		unsigned int val;
3439 
3440 		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3441 		val = t3_read_reg(adap, A_TP_MTU_TABLE);
3442 		mtus[i] = val & 0x3fff;
3443 	}
3444 }
3445 
3446 /**
3447  *	t3_get_cong_cntl_tab - reads the congestion control table
3448  *	@adap: the adapter
3449  *	@incr: where to store the alpha values
3450  *
3451  *	Reads the additive increments programmed into the HW congestion
3452  *	control table.
3453  */
3454 void t3_get_cong_cntl_tab(adapter_t *adap,
3455 			  unsigned short incr[NMTUS][NCCTRL_WIN])
3456 {
3457 	unsigned int mtu, w;
3458 
3459 	for (mtu = 0; mtu < NMTUS; ++mtu)
3460 		for (w = 0; w < NCCTRL_WIN; ++w) {
3461 			t3_write_reg(adap, A_TP_CCTRL_TABLE,
3462 				     0xffff0000 | (mtu << 5) | w);
3463 			incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3464 				        A_TP_CCTRL_TABLE) & 0x1fff;
3465 		}
3466 }
3467 
3468 /**
3469  *	t3_tp_get_mib_stats - read TP's MIB counters
3470  *	@adap: the adapter
3471  *	@tps: holds the returned counter values
3472  *
3473  *	Returns the values of TP's MIB counters.
3474  */
3475 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
3476 {
3477 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3478 			 sizeof(*tps) / sizeof(u32), 0);
3479 }
3480 
3481 /**
3482  *	t3_read_pace_tbl - read the pace table
3483  *	@adap: the adapter
3484  *	@pace_vals: holds the returned values
3485  *
3486  *	Returns the values of TP's pace table in nanoseconds.
3487  */
3488 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
3489 {
3490 	unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3491 
3492 	for (i = 0; i < NTX_SCHED; i++) {
3493 		t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3494 		pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3495 	}
3496 }
3497 
3498 /**
3499  *	t3_set_pace_tbl - set the pace table
3500  *	@adap: the adapter
3501  *	@pace_vals: the pace values in nanoseconds
3502  *	@start: index of the first entry in the HW pace table to set
3503  *	@n: how many entries to set
3504  *
3505  *	Sets (a subset of the) HW pace table.
3506  */
3507 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3508 		     unsigned int start, unsigned int n)
3509 {
3510 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3511 
3512 	for ( ; n; n--, start++, pace_vals++)
3513 		t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3514 			     ((*pace_vals + tick_ns / 2) / tick_ns));
3515 }
3516 
/*
 * Program the lower/upper limit registers of a ULP RX memory region and
 * advance the running offset @start past it.  Wrapped in do/while(0) so
 * the multi-statement macro behaves as a single statement (safe inside
 * an unbraced if/else), with mutated arguments parenthesized.
 */
#define ulp_region(adap, name, start, len) do { \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	(start) += (len); \
} while (0)

/* Same for a ULP TX region; note this one does NOT advance @start. */
#define ulptx_region(adap, name, start, len) do { \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
} while (0)
3527 
/* Carve the per-channel RX payload memory into the ULP regions.
 * ulp_region() advances m; ulptx_region() does not, so each ulptx
 * region appears to share its range with the following ulp region
 * (TPT/STAG and PBL/PBL) -- NOTE(review): verify against HW docs. */
static void ulp_config(adapter_t *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
3541 
3542 
3543 /**
3544  *	t3_set_proto_sram - set the contents of the protocol sram
3545  *	@adapter: the adapter
3546  *	@data: the protocol image
3547  *
3548  *	Write the contents of the protocol SRAM.
3549  */
3550 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3551 {
3552 	int i;
3553 	const u32 *buf = (const u32 *)data;
3554 
3555 	for (i = 0; i < PROTO_SRAM_LINES; i++) {
3556 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3557 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3558 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3559 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3560 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3561 
3562 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3563 		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3564 			return -EIO;
3565 	}
3566 	return 0;
3567 }
3568 
3569 /**
3570  *	t3_config_trace_filter - configure one of the tracing filters
3571  *	@adapter: the adapter
3572  *	@tp: the desired trace filter parameters
3573  *	@filter_index: which filter to configure
3574  *	@invert: if set non-matching packets are traced instead of matching ones
3575  *	@enable: whether to enable or disable the filter
3576  *
3577  *	Configures one of the tracing filters available in HW.
3578  */
3579 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3580 			    int filter_index, int invert, int enable)
3581 {
3582 	u32 addr, key[4], mask[4];
3583 
3584 	key[0] = tp->sport | (tp->sip << 16);
3585 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
3586 	key[2] = tp->dip;
3587 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3588 
3589 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3590 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3591 	mask[2] = tp->dip_mask;
3592 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3593 
3594 	if (invert)
3595 		key[3] |= (1 << 29);
3596 	if (enable)
3597 		key[3] |= (1 << 28);
3598 
3599 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3600 	tp_wr_indirect(adapter, addr++, key[0]);
3601 	tp_wr_indirect(adapter, addr++, mask[0]);
3602 	tp_wr_indirect(adapter, addr++, key[1]);
3603 	tp_wr_indirect(adapter, addr++, mask[1]);
3604 	tp_wr_indirect(adapter, addr++, key[2]);
3605 	tp_wr_indirect(adapter, addr++, mask[2]);
3606 	tp_wr_indirect(adapter, addr++, key[3]);
3607 	tp_wr_indirect(adapter, addr,   mask[3]);
3608 	(void) t3_read_reg(adapter, A_TP_PIO_DATA);
3609 }
3610 
3611 /**
3612  *	t3_query_trace_filter - query a tracing filter
3613  *	@adapter: the adapter
3614  *	@tp: the current trace filter parameters
3615  *	@filter_index: which filter to query
3616  *	@inverted: non-zero if the filter is inverted
3617  *	@enabled: non-zero if the filter is enabled
3618  *
3619  *	Returns the current settings of the specified HW tracing filter.
3620  */
3621 void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp,
3622 			   int filter_index, int *inverted, int *enabled)
3623 {
3624 	u32 addr, key[4], mask[4];
3625 
3626 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3627 	key[0]  = tp_rd_indirect(adapter, addr++);
3628 	mask[0] = tp_rd_indirect(adapter, addr++);
3629 	key[1]  = tp_rd_indirect(adapter, addr++);
3630 	mask[1] = tp_rd_indirect(adapter, addr++);
3631 	key[2]  = tp_rd_indirect(adapter, addr++);
3632 	mask[2] = tp_rd_indirect(adapter, addr++);
3633 	key[3]  = tp_rd_indirect(adapter, addr++);
3634 	mask[3] = tp_rd_indirect(adapter, addr);
3635 
3636 	tp->sport = key[0] & 0xffff;
3637 	tp->sip   = (key[0] >> 16) | ((key[1] & 0xffff) << 16);
3638 	tp->dport = key[1] >> 16;
3639 	tp->dip   = key[2];
3640 	tp->proto = key[3] & 0xff;
3641 	tp->vlan  = key[3] >> 8;
3642 	tp->intf  = key[3] >> 20;
3643 
3644 	tp->sport_mask = mask[0] & 0xffff;
3645 	tp->sip_mask   = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16);
3646 	tp->dport_mask = mask[1] >> 16;
3647 	tp->dip_mask   = mask[2];
3648 	tp->proto_mask = mask[3] & 0xff;
3649 	tp->vlan_mask  = mask[3] >> 8;
3650 	tp->intf_mask  = mask[3] >> 20;
3651 
3652 	*inverted = key[3] & (1 << 29);
3653 	*enabled  = key[3] & (1 << 28);
3654 }
3655 
3656 /**
3657  *	t3_config_sched - configure a HW traffic scheduler
3658  *	@adap: the adapter
3659  *	@kbps: target rate in Kbps
3660  *	@sched: the scheduler index
3661  *
3662  *	Configure a Tx HW scheduler for the target rate.
3663  */
3664 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3665 {
3666 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3667 	unsigned int clk = adap->params.vpd.cclk * 1000;
3668 	unsigned int selected_cpt = 0, selected_bpt = 0;
3669 
3670 	if (kbps > 0) {
3671 		kbps *= 125;     /* -> bytes */
3672 		for (cpt = 1; cpt <= 255; cpt++) {
3673 			tps = clk / cpt;
3674 			bpt = (kbps + tps / 2) / tps;
3675 			if (bpt > 0 && bpt <= 255) {
3676 				v = bpt * tps;
3677 				delta = v >= kbps ? v - kbps : kbps - v;
3678 				if (delta < mindelta) {
3679 					mindelta = delta;
3680 					selected_cpt = cpt;
3681 					selected_bpt = bpt;
3682 				}
3683 			} else if (selected_cpt)
3684 				break;
3685 		}
3686 		if (!selected_cpt)
3687 			return -EINVAL;
3688 	}
3689 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3690 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3691 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3692 	if (sched & 1)
3693 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3694 	else
3695 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3696 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3697 	return 0;
3698 }
3699 
3700 /**
3701  *	t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3702  *	@adap: the adapter
3703  *	@sched: the scheduler index
3704  *	@ipg: the interpacket delay in tenths of nanoseconds
3705  *
3706  *	Set the interpacket delay for a HW packet rate scheduler.
3707  */
int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
{
	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;

	/* convert ipg to nearest number of core clocks */
	ipg *= core_ticks_per_usec(adap);
	ipg = (ipg + 5000) / 10000;	/* tenths of ns -> clocks, rounded */
	if (ipg > 0xffff)		/* must fit the 16-bit register half */
		return -EINVAL;

	/* Each 32-bit TM register holds the settings for two schedulers;
	 * read-modify-write only the 16-bit half for this one. */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (ipg << 16);
	else
		v = (v & 0xffff0000) | ipg;
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	t3_read_reg(adap, A_TP_TM_PIO_DATA);	/* flush */
	return 0;
}
3728 
3729 /**
3730  *	t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3731  *	@adap: the adapter
3732  *	@sched: the scheduler index
3733  *	@kbps: the byte rate in Kbps
3734  *	@ipg: the interpacket delay in tenths of nanoseconds
3735  *
3736  *	Return the current configuration of a HW Tx scheduler.
3737  */
void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
		     unsigned int *ipg)
{
	unsigned int v, addr, bpt, cpt;

	/* Either output pointer may be NULL if the caller doesn't want it. */
	if (kbps) {
		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
		if (sched & 1)		/* odd schedulers use the high half */
			v >>= 16;
		bpt = (v >> 8) & 0xff;	/* bytes per tick */
		cpt = v & 0xff;		/* core clocks per tick */
		if (!cpt)
			*kbps = 0;        /* scheduler disabled */
		else {
			/* ticks/sec * bytes/tick, then bytes/sec -> Kbps */
			v = (adap->params.vpd.cclk * 1000) / cpt;
			*kbps = (v * bpt) / 125;
		}
	}
	if (ipg) {
		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
		if (sched & 1)
			v >>= 16;
		v &= 0xffff;
		/* convert core clocks back to tenths of nanoseconds */
		*ipg = (10000 * v) / core_ticks_per_usec(adap);
	}
}
3768 
3769 /**
3770  *	tp_init - configure TP
3771  *	@adap: the adapter
3772  *	@p: TP configuration parameters
3773  *
3774  *	Initializes the TP HW module.
3775  */
static int tp_init(adapter_t *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		/* Offload mode also needs the TP timers programmed and the
		 * FLST (free-list) initialization run; wait for the enable
		 * bit to clear. */
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	/* Finish by writing TPRESET, skipped if initialization timed out. */
	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;	/* 0 on success, nonzero on timeout */
}
3796 
3797 /**
3798  *	t3_mps_set_active_ports - configure port failover
3799  *	@adap: the adapter
3800  *	@port_mask: bitmap of active ports
3801  *
3802  *	Sets the active ports according to the supplied bitmap.
3803  */
3804 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3805 {
3806 	if (port_mask & ~((1 << adap->params.nports) - 1))
3807 		return -EINVAL;
3808 	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3809 			 port_mask << S_PORT0ACTIVE);
3810 	return 0;
3811 }
3812 
3813 /**
3814  * 	chan_init_hw - channel-dependent HW initialization
3815  *	@adap: the adapter
3816  *	@chan_map: bitmap of Tx channels being used
3817  *
3818  *	Perform the bits of HW initialization that are dependent on the Tx
3819  *	channels being used.
3820  */
static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
{
	int i;

	if (chan_map != 3) {                                 /* one channel */
		/* No arbitration needed: disable ULP round-robin and enable
		 * only the Tx port matching the single active channel. */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
					      F_TPTXPORT1EN | F_PORT1ACTIVE));
		t3_write_reg(adap, A_PM1_TX_CFG,
			     chan_map == 1 ? 0xffffffff : 0);
		/* Channel 1 only: remap all mod-queue requests. */
		if (chan_map == 2)
			t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
				     V_TX_MOD_QUEUE_REQ_MAP(0xff));
		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
	} else {                                             /* two channels */
		/* Both channels active: round-robin between them with equal
		 * DMA weights and both Tx ports enabled. */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		/* Populate all 16 mod-queue table entries. */
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
	}
}
3857 
/*
 * Calibrate the MAC's output impedance.  XAUI adapters use the automatic
 * calibration engine, retried up to 5 times; RGMII adapters get fixed
 * pull-up/pull-down values.  Returns 0 on success, -1 if the XAUI
 * calibration never completes without a fault.
 */
static int calibrate_xgm(adapter_t *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		/* Kick off calibration, give it 1ms, then check for
		 * completion without a fault. */
		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			(void) t3_read_reg(adapter, A_XGM_XAUI_IMP); /* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				/* Write back the measured impedance value. */
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		/* RGMII: fixed impedance values, then latch via
		 * IMPSETUPDATE. */
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
3884 
/*
 * T3B variant of the RGMII impedance setup: programs fixed impedance
 * values, then toggles CALRESET, IMPSETUPDATE and CALUPDATE in a specific
 * order to load them.  XAUI adapters need no action here.
 */
static void calibrate_xgm_t3b(adapter_t *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3899 
/*
 * DRAM timing parameters for one supported MC7 memory type, used by
 * mc7_init() to program A_MC7_PARM.  RefCyc is indexed by the memory
 * density read from the MC7 configuration register.
 */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* activate-to-precharge delay */
	unsigned char ActToRdWrDly;	/* activate-to-read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, per density */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write-to-read delay */
	unsigned char RdToWrDly;	/* read-to-write delay */
};
3909 
3910 /*
3911  * Write a value to a register and check that the write completed.  These
3912  * writes normally complete in a cycle or two, so one read should suffice.
3913  * The very first read exists to flush the posted write to the device.
3914  */
3915 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3916 {
3917 	t3_write_reg(adapter,	addr, val);
3918 	(void) t3_read_reg(adapter, addr);                   /* flush */
3919 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3920 		return 0;
3921 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3922 	return -EIO;
3923 }
3924 
/*
 * Bring up one MC7 memory controller: calibrate, program the DRAM timing
 * for the given memory type, issue the initialization command sequence,
 * enable periodic refresh and ECC, run BIST over the whole memory, and
 * finally mark the controller ready.  Returns 0 on success (including the
 * no-memory case), -1 on any failure.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	/* Memory mode-register values, indexed by mem_type. */
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	/* Timing parameters, indexed by mem_type (same order as mc7_mode). */
	static const struct mc7_timing_params mc7_timings[] = {
		{ 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
		{ 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
		{ 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
		{ 9,  3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
		{ 9,  4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	adapter_t *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)		/* no memory attached, nothing to do */
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */
	msleep(1);

	if (!slow) {
		/* Run a single calibration pass and verify it completed
		 * without a fault. */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	/* Program the timing parameters for this memory type and density. */
	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	(void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* Initialization sequence: precharge, extended mode registers,
	 * then the mode register (with a DLL reset for fast parts). */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
				 F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;  /* ns */
	mc7_clock /= 1000000;                          /* KHz->MHz, ns->us */

	/* Enable periodic refresh. */
	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	(void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */

	/* Enable ECC generation/checking, then run BIST over the whole
	 * memory range. */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
		     F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	(void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */

	/* Poll for BIST completion, up to 50 * 250ms. */
	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

 out_fail:
	return -1;
}
4039 
/*
 * Configure PCIe-specific settings: ack latency and replay limit derived
 * from the negotiated link width and payload size, plus error clearing
 * and link-down reset behavior.
 */
static void config_pcie(adapter_t *adap)
{
	/* Ack latency table, indexed by [log2(link width)][payload size]. */
	static const u16 ack_lat[4][6] = {
		{ 237, 416, 559, 1071, 2095, 4143 },
		{ 128, 217, 289, 545, 1057, 2081 },
		{ 73, 118, 154, 282, 538, 1050 },
		{ 67, 107, 86, 150, 278, 534 }
	};
	/* Replay limit table, same indexing as ack_lat. */
	static const u16 rpl_tmr[4][6] = {
		{ 711, 1248, 1677, 3213, 6285, 12429 },
		{ 384, 651, 867, 1635, 3171, 6243 },
		{ 219, 354, 462, 846, 1614, 3150 },
		{ 201, 321, 258, 450, 834, 1602 }
	};

	u16 val, devid;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	/* Read the Max_Payload_Size field from Device Control. */
	t3_os_pci_read_config_2(adap,
				adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
				&val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	/*
	 * Gen2 adapter pcie bridge compatibility requires minimum
	 * Max_Read_Request_size
	 */
	t3_os_pci_read_config_2(adap, 0x2, &devid);
	if (devid == 0x37) {
		t3_os_pci_write_config_2(adap,
		    adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
		    val & ~PCI_EXP_DEVCTL_READRQ & ~PCI_EXP_DEVCTL_PAYLOAD);
		pldsize = 0;
	}

	t3_os_pci_read_config_2(adap,
				adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
				&val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* Rev 0 parts have no separate Rx fast-training count. */
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
			G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)                            /* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	/* Rev 0 uses a different field layout for the ack latency. */
	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	/* Clear any pending PCIe errors and enable link-down resets. */
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
4105 
4106 /**
4107  * 	t3_init_hw - initialize and configure T3 HW modules
4108  * 	@adapter: the adapter
4109  * 	@fw_params: initial parameters to pass to firmware (optional)
4110  *
4111  *	Initialize and configure T3 HW modules.  This performs the
4112  *	initialization steps that need to be done once after a card is reset.
 *	MAC and PHY initialization is handled separately whenever a port is
4114  *	enabled.
4115  *
4116  *	@fw_params are passed to FW and their value is platform dependent.
4117  *	Only the top 8 bits are available for use, the rest must be 0.
4118  */
int t3_init_hw(adapter_t *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	/* MAC impedance calibration differs between rev 0 and later parts. */
	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (adapter->params.nports > 2)
		t3_mac_init(&adap2pinfo(adapter, 0)->mac);

	/* Bring up external memory and the MC5 TCAM, if memory is present
	 * (vpd->mclk == 0 means no external memory). */
	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
			        adapter->params.mc5.nfilters,
			       	adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);
	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	/* Boot the microprocessor from flash, passing it fw_params. */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	(void) t3_read_reg(adapter, A_CIM_BOOT_CFG);    /* flush */

	attempts = 100;
	do {                          /* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
 out_err:
	return err;
}
4193 
4194 /**
4195  *	get_pci_mode - determine a card's PCI mode
4196  *	@adapter: the adapter
4197  *	@p: where to store the PCI settings
4198  *
4199  *	Determines a card's PCI mode and associated parameters, such as speed
4200  *	and width.
4201  */
4202 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
4203 {
4204 	static unsigned short speed_map[] = { 33, 66, 100, 133 };
4205 	u32 pci_mode, pcie_cap;
4206 
4207 	pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4208 	if (pcie_cap) {
4209 		u16 val;
4210 
4211 		p->variant = PCI_VARIANT_PCIE;
4212 		p->pcie_cap_addr = pcie_cap;
4213 		t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
4214 					&val);
4215 		p->width = (val >> 4) & 0x3f;
4216 		return;
4217 	}
4218 
4219 	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
4220 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
4221 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
4222 	pci_mode = G_PCIXINITPAT(pci_mode);
4223 	if (pci_mode == 0)
4224 		p->variant = PCI_VARIANT_PCI;
4225 	else if (pci_mode < 4)
4226 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
4227 	else if (pci_mode < 8)
4228 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
4229 	else
4230 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
4231 }
4232 
4233 /**
4234  *	init_link_config - initialize a link's SW state
4235  *	@lc: structure holding the link state
4236  *	@caps: link capabilities
4237  *
4238  *	Initializes the SW state maintained for each link, including the link's
4239  *	capabilities and default speed/duplex/flow-control/autonegotiation
4240  *	settings.
4241  */
4242 static void __devinit init_link_config(struct link_config *lc,
4243 				       unsigned int caps)
4244 {
4245 	lc->supported = caps;
4246 	lc->requested_speed = lc->speed = SPEED_INVALID;
4247 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
4248 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4249 	if (lc->supported & SUPPORTED_Autoneg) {
4250 		lc->advertising = lc->supported;
4251 		lc->autoneg = AUTONEG_ENABLE;
4252 		lc->requested_fc |= PAUSE_AUTONEG;
4253 	} else {
4254 		lc->advertising = 0;
4255 		lc->autoneg = AUTONEG_DISABLE;
4256 	}
4257 }
4258 
4259 /**
4260  *	mc7_calc_size - calculate MC7 memory size
4261  *	@cfg: the MC7 configuration
4262  *
4263  *	Calculates the size of an MC7 memory in bytes from the value of its
4264  *	configuration register.
4265  */
4266 static unsigned int __devinit mc7_calc_size(u32 cfg)
4267 {
4268 	unsigned int width = G_WIDTH(cfg);
4269 	unsigned int banks = !!(cfg & F_BKS) + 1;
4270 	unsigned int org = !!(cfg & F_ORG) + 1;
4271 	unsigned int density = G_DEN(cfg);
4272 	unsigned int MBs = ((256 << density) * banks) / (org << width);
4273 
4274 	return MBs << 20;
4275 }
4276 
/*
 * Initialize the SW state of one MC7 memory controller: record its name,
 * its register offset, and its size and bus width as read from its
 * configuration register (size 0 when no memory is attached).
 */
static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
			       unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	/* All MC7 instances share the same register layout; offset makes
	 * mc7->offset + A_MC7_xxx address this instance's registers. */
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	/* A density field at its maximum value marks absent memory. */
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}
4289 
/*
 * Initialize the SW state of one MAC instance and perform the rev-0 XAUI
 * SERDES setup.  @index selects which XGMAC register block the MAC uses.
 */
void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
{
	u16 devid;

	mac->adapter = adapter;
	mac->multiport = adapter->params.nports > 2;
	if (mac->multiport) {
		mac->ext_port = (unsigned char)index;
		mac->nucast = 8;	/* unicast address slots */
	} else
		mac->nucast = 1;

	/* Gen2 adapters (device ID 0x37) use VPD xauicfg[] to indicate
	 * which MAC is connected to each port; they are supposed to use
	 * xgmac0 for both ports. */
	t3_os_pci_read_config_2(adapter, 0x2, &devid);

	if (mac->multiport ||
		(!adapter->params.vpd.xauicfg[1] && (devid==0x37)))
			index  = 0;

	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;

	/* Rev 0 XAUI adapters need their SERDES configured here. */
	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
4320 
4321 /**
4322  *	early_hw_init - HW initialization done at card detection time
4323  *	@adapter: the adapter
4324  *	@ai: contains information about the adapter type and properties
4325  *
 *	Performs the part of HW initialization that is done early on when the
 *	driver first detects the card.  Most of the HW state is initialized
4328  *	lazily later on when a port or an offload function are first used.
4329  */
void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
{
	/* Port speed selector: 3 for 10G or >2-port adapters, 2 otherwise. */
	u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
			      3 : 2);
	u32 gpio_out = ai->gpio_out;

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,                  /* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */

	/* Reset the clock dividers on both XGMAC instances. */
	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
}
4357 
4358 /**
4359  *	t3_reset_adapter - reset the adapter
4360  *	@adapter: the adapter
4361  *
4362  * 	Reset the adapter.
4363  */
int t3_reset_adapter(adapter_t *adapter)
{
	/* Pre-B2 PCIe parts lose PCI config state over the reset. */
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		t3_os_pci_save_state(adapter);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Give the device time to reset fully: poll the PCI vendor ID
	 * until it reads back correctly, up to 10 * 50ms.
	 * XXX The delay time should be tuned.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		t3_os_pci_read_config_2(adapter, 0x00, &devid);
		if (devid == 0x1425)	/* Chelsio PCI vendor ID */
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		t3_os_pci_restore_state(adapter);
	return 0;
}
4392 
/*
 * Initialize memories whose parity must be valid before first use: the
 * SGE egress and response-queue contexts and the CIM IBQs.  Returns
 * -EBUSY if the SGE context interface is busy, otherwise the error from
 * the first failing operation (0 on success).
 */
static int init_parity(adapter_t *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Clear the egress contexts at both ends of the index range, plus
	 * one response-queue context per queue set. */
	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	/* Write zeros through the debug interface to every address of
	 * each of the 4 CIM IBQs, waiting for each write to complete. */
	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}
4422 
4423 /**
4424  *	t3_prep_adapter - prepare SW and HW for operation
4425  *	@adapter: the adapter
4426  *	@ai: contains information about the adapter type and properties
4427  *
4428  *	Initialize adapter SW state for the various HW modules, set initial
4429  *	values for some adapter tunables, take PHYs out of reset, and
4430  *	initialize the MDIO interface.
4431  */
int __devinit t3_prep_adapter(adapter_t *adapter,
			      const struct adapter_info *ai, int reset)
{
	int ret;
	unsigned int i, j = 0;	/* j walks the VPD port-type table */

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);

	/*
	 * We used to only run the "adapter check task" once a second if
	 * we had PHYs which didn't support interrupts (we would check
	 * their link status once a second).  Now we check other conditions
	 * in that routine which would [potentially] impose a very high
	 * interrupt load on the system.  As such, we now always scan the
	 * adapter state once a second ...
	 */
	adapter->params.linkpoll_period = 10;

	if (adapter->params.nports > 2)
		adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
	else
		adapter->params.stats_update_period = is_10G(adapter) ?
			MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
		t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	/* Derive the TP memory-layout parameters when external memory is
	 * present (mclk != 0). */
	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;     /* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
			       adapter->params.rev > 0 ? 12 : 6;
		p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
			 1;
		p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
	}

	/* Offload requires all three external memories to be present. */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	t3_sge_prep(adapter, &adapter->params.sge);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		/* PR 6487. TOE and filtering are mutually exclusive */
		adapter->params.mc5.nfilters = 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	if (adapter->params.nports > 2 &&
	    (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		/* Find the next populated port-type entry in the VPD;
		 * fail if the entry is out of range or the table ends. */
		for (;;) {
			unsigned port_type = adapter->params.vpd.port_type[j];
			if (port_type) {
				if (port_type < ARRAY_SIZE(port_types)) {
					pti = &port_types[port_type];
					break;
				} else
					return -EINVAL;
			}
			j++;
			if (j >= ARRAY_SIZE(adapter->params.vpd.port_type))
				return -EINVAL;
		}
		ret = pti->phy_prep(p, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);
		++j;

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		t3_os_set_hw_addr(adapter, i, hw_addr);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);

		/*
		 * If the PHY doesn't support interrupts for link status
		 * changes, schedule a scan of the adapter links at least
		 * once a second.
		 */
		if (!(p->phy.caps & SUPPORTED_IRQ) &&
		    adapter->params.linkpoll_period > 10)
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
4569 
4570 /**
4571  *	t3_reinit_adapter - prepare HW for operation again
4572  *	@adapter: the adapter
4573  *
4574  *	Put HW in the same state as @t3_prep_adapter without any changes to
4575  *	SW state.  This is a cut down version of @t3_prep_adapter intended
4576  *	to be used after events that wipe out HW state but preserve SW state,
4577  *	e.g., EEH.  The device must be reset before calling this.
4578  */
4579 int t3_reinit_adapter(adapter_t *adap)
4580 {
4581 	unsigned int i;
4582 	int ret, j = 0;
4583 
4584 	early_hw_init(adap, adap->params.info);
4585 	ret = init_parity(adap);
4586 	if (ret)
4587 		return ret;
4588 
4589 	if (adap->params.nports > 2 &&
4590 	    (ret = t3_vsc7323_init(adap, adap->params.nports)))
4591 		return ret;
4592 
4593 	for_each_port(adap, i) {
4594 		const struct port_type_info *pti;
4595 		struct port_info *p = adap2pinfo(adap, i);
4596 
4597 		for (;;) {
4598 			unsigned port_type = adap->params.vpd.port_type[j];
4599 			if (port_type) {
4600 				if (port_type < ARRAY_SIZE(port_types)) {
4601 					pti = &port_types[port_type];
4602 					break;
4603 				} else
4604 					return -EINVAL;
4605 			}
4606 			j++;
4607 			if (j >= ARRAY_SIZE(adap->params.vpd.port_type))
4608 				return -EINVAL;
4609 		}
4610 		ret = pti->phy_prep(p, p->phy.addr, NULL);
4611 		if (ret)
4612 			return ret;
4613 		p->phy.ops->power_down(&p->phy, 1);
4614 	}
4615 	return 0;
4616 }
4617 
4618 void t3_led_ready(adapter_t *adapter)
4619 {
4620 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
4621 			 F_GPIO0_OUT_VAL);
4622 }
4623 
4624 void t3_port_failover(adapter_t *adapter, int port)
4625 {
4626 	u32 val;
4627 
4628 	val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4629 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4630 			 val);
4631 }
4632 
4633 void t3_failover_done(adapter_t *adapter, int port)
4634 {
4635 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4636 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
4637 }
4638 
4639 void t3_failover_clear(adapter_t *adapter)
4640 {
4641 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4642 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
4643 }
4644 
4645 static int t3_cim_hac_read(adapter_t *adapter, u32 addr, u32 *val)
4646 {
4647 	u32 v;
4648 
4649 	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4650 	if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4651 				F_HOSTBUSY, 0, 10, 10, &v))
4652 		return -EIO;
4653 
4654 	*val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);
4655 
4656 	return 0;
4657 }
4658 
4659 static int t3_cim_hac_write(adapter_t *adapter, u32 addr, u32 val)
4660 {
4661 	u32 v;
4662 
4663 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, val);
4664 
4665 	addr |= F_HOSTWRITE;
4666 	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4667 
4668 	if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4669 				F_HOSTBUSY, 0, 10, 5, &v))
4670 		return -EIO;
4671 	return 0;
4672 }
4673 
4674 int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index,
4675 		 u32 *size, void *data)
4676 {
4677 	u32 v, *buf = data;
4678 	int i, cnt,  ret;
4679 
4680 	if (*size < LA_ENTRIES * 4)
4681 		return -EINVAL;
4682 
4683 	ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4684 	if (ret)
4685 		goto out;
4686 
4687 	*stopped = !(v & 1);
4688 
4689 	/* Freeze LA */
4690 	if (!*stopped) {
4691 		ret = t3_cim_hac_write(adapter, LA_CTRL, 0);
4692 		if (ret)
4693 			goto out;
4694 	}
4695 
4696 	for (i = 0; i < LA_ENTRIES; i++) {
4697 		v = (i << 2) | (1 << 1);
4698 		ret = t3_cim_hac_write(adapter, LA_CTRL, v);
4699 		if (ret)
4700 			goto out;
4701 
4702 		ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4703 		if (ret)
4704 			goto out;
4705 
4706 		cnt = 20;
4707 		while ((v & (1 << 1)) && cnt) {
4708 			udelay(5);
4709 			--cnt;
4710 			ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4711 			if (ret)
4712 				goto out;
4713 		}
4714 
4715 		if (v & (1 << 1))
4716 			return -EIO;
4717 
4718 		ret = t3_cim_hac_read(adapter, LA_DATA, &v);
4719 		if (ret)
4720 			goto out;
4721 
4722 		*buf++ = v;
4723 	}
4724 
4725 	ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4726 	if (ret)
4727 		goto out;
4728 
4729 	*index = (v >> 16) + 4;
4730 	*size = LA_ENTRIES * 4;
4731 out:
4732 	/* Unfreeze LA */
4733 	t3_cim_hac_write(adapter, LA_CTRL, 1);
4734 	return ret;
4735 }
4736 
4737 int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data)
4738 {
4739 	u32 v, *buf = data;
4740 	int i, j, ret;
4741 
4742 	if (*size < IOQ_ENTRIES * sizeof(struct t3_ioq_entry))
4743 		return -EINVAL;
4744 
4745 	for (i = 0; i < 4; i++) {
4746 		ret = t3_cim_hac_read(adapter, (4 * i), &v);
4747 		if (ret)
4748 			goto out;
4749 
4750 		*buf++ = v;
4751 	}
4752 
4753 	for (i = 0; i < IOQ_ENTRIES; i++) {
4754 		u32 base_addr = 0x10 * (i + 1);
4755 
4756 		for (j = 0; j < 4; j++) {
4757 			ret = t3_cim_hac_read(adapter, base_addr + 4 * j, &v);
4758 			if (ret)
4759 				goto out;
4760 
4761 			*buf++ = v;
4762 		}
4763 	}
4764 
4765 	*size = IOQ_ENTRIES * sizeof(struct t3_ioq_entry);
4766 
4767 out:
4768 	return ret;
4769 }
4770 
4771