xref: /freebsd/sys/dev/cxgb/common/cxgb_t3_hw.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");


#include <cxgb_include.h>

#undef msleep
#define msleep t3_os_sleep

/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
			int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
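
/*
 * Usage sketch (illustrative only, not referenced elsewhere): poll a
 * BUSY-style bit until it deasserts and capture the final register value.
 * The I2C helpers below use exactly this pattern with A_I2C_OP/F_I2C_BUSY:
 *
 *	u32 opval;
 *
 *	if (t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
 *				I2C_ATTEMPTS, 10, &opval) < 0)
 *		return -EAGAIN;		(operation never completed)
 */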

/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
		   unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	(void) t3_read_reg(adapter, addr);      /* flush */
}
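
/*
 * Example (this exact call appears in the MI1 code below): select
 * clause-22 framing by rewriting only the ST field of A_MI1_CFG,
 * leaving the remaining bits of the register untouched:
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 */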

/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
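	/*
	 * shift[] and step[] are indexed by mc7->width: a width-0 part
	 * delivers a full 64-bit word per backdoor access (DATA0/DATA1),
	 * while narrower parts deliver it in 2, 4 or 8 pieces (width 1,
	 * 2, 3) that are merged step[width] bits apart after discarding
	 * shift[width] low-order bits.
	 */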
	static int shift[] = { 0, 0, 16, 24 };
	static int step[]  = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;  /* # of 64-bit words */
	adapter_t *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
				       start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						mc7->offset + A_MC7_BD_DATA0);
				val64 |= (u64)val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64)val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}

/*
 * Low-level I2C read and write routines.  These simply read and write a
 * single byte with the option of indicating a "continue" if another operation
 * is to be chained.  Generally most code will use higher-level routines to
 * read and write to I2C Slave Devices.
 */
#define I2C_ATTEMPTS 100

/*
 * Read an 8-bit value from the I2C bus.  If the "chained" parameter is
 * non-zero then a STOP bit will not be written after the read command.  On
 * error (the read timed out, etc.), a negative errno will be returned (e.g.
 * -EAGAIN, etc.).  On success, the 8-bit value read from the I2C bus is
 * stored into the buffer *valp and the value of the I2C ACK bit is returned
 * as a 0/1 value.
 */
int t3_i2c_read8(adapter_t *adapter, int chained, u8 *valp)
{
	int ret;
	u32 opval;
	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_I2C_OP,
		     F_I2C_READ | (chained ? F_I2C_CONT : 0));
	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
				  I2C_ATTEMPTS, 10, &opval);
	if (ret >= 0) {
		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
		*valp = G_I2C_DATA(t3_read_reg(adapter, A_I2C_DATA));
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

/*
 * Write an 8-bit value to the I2C bus.  If the "chained" parameter is
 * non-zero, then a STOP bit will not be written after the write command.  On
 * error (the write timed out, etc.), a negative errno will be returned (e.g.
 * -EAGAIN, etc.).  On success, the value of the I2C ACK bit is returned as a
 * 0/1 value.
 */
int t3_i2c_write8(adapter_t *adapter, int chained, u8 val)
{
	int ret;
	u32 opval;
	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_I2C_DATA, V_I2C_DATA(val));
	t3_write_reg(adapter, A_I2C_OP,
		     F_I2C_WRITE | (chained ? F_I2C_CONT : 0));
	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
				  I2C_ATTEMPTS, 10, &opval);
	if (ret >= 0)
		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
	MDIO_UNLOCK(adapter);
	return ret;
}
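
/*
 * Chaining sketch (hypothetical caller): a multi-byte transfer keeps
 * ownership of the bus by passing a non-zero "chained" argument for every
 * byte except the last, which terminates the transaction with a STOP:
 *
 *	u8 hi, lo;
 *
 *	if (t3_i2c_read8(adapter, 1, &hi) < 0 ||	(no STOP yet)
 *	    t3_i2c_read8(adapter, 0, &lo) < 0)		(STOP after this byte)
 *		return -EIO;
 */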

/*
 * Initialize MI1.
 */
static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
		int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
	return ret;
}

int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
		 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	MDIO_UNLOCK(adapter);
	return ret;
}

static struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};

/*
 * MI1 read/write operations for clause 45 PHYs.
 */
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

static struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a fiber PHY's advertisement register to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);

	if (err)
		return err;
	return (status & 1) ? cphy_cause_link_change : 0;
}

static struct adapter_info t3_adap_info[] = {
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio PE9000" },
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio T302" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	  F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 1, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	  F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	  F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T320" },
	{ 4, 0, 0,
	  F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
	  F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
	  { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
	  &mi1_mdio_ops, "Chelsio T304" },
	{ 0 },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(pinfo_t *pinfo, int phy_addr,
			const struct mdio_ops *ops);
};

static struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ t3_mv88e1xxx_phy_prep },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ t3_tn1010_phy_prep },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[16];
	u8  vpdr_tag;
	u8  vpdr_len[2];
	VPD_ENTRY(pn, 16);                     /* part number */
	VPD_ENTRY(ec, ECNUM_LEN);              /* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);             /* serial number */
	VPD_ENTRY(na, 12);                     /* MAC address base */
	VPD_ENTRY(cclk, 6);                    /* core clock */
	VPD_ENTRY(mclk, 6);                    /* mem clock */
	VPD_ENTRY(uclk, 6);                    /* uP clk */
	VPD_ENTRY(mdc, 6);                     /* MDIO clk */
	VPD_ENTRY(mt, 2);                      /* mem timing */
	VPD_ENTRY(xaui0cfg, 6);                /* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);                /* XAUI1 config */
	VPD_ENTRY(port0, 2);                   /* PHY0 complex */
	VPD_ENTRY(port1, 2);                   /* PHY1 complex */
	VPD_ENTRY(port2, 2);                   /* PHY2 complex */
	VPD_ENTRY(port3, 2);                   /* PHY3 complex */
	VPD_ENTRY(rv, 1);                      /* csum */
	u32 pad;                  /* for multiple-of-4 sizing and alignment */
};
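
/*
 * For reference, VPD_ENTRY(sn, SERNUM_LEN) above expands to the standard
 * PCI VPD keyword layout:
 *
 *	u8 sn_kword[2];			(2-character keyword, "SN" here)
 *	u8 sn_len;			(length of the data that follows)
 *	u8 sn_data[SERNUM_LEN];		(the value itself)
 */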

#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.
 */
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(adapter_t *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 *	get_desc_len - get the length of a vpd descriptor.
 *	@adapter: the adapter
 *	@offset: first byte offset of the vpd descriptor
 *
 *	Retrieves the length of the small/large resource
 *	data type starting at offset.
 */
static int get_desc_len(adapter_t *adapter, u32 offset)
{
	u32 read_offset, tmp, shift, len = 0;
	u8 tag, buf[8];
	int ret;

	read_offset = offset & 0xfffffffc;
	shift = offset & 0x03;

	ret = t3_seeprom_read(adapter, read_offset, &tmp);
	if (ret < 0)
		return ret;

	*((u32 *)buf) = cpu_to_le32(tmp);

	tag = buf[shift];
	if (tag & 0x80) {
		ret = t3_seeprom_read(adapter, read_offset + 4, &tmp);
		if (ret < 0)
			return ret;

		*((u32 *)(&buf[4])) = cpu_to_le32(tmp);
		len = (buf[shift + 1] & 0xff) +
		      ((buf[shift + 2] << 8) & 0xff00) + 3;
	} else
		len = (tag & 0x07) + 1;

	return len;
}

/**
 *	is_end_tag - check if a vpd tag is the end tag.
 *	@adapter: the adapter
 *	@offset: first byte offset of the tag
 *
 *	Checks if the tag located at offset is the end tag.
 */
static int is_end_tag(adapter_t *adapter, u32 offset)
{
	u32 read_offset, shift, ret, tmp;
	u8 buf[4];

	read_offset = offset & 0xfffffffc;
	shift = offset & 0x03;

	ret = t3_seeprom_read(adapter, read_offset, &tmp);
	if (ret)
		return ret;
	*((u32 *)buf) = cpu_to_le32(tmp);

	if (buf[shift] == 0x78)
		return 1;
	else
		return 0;
}

/**
 *	t3_get_vpd_len - compute the length of a vpd structure
 *	@adapter: the adapter
 *	@vpd: contains the offset of the first byte of vpd
 *
 *	Computes the length of the vpd structure starting at vpd->offset.
 */
int t3_get_vpd_len(adapter_t *adapter, struct generic_vpd *vpd)
{
	u32 len = 0, offset;
	int inc, ret;

	offset = vpd->offset;

	while (offset < (vpd->offset + MAX_VPD_BYTES)) {
		ret = is_end_tag(adapter, offset);
		if (ret < 0)
			return ret;
		else if (ret == 1)
			break;

		inc = get_desc_len(adapter, offset);
		if (inc < 0)
			return inc;
		len += inc;
		offset += inc;
	}
	return (len + 1);
}

/**
 *	t3_read_vpd - reads the stream of bytes containing a vpd structure
 *	@adapter: the adapter
 *	@vpd: contains a buffer that would hold the stream of bytes
 *
 *	Reads the vpd structure starting at vpd->offset into vpd->data,
 *	the length of the byte stream to read is vpd->len.
 */
int t3_read_vpd(adapter_t *adapter, struct generic_vpd *vpd)
{
	u32 i, ret;

	for (i = 0; i < vpd->len; i += 4) {
		ret = t3_seeprom_read(adapter, vpd->offset + i,
				      (u32 *) &(vpd->data[i]));
		if (ret)
			return ret;
	}

	return 0;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
	memcpy(p->ec, vpd.ec_data, ECNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
		p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
		p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
		p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* BIOS boot header */
typedef struct boot_header_s {
	u8	signature[2];	/* signature */
	u8	length;		/* image length (includes header) */
	u8	offset[4];	/* initialization vector */
	u8	reserved[19];	/* reserved */
	u8	exheader[2];	/* offset to expansion header */
} boot_header_t;

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,           /* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,   /* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,       /* program page */
	SF_WR_DISABLE   = 4,       /* disable writes */
	SF_RD_STATUS    = 5,       /* read status register */
	SF_WR_ENABLE    = 6,       /* enable writes */
	SF_RD_DATA_FAST = 0xb,     /* read flash */
	SF_ERASE_SECTOR = 0xd8,    /* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
	FW_VERS_ADDR_PRE8 = 0x77ffc,/* flash address holding FW version pre8 */
	FW_MIN_SIZE = 8,           /* at least version and csum */
	FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
	FW_MAX_SIZE_PRE8 = FW_VERS_ADDR_PRE8 - FW_FLASH_BOOT_ADDR,

	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
	BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC /* 1 byte * length increment */
};
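
/*
 * The constants above imply the following serial flash layout
 * (8 sectors of 64KB each):
 *
 *	0x00000  BOOT_FLASH_BOOT_ADDR   BIOS boot image
 *	0x70000  FW_FLASH_BOOT_ADDR     firmware image
 *	0x7fffc  FW_VERS_ADDR           FW version word (0x77ffc for pre-8 FW)
 *	0x80000  SF_SIZE                end of flash
 */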

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
		  u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 *	If @byte_oriented is set the write data is stored as a 32-bit
 *	big-endian array, otherwise in the processor's native endianness.
 */
static int t3_write_flash(adapter_t *adapter, unsigned int addr,
			  unsigned int n, const u8 *data,
			  int byte_oriented)
{
	int ret;
	u32 buf[64];
	unsigned int c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		val = *(const u32*)data;
		data += c;
		if (byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}

/**
 *	t3_get_tp_version - read the tp sram version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol sram version from sram.
 */
int t3_get_tp_version(adapter_t *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 *	t3_check_tpsram_version - check the protocol SRAM version
 *	@adapter: the adapter
 *
 *	Reads the protocol SRAM version and checks it against the version
 *	this driver was compiled for.
 */
int t3_check_tpsram_version(adapter_t *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;
	else {
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 *	t3_check_tpsram - check if provided protocol SRAM
 *			  is compatible with this driver
 *	@adapter: the adapter
 *	@tp_sram: the firmware image to write
 *	@size: image size
 *
 *	Checks if an adapter's tp sram is compatible with the driver.
 *	Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
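
/*
 * Sketch of the checksum convention verified above (and again in t3_load_fw
 * below): an image builder would append a final big-endian word chosen so
 * that the 32-bit sum of all big-endian words equals 0xffffffff, e.g. for
 * an image of n words w[]:
 *
 *	u32 sum = 0;
 *	unsigned int i;
 *
 *	for (i = 0; i < n - 1; i++)
 *		sum += ntohl(w[i]);
 *	w[n - 1] = htonl(0xffffffff - sum);	(hypothetical builder step)
 */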

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.  Note that we had to move the version
 *	due to FW size.  If we don't find a valid FW version in the new location
 *	we fall back and read the old location.
 */
int t3_get_fw_version(adapter_t *adapter, u32 *vers)
{
	int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);

	if (!ret && *vers != 0xffffffff)
		return 0;
	else
		return t3_read_flash(adapter, FW_VERS_ADDR_PRE8, 1, vers, 0);
}

/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(adapter_t *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
{
	u32 version, csum, fw_version_addr;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size - 8 > FW_MAX_SIZE)
		return -EFBIG;

	version = ntohl(*(const u32 *)(fw_data + size - 8));
	if (G_FW_VERSION_MAJOR(version) < 8) {
		fw_version_addr = FW_VERS_ADDR_PRE8;
		if (size - 8 > FW_MAX_SIZE_PRE8)
			return -EFBIG;
	} else
		fw_version_addr = FW_VERS_ADDR;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;  /* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, fw_version_addr, 4, fw_data, 1);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}

/**
 *	t3_load_boot - download boot flash
 *	@adapter: the adapter
 *	@boot_data: the boot image to write
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
{
	boot_header_t *header = (boot_header_t *)boot_data;
	int ret;
	unsigned int addr;
	unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
	unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adapter, "boot image too small/large\n");
		return -EFBIG;
	}
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adapter, "boot image missing signature\n");
		return -EINVAL;
	}
	if (header->length * BOOT_SIZE_INC != size) {
		CH_ERR(adapter, "boot image header length != image length\n");
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
	if (ret)
		goto out;

	for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
		if (ret)
			goto out;

		addr += chunk_size;
		boot_data += chunk_size;
		size -= chunk_size;
	}

out:
	if (ret)
		CH_ERR(adapter, "boot image download failed, error %d\n", ret);
	return ret;
}

#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
			unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}

static int t3_detect_link_fault(adapter_t *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	uint32_t rx_cfg, rx_hash_high, rx_hash_low;
	int link_fault;

	/* stop rx */
	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);

	/* clear status and make sure intr is enabled */
	(void) t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
	t3_xgm_intr_enable(adapter, port_id);

	/* restart rx */
	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
	return (link_fault & F_LINKFAULTCHANGE ? 1 : 0);
}

static void t3_clear_faults(adapter_t *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;

	if (adapter->params.nports <= 2) {
		t3_xgm_intr_disable(adapter, pi->port_id);
		t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, F_XGM_INT);
		t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adapter, pi->port_id);
	}
}

/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(adapter_t *adapter, int port_id)
{
	int link_ok, speed, duplex, fc, link_fault;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;
	link_fault = 0;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	/* Update mac speed before checking for link fault. */
	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE &&
	    (speed != lc->speed || duplex != lc->duplex || fc != lc->fc))
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);

	/*
	 * Check for link faults if any of these is true:
	 * a) A link fault is suspected, and PHY says link ok
	 * b) PHY link transitioned from down -> up
	 */
	if (adapter->params.nports <= 2 &&
	    ((pi->link_fault && link_ok) || (!lc->link_ok && link_ok))) {
		link_fault = t3_detect_link_fault(adapter, port_id);
		if (link_fault) {
			if (pi->link_fault != LF_YES) {
				mac->stats.link_faults++;
				pi->link_fault = LF_YES;
			}

			/* Don't report link up */
			link_ok = 0;
		} else {
			/* clear faults here if this was a false alarm. */
			if (pi->link_fault == LF_MAYBE &&
			    link_ok && lc->link_ok)
				t3_clear_faults(adapter, port_id);

			pi->link_fault = LF_NO;
		}
	}

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;                            /* nothing changed */

	lc->link_ok = (unsigned char)link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	lc->fc = fc;

	if (link_ok) {
		/* down -> up, or up -> up with changed settings */

		if (adapter->params.rev > 0 && uses_xaui(adapter)) {
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);
		}

		t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset,
				 F_ENDROPPKT, 0);
		t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
		t3_set_reg_field(adapter, A_XGM_STAT_CTRL + mac->offset,
				 F_CLRSTATS, 1);
		t3_clear_faults(adapter, port_id);
	} else {
		/* up -> down */

		if (adapter->params.rev > 0 && uses_xaui(adapter)) {
			t3_write_reg(adapter,
				     A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
		}

		t3_xgm_intr_disable(adapter, pi->port_id);
		if (adapter->params.nports <= 2) {
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + mac->offset,
					 F_XGM_INT, 0);
		}

		if (!link_fault) {
			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(phy, mac, lc);
		}

		/*
		 * Make sure Tx FIFO continues to drain, even as rxen is left
		 * high to help detect and indicate remote faults.
		 */
		t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset, 0,
				 F_ENDROPPKT);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset, F_TXEN);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc,
	    mac->was_reset);
	mac->was_reset = 0;
}

/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}

		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			/* PR 5666. Power phy up when doing an ifup */
			if (!is_10G(phy->adapter))
				phy->ops->power_down(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};

/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)                           /* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
1744 
1745 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1746 		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1747 		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1748 		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1749 		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1750 		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1751 		       F_HIRCQPARITYERROR)
1752 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1753 		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1754 		       F_NFASRCHFAIL)
1755 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1756 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1757 		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1758 		       F_TXFIFO_UNDERRUN)
1759 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1760 			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1761 			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1762 			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1763 			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1764 			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1765 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1766 			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1767 			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1768 			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1769 			F_TXPARERR | V_BISTERR(M_BISTERR))
1770 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1771 			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1772 			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1773 #define ULPTX_INTR_MASK 0xfc
1774 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1775 			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1776 			 F_ZERO_SWITCH_ERROR)
1777 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1778 		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1779 		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1780 		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1781 		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1782 		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1783 		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1784 		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1785 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1786 			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1787 			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1788 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1789 			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1790 			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1791 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1792 		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1793 		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1794 		       V_MCAPARERRENB(M_MCAPARERRENB))
1795 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1796 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1797 		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1798 		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1799 		      F_MPS0 | F_CPL_SWITCH)
1800 /*
1801  * Interrupt handler for the PCIX1 module.
1802  */
1803 static void pci_intr_handler(adapter_t *adapter)
1804 {
1805 	static struct intr_info pcix1_intr_info[] = {
1806 		{ F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1807 		{ F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1808 		{ F_RCVTARABT, "PCI received target abort", -1, 1 },
1809 		{ F_RCVMSTABT, "PCI received master abort", -1, 1 },
1810 		{ F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1811 		{ F_DETPARERR, "PCI detected parity error", -1, 1 },
1812 		{ F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1813 		{ F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1814 		{ F_RCVSPLCMPERR, "PCI received split completion error", -1,
1815 		  1 },
1816 		{ F_DETCORECCERR, "PCI correctable ECC error",
1817 		  STAT_PCI_CORR_ECC, 0 },
1818 		{ F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1819 		{ F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1820 		{ V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1821 		  1 },
1822 		{ V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1823 		  1 },
1824 		{ V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1825 		  1 },
1826 		{ V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1827 		  "error", -1, 1 },
1828 		{ 0 }
1829 	};
1830 
1831 	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1832 				  pcix1_intr_info, adapter->irq_stats))
1833 		t3_fatal_err(adapter);
1834 }
1835 
1836 /*
1837  * Interrupt handler for the PCIE module.
1838  */
1839 static void pcie_intr_handler(adapter_t *adapter)
1840 {
1841 	static struct intr_info pcie_intr_info[] = {
1842 		{ F_PEXERR, "PCI PEX error", -1, 1 },
1843 		{ F_UNXSPLCPLERRR,
1844 		  "PCI unexpected split completion DMA read error", -1, 1 },
1845 		{ F_UNXSPLCPLERRC,
1846 		  "PCI unexpected split completion DMA command error", -1, 1 },
1847 		{ F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1848 		{ F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1849 		{ F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1850 		{ F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1851 		{ V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1852 		  "PCI MSI-X table/PBA parity error", -1, 1 },
1853 		{ F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1854 		{ F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1855 		{ F_RXPARERR, "PCI Rx parity error", -1, 1 },
1856 		{ F_TXPARERR, "PCI Tx parity error", -1, 1 },
1857 		{ V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1858 		{ 0 }
1859 	};
1860 
1861 	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1862 		CH_ALERT(adapter, "PEX error code 0x%x\n",
1863 			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1864 
1865 	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1866 				  pcie_intr_info, adapter->irq_stats))
1867 		t3_fatal_err(adapter);
1868 }
1869 
1870 /*
1871  * TP interrupt handler.
1872  */
1873 static void tp_intr_handler(adapter_t *adapter)
1874 {
1875 	static struct intr_info tp_intr_info[] = {
1876 		{ 0xffffff,  "TP parity error", -1, 1 },
1877 		{ 0x1000000, "TP out of Rx pages", -1, 1 },
1878 		{ 0x2000000, "TP out of Tx pages", -1, 1 },
1879 		{ 0 }
1880 	};
1881 	static struct intr_info tp_intr_info_t3c[] = {
1882 		{ 0x1fffffff,  "TP parity error", -1, 1 },
1883 		{ F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1884 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1885 		{ 0 }
1886 	};
1887 
1888 	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1889 				  adapter->params.rev < T3_REV_C ?
1890 					tp_intr_info : tp_intr_info_t3c, NULL))
1891 		t3_fatal_err(adapter);
1892 }
1893 
1894 /*
1895  * CIM interrupt handler.
1896  */
1897 static void cim_intr_handler(adapter_t *adapter)
1898 {
1899 	static struct intr_info cim_intr_info[] = {
1900 		{ F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1901 		{ F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1902 		{ F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1903 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1904 		{ F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1905 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1906 		{ F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1907 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1908 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1909 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1910 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1911 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1912 		{ F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1913 		{ F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1914 		{ F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1915 		{ F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1916 		{ F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1917 		{ F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1918 		{ F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1919 		{ F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1920 		{ F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1921 		{ F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1922 		{ F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1923 		{ F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1924 		{ 0 }
1925 	};
1926 
1927 	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1928 				  cim_intr_info, NULL))
1929 		t3_fatal_err(adapter);
1930 }
1931 
1932 /*
1933  * ULP RX interrupt handler.
1934  */
1935 static void ulprx_intr_handler(adapter_t *adapter)
1936 {
1937 	static struct intr_info ulprx_intr_info[] = {
1938 		{ F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1939 		{ F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1940 		{ F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1941 		{ F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1942 		{ F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1943 		{ F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1944 		{ F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1945 		{ F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1946 		{ 0 }
1947 	};
1948 
1949 	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1950 				  ulprx_intr_info, NULL))
1951 		t3_fatal_err(adapter);
1952 }
1953 
1954 /*
1955  * ULP TX interrupt handler.
1956  */
1957 static void ulptx_intr_handler(adapter_t *adapter)
1958 {
1959 	static struct intr_info ulptx_intr_info[] = {
1960 		{ F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1961 		  STAT_ULP_CH0_PBL_OOB, 0 },
1962 		{ F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1963 		  STAT_ULP_CH1_PBL_OOB, 0 },
1964 		{ 0xfc, "ULP TX parity error", -1, 1 },
1965 		{ 0 }
1966 	};
1967 
1968 	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1969 				  ulptx_intr_info, adapter->irq_stats))
1970 		t3_fatal_err(adapter);
1971 }
1972 
1973 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1974 	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1975 	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1976 	F_ICSPI1_TX_FRAMING_ERROR)
1977 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1978 	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1979 	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1980 	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1981 
1982 /*
1983  * PM TX interrupt handler.
1984  */
1985 static void pmtx_intr_handler(adapter_t *adapter)
1986 {
1987 	static struct intr_info pmtx_intr_info[] = {
1988 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1989 		{ ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1990 		{ OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1991 		{ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1992 		  "PMTX ispi parity error", -1, 1 },
1993 		{ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1994 		  "PMTX ospi parity error", -1, 1 },
1995 		{ 0 }
1996 	};
1997 
1998 	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1999 				  pmtx_intr_info, NULL))
2000 		t3_fatal_err(adapter);
2001 }
2002 
2003 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
2004 	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
2005 	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
2006 	F_IESPI1_TX_FRAMING_ERROR)
2007 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
2008 	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
2009 	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
2010 	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
2011 
2012 /*
2013  * PM RX interrupt handler.
2014  */
2015 static void pmrx_intr_handler(adapter_t *adapter)
2016 {
2017 	static struct intr_info pmrx_intr_info[] = {
2018 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2019 		{ IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
2020 		{ OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
2021 		{ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
2022 		  "PMRX ispi parity error", -1, 1 },
2023 		{ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
2024 		  "PMRX ospi parity error", -1, 1 },
2025 		{ 0 }
2026 	};
2027 
2028 	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
2029 				  pmrx_intr_info, NULL))
2030 		t3_fatal_err(adapter);
2031 }
2032 
2033 /*
2034  * CPL switch interrupt handler.
2035  */
2036 static void cplsw_intr_handler(adapter_t *adapter)
2037 {
2038 	static struct intr_info cplsw_intr_info[] = {
2039 		{ F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
2040 		{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
2041 		{ F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
2042 		{ F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
2043 		{ F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
2044 		{ F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
2045 		{ 0 }
2046 	};
2047 
2048 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
2049 				  cplsw_intr_info, NULL))
2050 		t3_fatal_err(adapter);
2051 }
2052 
2053 /*
2054  * MPS interrupt handler.
2055  */
2056 static void mps_intr_handler(adapter_t *adapter)
2057 {
2058 	static struct intr_info mps_intr_info[] = {
2059 		{ 0x1ff, "MPS parity error", -1, 1 },
2060 		{ 0 }
2061 	};
2062 
2063 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
2064 				  mps_intr_info, NULL))
2065 		t3_fatal_err(adapter);
2066 }
2067 
2068 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
2069 
2070 /*
2071  * MC7 interrupt handler.
2072  */
2073 static void mc7_intr_handler(struct mc7 *mc7)
2074 {
2075 	adapter_t *adapter = mc7->adapter;
2076 	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
2077 
2078 	if (cause & F_CE) {
2079 		mc7->stats.corr_err++;
2080 		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
2081 			"data 0x%x 0x%x 0x%x\n", mc7->name,
2082 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
2083 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
2084 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
2085 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
2086 	}
2087 
2088 	if (cause & F_UE) {
2089 		mc7->stats.uncorr_err++;
2090 		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
2091 			 "data 0x%x 0x%x 0x%x\n", mc7->name,
2092 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
2093 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
2094 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
2095 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
2096 	}
2097 
2098 	if (G_PE(cause)) {
2099 		mc7->stats.parity_err++;
2100 		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
2101 			 mc7->name, G_PE(cause));
2102 	}
2103 
2104 	if (cause & F_AE) {
2105 		u32 addr = 0;
2106 
2107 		if (adapter->params.rev > 0)
2108 			addr = t3_read_reg(adapter,
2109 					   mc7->offset + A_MC7_ERR_ADDR);
2110 		mc7->stats.addr_err++;
2111 		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
2112 			 mc7->name, addr);
2113 	}
2114 
2115 	if (cause & MC7_INTR_FATAL)
2116 		t3_fatal_err(adapter);
2117 
2118 	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
2119 }
2120 
2121 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
2122 			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
2123 /*
2124  * XGMAC interrupt handler.
2125  */
2126 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
2127 {
2128 	u32 cause;
2129 	struct port_info *pi;
2130 	struct cmac *mac;
2131 
2132 	idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
2133 	pi = adap2pinfo(adap, idx);
2134 	mac = &pi->mac;
2135 
2136 	/*
2137 	 * We mask out interrupt causes for which we're not taking interrupts.
2138 	 * This allows us to use polling logic to monitor some of the other
2139 	 * conditions when taking interrupts would impose too much load on the
2140 	 * system.
2141 	 */
2142 	cause = (t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset)
2143 		 & ~(F_RXFIFO_OVERFLOW));
2144 
2145 	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
2146 		mac->stats.tx_fifo_parity_err++;
2147 		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
2148 	}
2149 	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
2150 		mac->stats.rx_fifo_parity_err++;
2151 		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
2152 	}
2153 	if (cause & F_TXFIFO_UNDERRUN)
2154 		mac->stats.tx_fifo_urun++;
2155 	if (cause & F_RXFIFO_OVERFLOW)
2156 		mac->stats.rx_fifo_ovfl++;
2157 	if (cause & V_SERDES_LOS(M_SERDES_LOS))
2158 		mac->stats.serdes_signal_loss++;
2159 	if (cause & F_XAUIPCSCTCERR)
2160 		mac->stats.xaui_pcs_ctc_err++;
2161 	if (cause & F_XAUIPCSALIGNCHANGE)
2162 		mac->stats.xaui_pcs_align_change++;
2163 	if (cause & F_XGM_INT) {
2164 		t3_set_reg_field(adap,
2165 				 A_XGM_INT_ENABLE + mac->offset,
2166 				 F_XGM_INT, 0);
2167 
2168 		/* link fault suspected */
2169 		pi->link_fault = LF_MAYBE;
2170 	}
2171 
2172 	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
2173 
2174 	if (cause & XGM_INTR_FATAL)
2175 		t3_fatal_err(adap);
2176 
2177 	return cause != 0;
2178 }
2179 
2180 /*
2181  * Interrupt handler for PHY events.
2182  */
2183 int t3_phy_intr_handler(adapter_t *adapter)
2184 {
2185 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
2186 
2187 	for_each_port(adapter, i) {
2188 		struct port_info *p = adap2pinfo(adapter, i);
2189 
2190 		if (!(p->phy.caps & SUPPORTED_IRQ))
2191 			continue;
2192 
2193 		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
2194 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
2195 
2196 			if (phy_cause & cphy_cause_link_change)
2197 				t3_link_changed(adapter, i);
2198 			if (phy_cause & cphy_cause_fifo_error)
2199 				p->phy.fifo_errors++;
2200 			if (phy_cause & cphy_cause_module_change)
2201 				t3_os_phymod_changed(adapter, i);
2202 			if (phy_cause & cphy_cause_alarm)
2203 				CH_WARN(adapter, "Operation affected by "
2204 				    "adverse environment.  Check the spec "
2205 				    "sheet for corrective action.\n");
2206 		}
2207 	}
2208 
2209 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
2210 	return 0;
2211 }
2212 
2213 /**
2214  *	t3_slow_intr_handler - control path interrupt handler
2215  *	@adapter: the adapter
2216  *
2217  *	T3 interrupt handler for non-data interrupt events, e.g., errors.
2218  *	The designation 'slow' reflects that servicing it involves register
2219  *	reads, while data interrupts typically don't involve any MMIOs.
2220  */
2221 int t3_slow_intr_handler(adapter_t *adapter)
2222 {
2223 	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
2224 
2225 	cause &= adapter->slow_intr_mask;
2226 	if (!cause)
2227 		return 0;
2228 	if (cause & F_PCIM0) {
2229 		if (is_pcie(adapter))
2230 			pcie_intr_handler(adapter);
2231 		else
2232 			pci_intr_handler(adapter);
2233 	}
2234 	if (cause & F_SGE3)
2235 		t3_sge_err_intr_handler(adapter);
2236 	if (cause & F_MC7_PMRX)
2237 		mc7_intr_handler(&adapter->pmrx);
2238 	if (cause & F_MC7_PMTX)
2239 		mc7_intr_handler(&adapter->pmtx);
2240 	if (cause & F_MC7_CM)
2241 		mc7_intr_handler(&adapter->cm);
2242 	if (cause & F_CIM)
2243 		cim_intr_handler(adapter);
2244 	if (cause & F_TP1)
2245 		tp_intr_handler(adapter);
2246 	if (cause & F_ULP2_RX)
2247 		ulprx_intr_handler(adapter);
2248 	if (cause & F_ULP2_TX)
2249 		ulptx_intr_handler(adapter);
2250 	if (cause & F_PM1_RX)
2251 		pmrx_intr_handler(adapter);
2252 	if (cause & F_PM1_TX)
2253 		pmtx_intr_handler(adapter);
2254 	if (cause & F_CPL_SWITCH)
2255 		cplsw_intr_handler(adapter);
2256 	if (cause & F_MPS0)
2257 		mps_intr_handler(adapter);
2258 	if (cause & F_MC5A)
2259 		t3_mc5_intr_handler(&adapter->mc5);
2260 	if (cause & F_XGMAC0_0)
2261 		mac_intr_handler(adapter, 0);
2262 	if (cause & F_XGMAC0_1)
2263 		mac_intr_handler(adapter, 1);
2264 	if (cause & F_T3DBG)
2265 		t3_os_ext_intr_handler(adapter);
2266 
2267 	/* Clear the interrupts just processed. */
2268 	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
2269 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2270 	return 1;
2271 }
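
/*
 * Illustrative use (an assumption about the caller, not code from this
 * file): a platform interrupt routine would invoke t3_slow_intr_handler()
 * for non-data events and use the return value (1 if any enabled cause
 * was serviced, 0 otherwise) to tell whether the interrupt was ours:
 *
 *	if (!t3_slow_intr_handler(adapter))
 *		return;		(hypothetical shared-interrupt bailout)
 */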
2272 
2273 static unsigned int calc_gpio_intr(adapter_t *adap)
2274 {
2275 	unsigned int i, gpi_intr = 0;
2276 
2277 	for_each_port(adap, i)
2278 		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
2279 		    adapter_info(adap)->gpio_intr[i])
2280 			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
2281 	return gpi_intr;
2282 }
2283 
2284 /**
2285  *	t3_intr_enable - enable interrupts
2286  *	@adapter: the adapter whose interrupts should be enabled
2287  *
2288  *	Enable interrupts by setting the interrupt enable registers of the
2289  *	various HW modules and then enabling the top-level interrupt
2290  *	concentrator.
2291  */
2292 void t3_intr_enable(adapter_t *adapter)
2293 {
2294 	static struct addr_val_pair intr_en_avp[] = {
2295 		{ A_MC7_INT_ENABLE, MC7_INTR_MASK },
2296 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2297 			MC7_INTR_MASK },
2298 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2299 			MC7_INTR_MASK },
2300 		{ A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
2301 		{ A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
2302 		{ A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
2303 		{ A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
2304 		{ A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
2305 		{ A_MPS_INT_ENABLE, MPS_INTR_MASK },
2306 	};
2307 
2308 	adapter->slow_intr_mask = PL_INTR_MASK;
2309 
2310 	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
2311 	t3_write_reg(adapter, A_TP_INT_ENABLE,
2312 		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
2313 	t3_write_reg(adapter, A_SG_INT_ENABLE, SGE_INTR_MASK);
2314 
2315 	if (adapter->params.rev > 0) {
2316 		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
2317 			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
2318 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
2319 			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
2320 			     F_PBL_BOUND_ERR_CH1);
2321 	} else {
2322 		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
2323 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
2324 	}
2325 
2326 	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
2327 
2328 	if (is_pcie(adapter))
2329 		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2330 	else
2331 		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2332 	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2333 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);          /* flush */
2334 }
2335 
2336 /**
2337  *	t3_intr_disable - disable a card's interrupts
2338  *	@adapter: the adapter whose interrupts should be disabled
2339  *
2340  *	Disable interrupts.  We only disable the top-level interrupt
2341  *	concentrator and the SGE data interrupts.
2342  */
2343 void t3_intr_disable(adapter_t *adapter)
2344 {
2345 	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2346 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);  /* flush */
2347 	adapter->slow_intr_mask = 0;
2348 }
2349 
2350 /**
2351  *	t3_intr_clear - clear all interrupts
2352  *	@adapter: the adapter whose interrupts should be cleared
2353  *
2354  *	Clears all interrupts.
2355  */
2356 void t3_intr_clear(adapter_t *adapter)
2357 {
2358 	static const unsigned int cause_reg_addr[] = {
2359 		A_SG_INT_CAUSE,
2360 		A_SG_RSPQ_FL_STATUS,
2361 		A_PCIX_INT_CAUSE,
2362 		A_MC7_INT_CAUSE,
2363 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2364 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2365 		A_CIM_HOST_INT_CAUSE,
2366 		A_TP_INT_CAUSE,
2367 		A_MC5_DB_INT_CAUSE,
2368 		A_ULPRX_INT_CAUSE,
2369 		A_ULPTX_INT_CAUSE,
2370 		A_CPL_INTR_CAUSE,
2371 		A_PM1_TX_INT_CAUSE,
2372 		A_PM1_RX_INT_CAUSE,
2373 		A_MPS_INT_CAUSE,
2374 		A_T3DBG_INT_CAUSE,
2375 	};
2376 	unsigned int i;
2377 
2378 	/* Clear PHY and MAC interrupts for each port. */
2379 	for_each_port(adapter, i)
2380 		t3_port_intr_clear(adapter, i);
2381 
2382 	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2383 		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2384 
2385 	if (is_pcie(adapter))
2386 		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2387 	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2388 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0);          /* flush */
2389 }
2390 
2391 void t3_xgm_intr_enable(adapter_t *adapter, int idx)
2392 {
2393 	struct port_info *pi = adap2pinfo(adapter, idx);
2394 
2395 	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2396 		     XGM_EXTRA_INTR_MASK);
2397 }
2398 
2399 void t3_xgm_intr_disable(adapter_t *adapter, int idx)
2400 {
2401 	struct port_info *pi = adap2pinfo(adapter, idx);
2402 
2403 	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2404 		     0x7ff);
2405 }
2406 
2407 /**
2408  *	t3_port_intr_enable - enable port-specific interrupts
2409  *	@adapter: associated adapter
2410  *	@idx: index of port whose interrupts should be enabled
2411  *
2412  *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2413  *	adapter port.
2414  */
2415 void t3_port_intr_enable(adapter_t *adapter, int idx)
2416 {
2417 	struct port_info *pi = adap2pinfo(adapter, idx);
2418 
2419 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2420 	pi->phy.ops->intr_enable(&pi->phy);
2421 }
2422 
2423 /**
2424  *	t3_port_intr_disable - disable port-specific interrupts
2425  *	@adapter: associated adapter
2426  *	@idx: index of port whose interrupts should be disabled
2427  *
2428  *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2429  *	adapter port.
2430  */
2431 void t3_port_intr_disable(adapter_t *adapter, int idx)
2432 {
2433 	struct port_info *pi = adap2pinfo(adapter, idx);
2434 
2435 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2436 	pi->phy.ops->intr_disable(&pi->phy);
2437 }
2438 
2439 /**
2440  *	t3_port_intr_clear - clear port-specific interrupts
2441  *	@adapter: associated adapter
2442  *	@idx: index of port whose interrupts to clear
2443  *
2444  *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2445  *	adapter port.
2446  */
2447 void t3_port_intr_clear(adapter_t *adapter, int idx)
2448 {
2449 	struct port_info *pi = adap2pinfo(adapter, idx);
2450 
2451 	t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2452 	pi->phy.ops->intr_clear(&pi->phy);
2453 }
2454 
2455 #define SG_CONTEXT_CMD_ATTEMPTS 100
2456 
2457 /**
2458  * 	t3_sge_write_context - write an SGE context
2459  * 	@adapter: the adapter
2460  * 	@id: the context id
2461  * 	@type: the context type
2462  *
2463  * 	Program an SGE context with the values already loaded in the
2464  * 	CONTEXT_DATA? registers.
2465  */
2466 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
2467 				unsigned int type)
2468 {
2469 	if (type == F_RESPONSEQ) {
2470 		/*
2471 		 * Can't write the Response Queue Context bits for
2472 		 * Interrupt Armed or the Reserve bits after the chip
2473 		 * has been initialized out of reset.  Writing to these
2474 		 * bits can confuse the hardware.
2475 		 */
2476 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2477 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2478 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2479 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2480 	} else {
2481 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2482 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2483 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2484 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2485 	}
2486 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2487 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2488 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2489 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2490 }
2491 
2492 /**
2493  *	clear_sge_ctxt - completely clear an SGE context
2494  *	@adapter: the adapter
2495  *	@id: the context id
2496  *	@type: the context type
2497  *
2498  *	Completely clear an SGE context.  Used predominantly at post-reset
2499  *	initialization.  Note in particular that we don't skip writing to any
2500  *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2501  *	does ...
2502  */
2503 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
2504 {
2505 	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2506 	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2507 	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2508 	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2509 	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2510 	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2511 	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2512 	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2513 	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2514 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2515 	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2516 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2517 }
2518 
2519 /**
2520  *	t3_sge_init_ecntxt - initialize an SGE egress context
2521  *	@adapter: the adapter to configure
2522  *	@id: the context id
2523  *	@gts_enable: whether to enable GTS for the context
2524  *	@type: the egress context type
2525  *	@respq: associated response queue
2526  *	@base_addr: base address of queue
2527  *	@size: number of queue entries
2528  *	@token: uP token
2529  *	@gen: initial generation value for the context
2530  *	@cidx: consumer pointer
2531  *
2532  *	Initialize an SGE egress context and make it ready for use.  If the
2533  *	platform allows concurrent context operations, the caller is
2534  *	responsible for appropriate locking.
2535  */
2536 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2537 		       enum sge_context_type type, int respq, u64 base_addr,
2538 		       unsigned int size, unsigned int token, int gen,
2539 		       unsigned int cidx)
2540 {
2541 	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2542 
2543 	if (base_addr & 0xfff)     /* must be 4K aligned */
2544 		return -EINVAL;
2545 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2546 		return -EBUSY;
2547 
2548 	base_addr >>= 12;
2549 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2550 		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2551 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2552 		     V_EC_BASE_LO((u32)base_addr & 0xffff));
2553 	base_addr >>= 16;
2554 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2555 	base_addr >>= 32;
2556 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2557 		     V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2558 		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2559 		     F_EC_VALID);
2560 	return t3_sge_write_context(adapter, id, F_EGRESS);
2561 }
2562 
2563 /**
2564  *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2565  *	@adapter: the adapter to configure
2566  *	@id: the context id
2567  *	@gts_enable: whether to enable GTS for the context
2568  *	@base_addr: base address of queue
2569  *	@size: number of queue entries
2570  *	@bsize: size of each buffer for this queue
2571  *	@cong_thres: threshold to signal congestion to upstream producers
2572  *	@gen: initial generation value for the context
2573  *	@cidx: consumer pointer
2574  *
2575  *	Initialize an SGE free list context and make it ready for use.  The
2576  *	caller is responsible for ensuring only one context operation occurs
2577  *	at a time.
2578  */
2579 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2580 			u64 base_addr, unsigned int size, unsigned int bsize,
2581 			unsigned int cong_thres, int gen, unsigned int cidx)
2582 {
2583 	if (base_addr & 0xfff)     /* must be 4K aligned */
2584 		return -EINVAL;
2585 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2586 		return -EBUSY;
2587 
2588 	base_addr >>= 12;
2589 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2590 	base_addr >>= 32;
2591 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2592 		     V_FL_BASE_HI((u32)base_addr) |
2593 		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2594 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2595 		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2596 		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2597 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2598 		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2599 		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2600 	return t3_sge_write_context(adapter, id, F_FREELIST);
2601 }
2602 
2603 /**
2604  *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2605  *	@adapter: the adapter to configure
2606  *	@id: the context id
2607  *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2608  *	@base_addr: base address of queue
2609  *	@size: number of queue entries
2610  *	@fl_thres: threshold for selecting the normal or jumbo free list
2611  *	@gen: initial generation value for the context
2612  *	@cidx: consumer pointer
2613  *
2614  *	Initialize an SGE response queue context and make it ready for use.
2615  *	The caller is responsible for ensuring only one context operation
2616  *	occurs at a time.
2617  */
2618 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2619 			 u64 base_addr, unsigned int size,
2620 			 unsigned int fl_thres, int gen, unsigned int cidx)
2621 {
2622 	unsigned int ctrl, intr = 0;
2623 
2624 	if (base_addr & 0xfff)     /* must be 4K aligned */
2625 		return -EINVAL;
2626 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2627 		return -EBUSY;
2628 
2629 	base_addr >>= 12;
2630 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2631 		     V_CQ_INDEX(cidx));
2632 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2633 	base_addr >>= 32;
2634 	ctrl = t3_read_reg(adapter, A_SG_CONTROL);
2635 	if ((irq_vec_idx > 0) ||
2636 	    ((irq_vec_idx == 0) && !(ctrl & F_ONEINTMULTQ)))
2637 		intr = F_RQ_INTR_EN;
2638 	if (irq_vec_idx >= 0)
2639 		intr |= V_RQ_MSI_VEC(irq_vec_idx);
2640 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2641 		     V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2642 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2643 	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2644 }
2645 
2646 /**
2647  *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2648  *	@adapter: the adapter to configure
2649  *	@id: the context id
2650  *	@base_addr: base address of queue
2651  *	@size: number of queue entries
2652  *	@rspq: response queue for async notifications
2653  *	@ovfl_mode: CQ overflow mode
2654  *	@credits: completion queue credits
2655  *	@credit_thres: the credit threshold
2656  *
2657  *	Initialize an SGE completion queue context and make it ready for use.
2658  *	The caller is responsible for ensuring only one context operation
2659  *	occurs at a time.
2660  */
2661 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2662 			unsigned int size, int rspq, int ovfl_mode,
2663 			unsigned int credits, unsigned int credit_thres)
2664 {
2665 	if (base_addr & 0xfff)     /* must be 4K aligned */
2666 		return -EINVAL;
2667 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2668 		return -EBUSY;
2669 
2670 	base_addr >>= 12;
2671 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2672 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2673 	base_addr >>= 32;
2674 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2675 		     V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2676 		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2677 		     V_CQ_ERR(ovfl_mode));
2678 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2679 		     V_CQ_CREDIT_THRES(credit_thres));
2680 	return t3_sge_write_context(adapter, id, F_CQ);
2681 }
2682 
2683 /**
2684  *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2685  *	@adapter: the adapter
2686  *	@id: the egress context id
2687  *	@enable: enable (1) or disable (0) the context
2688  *
2689  *	Enable or disable an SGE egress context.  The caller is responsible for
2690  *	ensuring only one context operation occurs at a time.
2691  */
2692 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2693 {
2694 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2695 		return -EBUSY;
2696 
2697 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2698 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2699 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2700 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2701 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2702 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2703 		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2704 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2705 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2706 }
2707 
2708 /**
2709  *	t3_sge_disable_fl - disable an SGE free-buffer list
2710  *	@adapter: the adapter
2711  *	@id: the free list context id
2712  *
2713  *	Disable an SGE free-buffer list.  The caller is responsible for
2714  *	ensuring only one context operation occurs at a time.
2715  */
2716 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2717 {
2718 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2719 		return -EBUSY;
2720 
2721 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2722 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2723 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2724 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2725 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2726 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2727 		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2728 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2729 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2730 }
2731 
2732 /**
2733  *	t3_sge_disable_rspcntxt - disable an SGE response queue
2734  *	@adapter: the adapter
2735  *	@id: the response queue context id
2736  *
2737  *	Disable an SGE response queue.  The caller is responsible for
2738  *	ensuring only one context operation occurs at a time.
2739  */
2740 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2741 {
2742 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2743 		return -EBUSY;
2744 
2745 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2746 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2747 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2748 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2749 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2750 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2751 		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2752 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2753 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2754 }
2755 
2756 /**
2757  *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2758  *	@adapter: the adapter
2759  *	@id: the completion queue context id
2760  *
2761  *	Disable an SGE completion queue.  The caller is responsible for
2762  *	ensuring only one context operation occurs at a time.
2763  */
2764 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2765 {
2766 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2767 		return -EBUSY;
2768 
2769 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2770 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2771 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2772 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2773 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2774 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2775 		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2776 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2777 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2778 }
2779 
2780 /**
2781  *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2782  *	@adapter: the adapter
2783  *	@id: the context id
2784  *	@op: the operation to perform
2785  *	@credits: credits to return to the CQ
2786  *
2787  *	Perform the selected operation on an SGE completion queue context.
2788  *	The caller is responsible for ensuring only one context operation
2789  *	occurs at a time.
2790  *
2791  *	For most operations the function returns the current HW position in
2792  *	the completion queue.
2793  */
2794 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2795 		      unsigned int credits)
2796 {
2797 	u32 val;
2798 
2799 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2800 		return -EBUSY;
2801 
2802 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2803 	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2804 		     V_CONTEXT(id) | F_CQ);
2805 	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2806 				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2807 		return -EIO;
2808 
2809 	if (op >= 2 && op < 7) {
2810 		if (adapter->params.rev > 0)
2811 			return G_CQ_INDEX(val);
2812 
2813 		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2814 			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2815 		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2816 				    F_CONTEXT_CMD_BUSY, 0,
2817 				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2818 			return -EIO;
2819 		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2820 	}
2821 	return 0;
2822 }
2823 
2824 /**
2825  * 	t3_sge_read_context - read an SGE context
2826  * 	@type: the context type
2827  * 	@adapter: the adapter
2828  * 	@id: the context id
2829  * 	@data: holds the retrieved context
2830  *
2831  * 	Read an SGE context of the given type.  The caller is responsible
2832  * 	for ensuring only one context operation occurs at a time.
2833  */
2834 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2835 			       unsigned int id, u32 data[4])
2836 {
2837 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2838 		return -EBUSY;
2839 
2840 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2841 		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2842 	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2843 			    SG_CONTEXT_CMD_ATTEMPTS, 1))
2844 		return -EIO;
2845 	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2846 	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2847 	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2848 	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2849 	return 0;
2850 }
2851 
2852 /**
2853  * 	t3_sge_read_ecntxt - read an SGE egress context
2854  * 	@adapter: the adapter
2855  * 	@id: the context id
2856  * 	@data: holds the retrieved context
2857  *
2858  * 	Read an SGE egress context.  The caller is responsible for ensuring
2859  * 	only one context operation occurs at a time.
2860  */
2861 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2862 {
2863 	if (id >= 65536)
2864 		return -EINVAL;
2865 	return t3_sge_read_context(F_EGRESS, adapter, id, data);
2866 }
2867 
2868 /**
2869  * 	t3_sge_read_cq - read an SGE CQ context
2870  * 	@adapter: the adapter
2871  * 	@id: the context id
2872  * 	@data: holds the retrieved context
2873  *
2874  * 	Read an SGE CQ context.  The caller is responsible for ensuring
2875  * 	only one context operation occurs at a time.
2876  */
2877 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2878 {
2879 	if (id >= 65536)
2880 		return -EINVAL;
2881 	return t3_sge_read_context(F_CQ, adapter, id, data);
2882 }
2883 
2884 /**
2885  * 	t3_sge_read_fl - read an SGE free-list context
2886  * 	@adapter: the adapter
2887  * 	@id: the context id
2888  * 	@data: holds the retrieved context
2889  *
2890  * 	Read an SGE free-list context.  The caller is responsible for ensuring
2891  * 	only one context operation occurs at a time.
2892  */
2893 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2894 {
2895 	if (id >= SGE_QSETS * 2)
2896 		return -EINVAL;
2897 	return t3_sge_read_context(F_FREELIST, adapter, id, data);
2898 }
2899 
2900 /**
2901  * 	t3_sge_read_rspq - read an SGE response queue context
2902  * 	@adapter: the adapter
2903  * 	@id: the context id
2904  * 	@data: holds the retrieved context
2905  *
2906  * 	Read an SGE response queue context.  The caller is responsible for
2907  * 	ensuring only one context operation occurs at a time.
2908  */
2909 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2910 {
2911 	if (id >= SGE_QSETS)
2912 		return -EINVAL;
2913 	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2914 }
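
/*
 * Example (an illustrative sketch): dumping a response queue context.
 * The four CONTEXT_DATA words land in the caller's array; "qid" is a
 * hypothetical queue index below SGE_QSETS.
 *
 *	u32 data[4];
 *
 *	if (t3_sge_read_rspq(adapter, qid, data) == 0)
 *		printf("rspq %u: %08x %08x %08x %08x\n", qid,
 *		       data[0], data[1], data[2], data[3]);
 */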
2915 
2916 /**
2917  *	t3_config_rss - configure Rx packet steering
2918  *	@adapter: the adapter
2919  *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2920  *	@cpus: values for the CPU lookup table (0xff terminated)
2921  *	@rspq: values for the response queue lookup table (0xffff terminated)
2922  *
2923  *	Programs the receive packet steering logic.  @cpus and @rspq provide
2924  *	the values for the CPU and response queue lookup tables.  If they
2925  *	provide fewer values than the size of the tables, the supplied values
2926  *	are used repeatedly until the tables are fully populated.
2927  */
2928 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2929 		   const u16 *rspq)
2930 {
2931 	int i, j, cpu_idx = 0, q_idx = 0;
2932 
2933 	if (cpus)
2934 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2935 			u32 val = i << 16;
2936 
2937 			for (j = 0; j < 2; ++j) {
2938 				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2939 				if (cpus[cpu_idx] == 0xff)
2940 					cpu_idx = 0;
2941 			}
2942 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2943 		}
2944 
2945 	if (rspq)
2946 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2947 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2948 				     (i << 16) | rspq[q_idx++]);
2949 			if (rspq[q_idx] == 0xffff)
2950 				q_idx = 0;
2951 		}
2952 
2953 	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2954 }
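
/*
 * Example (an illustrative sketch): steer all traffic to CPU 0 and
 * response queue 0.  Each table is filled by cycling through the supplied
 * values until the terminator (0xff for @cpus, 0xffff for @rspq) is seen;
 * "rss_config" stands for whatever value the caller wants written to
 * TP_RSS_CONFIG.
 *
 *	static const u8 cpus[] = { 0, 0xff };
 *	static const u16 rspq[] = { 0, 0xffff };
 *
 *	t3_config_rss(adapter, rss_config, cpus, rspq);
 */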
2955 
2956 /**
2957  *	t3_read_rss - read the contents of the RSS tables
2958  *	@adapter: the adapter
2959  *	@lkup: holds the contents of the RSS lookup table
2960  *	@map: holds the contents of the RSS map table
2961  *
2962  *	Reads the contents of the receive packet steering tables.
2963  */
2964 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2965 {
2966 	int i;
2967 	u32 val;
2968 
2969 	if (lkup)
2970 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2971 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2972 				     0xffff0000 | i);
2973 			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2974 			if (!(val & 0x80000000))
2975 				return -EAGAIN;
2976 			*lkup++ = (u8)val;
2977 			*lkup++ = (u8)(val >> 8);
2978 		}
2979 
2980 	if (map)
2981 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2982 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2983 				     0xffff0000 | i);
2984 			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2985 			if (!(val & 0x80000000))
2986 				return -EAGAIN;
2987 			*map++ = (u16)val;
2988 		}
2989 	return 0;
2990 }
2991 
2992 /**
2993  *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2994  *	@adap: the adapter
2995  *	@enable: 1 to select offload mode, 0 for regular NIC
2996  *
2997  *	Switches TP to NIC/offload mode.
2998  */
2999 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
3000 {
3001 	if (is_offload(adap) || !enable)
3002 		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
3003 				 V_NICMODE(!enable));
3004 }
3005 
3006 /**
3007  *	tp_wr_bits_indirect - set/clear bits in an indirect TP register
3008  *	@adap: the adapter
3009  *	@addr: the indirect TP register address
3010  *	@mask: specifies the field within the register to modify
3011  *	@val: new value for the field
3012  *
3013  *	Sets a field of an indirect TP register to the given value.
3014  */
3015 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
3016 				unsigned int mask, unsigned int val)
3017 {
3018 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3019 	val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3020 	t3_write_reg(adap, A_TP_PIO_DATA, val);
3021 }
3022 
3023 /**
3024  *	t3_enable_filters - enable the HW filters
3025  *	@adap: the adapter
3026  *
3027  *	Enables the HW filters for NIC traffic.
3028  */
3029 void t3_enable_filters(adapter_t *adap)
3030 {
3031 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
3032 	t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
3033 	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
3034 	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
3035 }
3036 
3037 /**
3038  *	t3_disable_filters - disable the HW filters
3039  *	@adap: the adapter
3040  *
3041  *	Disables the HW filters for NIC traffic.
3042  */
3043 void t3_disable_filters(adapter_t *adap)
3044 {
3045 	/* note that we don't want to revert to NIC-only mode */
3046 	t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_FILTEREN, 0);
3047 	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG,
3048 			 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 0);
3049 	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, F_LOOKUPEVERYPKT, 0);
3050 }
3051 
3052 /**
3053  *	pm_num_pages - calculate the number of pages of the payload memory
3054  *	@mem_size: the size of the payload memory
3055  *	@pg_size: the size of each payload memory page
3056  *
3057  *	Calculate the number of pages, each of the given size, that fit in a
3058  *	memory of the specified size, respecting the HW requirement that the
3059  *	number of pages must be a multiple of 24.
3060  */
3061 static inline unsigned int pm_num_pages(unsigned int mem_size,
3062 					unsigned int pg_size)
3063 {
3064 	unsigned int n = mem_size / pg_size;
3065 
3066 	return n - n % 24;
3067 }
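
/*
 * For example, a 64MB payload memory with 16KB pages yields n = 4096 raw
 * pages, of which 4096 - 4096 % 24 = 4080 are usable.
 */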
3068 
3069 #define mem_region(adap, start, size, reg) \
3070 	t3_write_reg((adap), A_ ## reg, (start)); \
3071 	start += size
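
/*
 * Note that mem_region() expands to two statements, so it cannot serve as
 * the unbraced body of an if/else; partition_mem() below only invokes it
 * at the top level of the function.
 */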
3072 
3073 /**
3074  *	partition_mem - partition memory and configure TP memory settings
3075  *	@adap: the adapter
3076  *	@p: the TP parameters
3077  *
3078  *	Partitions context and payload memory and configures TP's memory
3079  *	registers.
3080  */
3081 static void partition_mem(adapter_t *adap, const struct tp_params *p)
3082 {
3083 	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
3084 	unsigned int timers = 0, timers_shift = 22;
3085 
3086 	if (adap->params.rev > 0) {
3087 		if (tids <= 16 * 1024) {
3088 			timers = 1;
3089 			timers_shift = 16;
3090 		} else if (tids <= 64 * 1024) {
3091 			timers = 2;
3092 			timers_shift = 18;
3093 		} else if (tids <= 256 * 1024) {
3094 			timers = 3;
3095 			timers_shift = 20;
3096 		}
3097 	}
3098 
3099 	t3_write_reg(adap, A_TP_PMM_SIZE,
3100 		     p->chan_rx_size | (p->chan_tx_size >> 16));
3101 
3102 	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
3103 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
3104 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
3105 	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
3106 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
3107 
3108 	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
3109 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
3110 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
3111 
3112 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
3113 	/* Add a bit of headroom and make it a multiple of 24 */
3114 	pstructs += 48;
3115 	pstructs -= pstructs % 24;
3116 	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
3117 
3118 	m = tids * TCB_SIZE;
3119 	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
3120 	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
3121 	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
3122 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
3123 	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
3124 	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
3125 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
3126 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
3127 
3128 	m = (m + 4095) & ~0xfff;
3129 	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
3130 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
3131 
3132 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
3133 	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
3134 	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
3135 	if (tids < m)
3136 		adap->params.mc5.nservers += m - tids;
3137 }
3138 
3139 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
3140 {
3141 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3142 	t3_write_reg(adap, A_TP_PIO_DATA, val);
3143 }
3144 
3145 static inline u32 tp_rd_indirect(adapter_t *adap, unsigned int addr)
3146 {
3147 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3148 	return t3_read_reg(adap, A_TP_PIO_DATA);
3149 }
3150 
3151 static void tp_config(adapter_t *adap, const struct tp_params *p)
3152 {
3153 	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
3154 		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
3155 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
3156 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
3157 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
3158 		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
3159 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
3160 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
3161 		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
3162 		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
3163 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
3164 			 F_IPV6ENABLE | F_NICMODE);
3165 	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
3166 	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
3167 	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
3168 			 adap->params.rev > 0 ? F_ENABLEESND :
3169 						F_T3A_ENABLEESND);
3170 	t3_set_reg_field(adap, A_TP_PC_CONFIG,
3171 			 F_ENABLEEPCMDAFULL,
3172 			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
3173 			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
3174 	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
3175 			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
3176 			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
3177 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
3178 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
3179 
3180 	if (adap->params.rev > 0) {
3181 		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
3182 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
3183 				 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
3184 		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
3185 		tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
3186 		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
3187 		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
3188 	} else
3189 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
3190 
3191 	if (adap->params.rev == T3_REV_C)
3192 		t3_set_reg_field(adap, A_TP_PC_CONFIG,
3193 				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
3194 				 V_TABLELATENCYDELTA(4));
3195 
3196 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
3197 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
3198 	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
3199 	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
3200 
3201 	if (adap->params.nports > 2) {
3202 		t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
3203 				 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
3204 				 F_ENABLERXPORTFROMADDR);
3205 		tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
3206 				    V_RXMAPMODE(M_RXMAPMODE), 0);
3207 		tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
3208 			       V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
3209 			       F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
3210 			       F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
3211 		tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
3212 		tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
3213 		tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
3214 	}
3215 }
3216 
3217 /* TCP timer values in ms */
3218 #define TP_DACK_TIMER 50
3219 #define TP_RTO_MIN    250
3220 
3221 /**
3222  *	tp_set_timers - set TP timing parameters
3223  *	@adap: the adapter to set
3224  *	@core_clk: the core clock frequency in Hz
3225  *
3226  *	Set TP's timing parameters, such as the various timer resolutions and
3227  *	the TCP timer values.
3228  */
3229 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
3230 {
3231 	unsigned int tre = adap->params.tp.tre;
3232 	unsigned int dack_re = adap->params.tp.dack_re;
3233 	unsigned int tstamp_re = fls(core_clk / 1000);     /* 1ms, at least */
3234 	unsigned int tps = core_clk >> tre;
3235 
3236 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
3237 		     V_DELAYEDACKRESOLUTION(dack_re) |
3238 		     V_TIMESTAMPRESOLUTION(tstamp_re));
3239 	t3_write_reg(adap, A_TP_DACK_TIMER,
3240 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
3241 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
3242 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
3243 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
3244 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
3245 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
3246 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
3247 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
3248 		     V_KEEPALIVEMAX(9));
3249 
3250 #define SECONDS * tps
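
/*
 * With the definition above, "2 SECONDS" expands to "2 * tps", i.e., a
 * duration in TP timer ticks (tps is the number of such ticks per second
 * at the programmed resolution), which is the unit the timer registers
 * below expect.
 */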
3251 
3252 	t3_write_reg(adap, A_TP_MSL,
3253 		     adap->params.rev > 0 ? 0 : 2 SECONDS);
3254 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
3255 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
3256 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
3257 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
3258 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
3259 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
3260 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
3261 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
3262 
3263 #undef SECONDS
3264 }
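
/*
 * Worked example, assuming a hypothetical 200 MHz core clock:
 * t3_prep_adapter() below picks dack_re = fls(cclk / 10) - 1 = 14 for
 * cclk = 200000 KHz, so one delayed-ACK tick is 2^14 / 200e6 s, about
 * 82 us.  The value written to A_TP_DACK_TIMER is then
 * (200000000 >> 14) / (1000 / 50) = 12207 / 20 = 610 ticks, roughly the
 * 50 ms called for by TP_DACK_TIMER.
 */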
3265 
3266 #ifdef CONFIG_CHELSIO_T3_CORE
3267 /**
3268  *	t3_tp_set_coalescing_size - set receive coalescing size
3269  *	@adap: the adapter
3270  *	@size: the receive coalescing size
3271  *	@psh: whether a set PSH bit should deliver coalesced data
3272  *
3273  *	Set the receive coalescing size and PSH bit handling.
3274  */
3275 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
3276 {
3277 	u32 val;
3278 
3279 	if (size > MAX_RX_COALESCING_LEN)
3280 		return -EINVAL;
3281 
3282 	val = t3_read_reg(adap, A_TP_PARA_REG3);
3283 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
3284 
3285 	if (size) {
3286 		val |= F_RXCOALESCEENABLE;
3287 		if (psh)
3288 			val |= F_RXCOALESCEPSHEN;
3289 		size = min(MAX_RX_COALESCING_LEN, size);
3290 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
3291 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
3292 	}
3293 	t3_write_reg(adap, A_TP_PARA_REG3, val);
3294 	return 0;
3295 }
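
/*
 * Note that calling t3_tp_set_coalescing_size() with a @size of 0 leaves
 * F_RXCOALESCEENABLE clear, i.e., it disables receive coalescing
 * entirely.
 */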
3296 
3297 /**
3298  *	t3_tp_set_max_rxsize - set the max receive size
3299  *	@adap: the adapter
3300  *	@size: the max receive size
3301  *
3302  *	Set TP's max receive size.  This is the limit that applies when
3303  *	receive coalescing is disabled.
3304  */
3305 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
3306 {
3307 	t3_write_reg(adap, A_TP_PARA_REG7,
3308 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
3309 }
3310 
3311 static void __devinit init_mtus(unsigned short mtus[])
3312 {
3313 	/*
3314 	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
3315 	 * it can accommodate max size TCP/IP headers when SACK and timestamps
3316 	 * are enabled and still have at least 8 bytes of payload.
3317 	 */
3318 	mtus[0] = 88;
3319 	mtus[1] = 88;
3320 	mtus[2] = 256;
3321 	mtus[3] = 512;
3322 	mtus[4] = 576;
3323 	mtus[5] = 1024;
3324 	mtus[6] = 1280;
3325 	mtus[7] = 1492;
3326 	mtus[8] = 1500;
3327 	mtus[9] = 2002;
3328 	mtus[10] = 2048;
3329 	mtus[11] = 4096;
3330 	mtus[12] = 4352;
3331 	mtus[13] = 8192;
3332 	mtus[14] = 9000;
3333 	mtus[15] = 9600;
3334 }
3335 
3336 /**
3337  *	init_cong_ctrl - initialize congestion control parameters
3338  *	@a: the alpha values for congestion control
3339  *	@b: the beta values for congestion control
3340  *
3341  *	Initialize the congestion control parameters.
3342  */
3343 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3344 {
3345 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3346 	a[9] = 2;
3347 	a[10] = 3;
3348 	a[11] = 4;
3349 	a[12] = 5;
3350 	a[13] = 6;
3351 	a[14] = 7;
3352 	a[15] = 8;
3353 	a[16] = 9;
3354 	a[17] = 10;
3355 	a[18] = 14;
3356 	a[19] = 17;
3357 	a[20] = 21;
3358 	a[21] = 25;
3359 	a[22] = 30;
3360 	a[23] = 35;
3361 	a[24] = 45;
3362 	a[25] = 60;
3363 	a[26] = 80;
3364 	a[27] = 100;
3365 	a[28] = 200;
3366 	a[29] = 300;
3367 	a[30] = 400;
3368 	a[31] = 500;
3369 
3370 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3371 	b[9] = b[10] = 1;
3372 	b[11] = b[12] = 2;
3373 	b[13] = b[14] = b[15] = b[16] = 3;
3374 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3375 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3376 	b[28] = b[29] = 6;
3377 	b[30] = b[31] = 7;
3378 }
3379 
3380 /* The minimum additive increment value for the congestion control table */
3381 #define CC_MIN_INCR 2U
3382 
3383 /**
3384  *	t3_load_mtus - write the MTU and congestion control HW tables
3385  *	@adap: the adapter
3386  *	@mtus: the unrestricted values for the MTU table
3387  *	@alpha: the values for the congestion control alpha parameter
3388  *	@beta: the values for the congestion control beta parameter
3389  *	@mtu_cap: the maximum permitted effective MTU
3390  *
3391  *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
3392  *	Update the high-speed congestion control table with the supplied alpha,
3393  *	beta, and MTUs.
3394  */
3395 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
3396 		  unsigned short alpha[NCCTRL_WIN],
3397 		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
3398 {
3399 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3400 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3401 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3402 		28672, 40960, 57344, 81920, 114688, 163840, 229376 };
3403 
3404 	unsigned int i, w;
3405 
3406 	for (i = 0; i < NMTUS; ++i) {
3407 		unsigned int mtu = min(mtus[i], mtu_cap);
3408 		unsigned int log2 = fls(mtu);
3409 
3410 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3411 			log2--;
3412 		t3_write_reg(adap, A_TP_MTU_TABLE,
3413 			     (i << 24) | (log2 << 16) | mtu);
3414 
3415 		for (w = 0; w < NCCTRL_WIN; ++w) {
3416 			unsigned int inc;
3417 
3418 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3419 				  CC_MIN_INCR);
3420 
3421 			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3422 				     (w << 16) | (beta[w] << 13) | inc);
3423 		}
3424 	}
3425 }
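
/*
 * Worked example: for mtu = 1500, fls(1500) = 11 but bit 9 (the bit two
 * below the MSB) is clear, so log2 is rounded down to 10 and 1500 is
 * binned with 1024, while MTUs of 1536-2047 keep log2 = 11.  For the
 * congestion table, illustrative values mtu = 1500, alpha[w] = 2 and
 * avg_pkts[w] = 112 give inc = ((1500 - 40) * 2) / 112 = 26, well above
 * CC_MIN_INCR.
 */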
3426 
3427 /**
3428  *	t3_read_hw_mtus - returns the values in the HW MTU table
3429  *	@adap: the adapter
3430  *	@mtus: where to store the HW MTU values
3431  *
3432  *	Reads the HW MTU table.
3433  */
3434 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3435 {
3436 	int i;
3437 
3438 	for (i = 0; i < NMTUS; ++i) {
3439 		unsigned int val;
3440 
3441 		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3442 		val = t3_read_reg(adap, A_TP_MTU_TABLE);
3443 		mtus[i] = val & 0x3fff;
3444 	}
3445 }
3446 
3447 /**
3448  *	t3_get_cong_cntl_tab - reads the congestion control table
3449  *	@adap: the adapter
3450  *	@incr: where to store the additive increment values
3451  *
3452  *	Reads the additive increments programmed into the HW congestion
3453  *	control table.
3454  */
3455 void t3_get_cong_cntl_tab(adapter_t *adap,
3456 			  unsigned short incr[NMTUS][NCCTRL_WIN])
3457 {
3458 	unsigned int mtu, w;
3459 
3460 	for (mtu = 0; mtu < NMTUS; ++mtu)
3461 		for (w = 0; w < NCCTRL_WIN; ++w) {
3462 			t3_write_reg(adap, A_TP_CCTRL_TABLE,
3463 				     0xffff0000 | (mtu << 5) | w);
3464 			incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3465 				        A_TP_CCTRL_TABLE) & 0x1fff;
3466 		}
3467 }
3468 
3469 /**
3470  *	t3_tp_get_mib_stats - read TP's MIB counters
3471  *	@adap: the adapter
3472  *	@tps: holds the returned counter values
3473  *
3474  *	Returns the values of TP's MIB counters.
3475  */
3476 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
3477 {
3478 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3479 			 sizeof(*tps) / sizeof(u32), 0);
3480 }
3481 
3482 /**
3483  *	t3_read_pace_tbl - read the pace table
3484  *	@adap: the adapter
3485  *	@pace_vals: holds the returned values
3486  *
3487  *	Returns the values of TP's pace table in nanoseconds.
3488  */
3489 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
3490 {
3491 	unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3492 
3493 	for (i = 0; i < NTX_SCHED; i++) {
3494 		t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3495 		pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3496 	}
3497 }
3498 
3499 /**
3500  *	t3_set_pace_tbl - set the pace table
3501  *	@adap: the adapter
3502  *	@pace_vals: the pace values in nanoseconds
3503  *	@start: index of the first entry in the HW pace table to set
3504  *	@n: how many entries to set
3505  *
3506  *	Sets (a subset of the) HW pace table.
3507  */
3508 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3509 		     unsigned int start, unsigned int n)
3510 {
3511 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3512 
3513 	for ( ; n; n--, start++, pace_vals++)
3514 		t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3515 			     ((*pace_vals + tick_ns / 2) / tick_ns));
3516 }
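
/*
 * In both pace-table routines tick_ns = dack_ticks_to_usec(adap, 1000),
 * the duration of 1000 delayed-ACK ticks in microseconds, i.e., the
 * duration of one tick in nanoseconds; t3_set_pace_tbl() adds
 * tick_ns / 2 before dividing so the requested delay is rounded to the
 * nearest tick rather than truncated.
 */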
3517 
3518 #define ulp_region(adap, name, start, len) \
3519 	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3520 	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3521 		     (start) + (len) - 1); \
3522 	start += len
3523 
3524 #define ulptx_region(adap, name, start, len) \
3525 	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3526 	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3527 		     (start) + (len) - 1)
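
/*
 * Note that ulp_region() advances @start past the region it programs
 * while ulptx_region() leaves it in place, so in ulp_config() below the
 * ULPTX TPT and PBL limits cover the same memory ranges as the ULPRX
 * STAG and PBL regions that immediately follow them.
 */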
3528 
3529 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3530 {
3531 	unsigned int m = p->chan_rx_size;
3532 
3533 	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3534 	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3535 	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3536 	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3537 	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3538 	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3539 	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3540 	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3541 }
3542 
3543 
3544 /**
3545  *	t3_set_proto_sram - set the contents of the protocol sram
3546  *	@adapter: the adapter
3547  *	@data: the protocol image
3548  *
3549  *	Write the contents of the protocol SRAM.
3550  */
3551 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3552 {
3553 	int i;
3554 	const u32 *buf = (const u32 *)data;
3555 
3556 	for (i = 0; i < PROTO_SRAM_LINES; i++) {
3557 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3558 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3559 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3560 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3561 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3562 
3563 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1U << 31);
3564 		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3565 			return -EIO;
3566 	}
3567 	return 0;
3568 }
3569 #endif
3570 
3571 /**
3572  *	t3_config_trace_filter - configure one of the tracing filters
3573  *	@adapter: the adapter
3574  *	@tp: the desired trace filter parameters
3575  *	@filter_index: which filter to configure
3576  *	@invert: if set non-matching packets are traced instead of matching ones
3577  *	@enable: whether to enable or disable the filter
3578  *
3579  *	Configures one of the tracing filters available in HW.
3580  */
3581 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3582 			    int filter_index, int invert, int enable)
3583 {
3584 	u32 addr, key[4], mask[4];
3585 
3586 	key[0] = tp->sport | (tp->sip << 16);
3587 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
3588 	key[2] = tp->dip;
3589 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3590 
3591 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3592 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3593 	mask[2] = tp->dip_mask;
3594 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3595 
3596 	if (invert)
3597 		key[3] |= (1 << 29);
3598 	if (enable)
3599 		key[3] |= (1 << 28);
3600 
3601 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3602 	tp_wr_indirect(adapter, addr++, key[0]);
3603 	tp_wr_indirect(adapter, addr++, mask[0]);
3604 	tp_wr_indirect(adapter, addr++, key[1]);
3605 	tp_wr_indirect(adapter, addr++, mask[1]);
3606 	tp_wr_indirect(adapter, addr++, key[2]);
3607 	tp_wr_indirect(adapter, addr++, mask[2]);
3608 	tp_wr_indirect(adapter, addr++, key[3]);
3609 	tp_wr_indirect(adapter, addr,   mask[3]);
3610 	(void) t3_read_reg(adapter, A_TP_PIO_DATA);
3611 }
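
/*
 * Usage sketch (hypothetical; zero mask bits are assumed to make the
 * corresponding key bits "don't care"): to trace packets to TCP port 80
 * with the Tx filter (filter_index 0) one might do
 *
 *	struct trace_params tp = { 0 };
 *
 *	tp.proto = IPPROTO_TCP;
 *	tp.proto_mask = 0xff;
 *	tp.dport = 80;
 *	tp.dport_mask = 0xffff;
 *	t3_config_trace_filter(adapter, &tp, 0, 0, 1);
 *
 * leaving the remaining masks 0 so those key fields are ignored.
 */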
3612 
3613 /**
3614  *	t3_query_trace_filter - query a tracing filter
3615  *	@adapter: the adapter
3616  *	@tp: where to store the current trace filter parameters
3617  *	@filter_index: which filter to query
3618  *	@inverted: set to non-zero if the filter is inverted
3619  *	@enabled: set to non-zero if the filter is enabled
3620  *
3621  *	Returns the current settings of the specified HW tracing filter.
3622  */
3623 void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp,
3624 			   int filter_index, int *inverted, int *enabled)
3625 {
3626 	u32 addr, key[4], mask[4];
3627 
3628 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3629 	key[0]  = tp_rd_indirect(adapter, addr++);
3630 	mask[0] = tp_rd_indirect(adapter, addr++);
3631 	key[1]  = tp_rd_indirect(adapter, addr++);
3632 	mask[1] = tp_rd_indirect(adapter, addr++);
3633 	key[2]  = tp_rd_indirect(adapter, addr++);
3634 	mask[2] = tp_rd_indirect(adapter, addr++);
3635 	key[3]  = tp_rd_indirect(adapter, addr++);
3636 	mask[3] = tp_rd_indirect(adapter, addr);
3637 
3638 	tp->sport = key[0] & 0xffff;
3639 	tp->sip   = (key[0] >> 16) | ((key[1] & 0xffff) << 16);
3640 	tp->dport = key[1] >> 16;
3641 	tp->dip   = key[2];
3642 	tp->proto = key[3] & 0xff;
3643 	tp->vlan  = key[3] >> 8;
3644 	tp->intf  = key[3] >> 20;
3645 
3646 	tp->sport_mask = mask[0] & 0xffff;
3647 	tp->sip_mask   = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16);
3648 	tp->dport_mask = mask[1] >> 16;
3649 	tp->dip_mask   = mask[2];
3650 	tp->proto_mask = mask[3] & 0xff;
3651 	tp->vlan_mask  = mask[3] >> 8;
3652 	tp->intf_mask  = mask[3] >> 20;
3653 
3654 	*inverted = key[3] & (1 << 29);
3655 	*enabled  = key[3] & (1 << 28);
3656 }
3657 
3658 /**
3659  *	t3_config_sched - configure a HW traffic scheduler
3660  *	@adap: the adapter
3661  *	@kbps: target rate in Kbps
3662  *	@sched: the scheduler index
3663  *
3664  *	Configure a Tx HW scheduler for the target rate.
3665  */
3666 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3667 {
3668 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3669 	unsigned int clk = adap->params.vpd.cclk * 1000;
3670 	unsigned int selected_cpt = 0, selected_bpt = 0;
3671 
3672 	if (kbps > 0) {
3673 		kbps *= 125;     /* -> bytes/s */
3674 		for (cpt = 1; cpt <= 255; cpt++) {
3675 			tps = clk / cpt;
3676 			bpt = (kbps + tps / 2) / tps;
3677 			if (bpt > 0 && bpt <= 255) {
3678 				v = bpt * tps;
3679 				delta = v >= kbps ? v - kbps : kbps - v;
3680 				if (delta < mindelta) {
3681 					mindelta = delta;
3682 					selected_cpt = cpt;
3683 					selected_bpt = bpt;
3684 				}
3685 			} else if (selected_cpt)
3686 				break;
3687 		}
3688 		if (!selected_cpt)
3689 			return -EINVAL;
3690 	}
3691 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3692 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3693 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3694 	if (sched & 1)
3695 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3696 	else
3697 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3698 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3699 	return 0;
3700 }
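
/*
 * Worked example, assuming a hypothetical 200 MHz core clock and
 * kbps = 100000: the target becomes 100000 * 125 = 12500000 bytes/s,
 * and the search settles on cpt = 16, bpt = 1, since
 * tps = 200000000 / 16 = 12500000 ticks/s at one byte per tick matches
 * the target exactly (delta = 0).
 */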
3701 
3702 /**
3703  *	t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3704  *	@adap: the adapter
3705  *	@sched: the scheduler index
3706  *	@ipg: the interpacket delay in tenths of nanoseconds
3707  *
3708  *	Set the interpacket delay for a HW packet rate scheduler.
3709  */
3710 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3711 {
3712 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3713 
3714 	/* convert ipg to nearest number of core clocks */
3715 	ipg *= core_ticks_per_usec(adap);
3716 	ipg = (ipg + 5000) / 10000;
3717 	if (ipg > 0xffff)
3718 		return -EINVAL;
3719 
3720 	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3721 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3722 	if (sched & 1)
3723 		v = (v & 0xffff) | (ipg << 16);
3724 	else
3725 		v = (v & 0xffff0000) | ipg;
3726 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3727 	t3_read_reg(adap, A_TP_TM_PIO_DATA);
3728 	return 0;
3729 }
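
/*
 * The conversion above works because @ipg arrives in tenths of
 * nanoseconds: multiplying by core ticks per microsecond and dividing by
 * 10000 (tenths of ns per microsecond) yields core clocks, with the
 * +5000 providing rounding.  E.g., ipg = 10000 (1 us) on a hypothetical
 * 200 MHz core clock gives (10000 * 200 + 5000) / 10000 = 200 clocks.
 */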
3730 
3731 /**
3732  *	t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3733  *	@adap: the adapter
3734  *	@sched: the scheduler index
3735  *	@kbps: where to store the rate in Kbps
3736  *	@ipg: where to store the interpacket delay in tenths of nanoseconds
3737  *
3738  *	Return the current configuration of a HW Tx scheduler.
3739  */
3740 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3741 		     unsigned int *ipg)
3742 {
3743 	unsigned int v, addr, bpt, cpt;
3744 
3745 	if (kbps) {
3746 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3747 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3748 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3749 		if (sched & 1)
3750 			v >>= 16;
3751 		bpt = (v >> 8) & 0xff;
3752 		cpt = v & 0xff;
3753 		if (!cpt)
3754 			*kbps = 0;        /* scheduler disabled */
3755 		else {
3756 			v = (adap->params.vpd.cclk * 1000) / cpt;
3757 			*kbps = (v * bpt) / 125;
3758 		}
3759 	}
3760 	if (ipg) {
3761 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3762 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3763 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3764 		if (sched & 1)
3765 			v >>= 16;
3766 		v &= 0xffff;
3767 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3768 	}
3769 }
3770 
3771 /**
3772  *	tp_init - configure TP
3773  *	@adap: the adapter
3774  *	@p: TP configuration parameters
3775  *
3776  *	Initializes the TP HW module.
3777  */
3778 static int tp_init(adapter_t *adap, const struct tp_params *p)
3779 {
3780 	int busy = 0;
3781 
3782 	tp_config(adap, p);
3783 	t3_set_vlan_accel(adap, 3, 0);
3784 
3785 	if (is_offload(adap)) {
3786 		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3787 		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3788 		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3789 				       0, 1000, 5);
3790 		if (busy)
3791 			CH_ERR(adap, "TP initialization timed out\n");
3792 	}
3793 
3794 	if (!busy)
3795 		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3796 	return busy;
3797 }
3798 
3799 /**
3800  *	t3_mps_set_active_ports - configure port failover
3801  *	@adap: the adapter
3802  *	@port_mask: bitmap of active ports
3803  *
3804  *	Sets the active ports according to the supplied bitmap.
3805  */
3806 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3807 {
3808 	if (port_mask & ~((1 << adap->params.nports) - 1))
3809 		return -EINVAL;
3810 	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3811 			 port_mask << S_PORT0ACTIVE);
3812 	return 0;
3813 }
3814 
3815 /**
3816  * 	chan_init_hw - channel-dependent HW initialization
3817  *	@adap: the adapter
3818  *	@chan_map: bitmap of Tx channels being used
3819  *
3820  *	Perform the bits of HW initialization that are dependent on the Tx
3821  *	channels being used.
3822  */
3823 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3824 {
3825 	int i;
3826 
3827 	if (chan_map != 3) {                                 /* one channel */
3828 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3829 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3830 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3831 			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3832 					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3833 		t3_write_reg(adap, A_PM1_TX_CFG,
3834 			     chan_map == 1 ? 0xffffffff : 0);
3835 		if (chan_map == 2)
3836 			t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3837 				     V_TX_MOD_QUEUE_REQ_MAP(0xff));
3838 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3839 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3840 	} else {                                             /* two channels */
3841 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3842 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3843 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3844 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3845 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3846 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3847 			     F_ENFORCEPKT);
3848 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3849 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3850 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3851 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3852 		for (i = 0; i < 16; i++)
3853 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3854 				     (i << 16) | 0x1010);
3855 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3856 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
3857 	}
3858 }
3859 
3860 static int calibrate_xgm(adapter_t *adapter)
3861 {
3862 	if (uses_xaui(adapter)) {
3863 		unsigned int v, i;
3864 
3865 		for (i = 0; i < 5; ++i) {
3866 			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3867 			(void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3868 			msleep(1);
3869 			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3870 			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3871 				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3872 					     V_XAUIIMP(G_CALIMP(v) >> 2));
3873 				return 0;
3874 			}
3875 		}
3876 		CH_ERR(adapter, "MAC calibration failed\n");
3877 		return -1;
3878 	} else {
3879 		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3880 			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3881 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3882 				 F_XGM_IMPSETUPDATE);
3883 	}
3884 	return 0;
3885 }
3886 
3887 static void calibrate_xgm_t3b(adapter_t *adapter)
3888 {
3889 	if (!uses_xaui(adapter)) {
3890 		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3891 			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3892 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3893 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3894 				 F_XGM_IMPSETUPDATE);
3895 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3896 				 0);
3897 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3898 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3899 	}
3900 }
3901 
3902 struct mc7_timing_params {
3903 	unsigned char ActToPreDly;
3904 	unsigned char ActToRdWrDly;
3905 	unsigned char PreCyc;
3906 	unsigned char RefCyc[5];
3907 	unsigned char BkCyc;
3908 	unsigned char WrToRdDly;
3909 	unsigned char RdToWrDly;
3910 };
3911 
3912 /*
3913  * Write a value to a register and check that the write completed.  These
3914  * writes normally complete in a cycle or two, so one read should suffice.
3915  * The very first read exists to flush the posted write to the device.
3916  */
3917 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3918 {
3919 	t3_write_reg(adapter, addr, val);
3920 	(void) t3_read_reg(adapter, addr);                   /* flush */
3921 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3922 		return 0;
3923 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3924 	return -EIO;
3925 }
3926 
3927 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3928 {
3929 	static const unsigned int mc7_mode[] = {
3930 		0x632, 0x642, 0x652, 0x432, 0x442
3931 	};
3932 	static const struct mc7_timing_params mc7_timings[] = {
3933 		{ 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3934 		{ 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3935 		{ 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3936 		{ 9,  3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3937 		{ 9,  4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3938 	};
3939 
3940 	u32 val;
3941 	unsigned int width, density, slow, attempts;
3942 	adapter_t *adapter = mc7->adapter;
3943 	const struct mc7_timing_params *p = &mc7_timings[mem_type];
3944 
3945 	if (!mc7->size)
3946 		return 0;
3947 
3948 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3949 	slow = val & F_SLOW;
3950 	width = G_WIDTH(val);
3951 	density = G_DEN(val);
3952 
3953 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3954 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */
3955 	msleep(1);
3956 
3957 	if (!slow) {
3958 		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3959 		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3960 		msleep(1);
3961 		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3962 		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3963 			CH_ERR(adapter, "%s MC7 calibration timed out\n",
3964 			       mc7->name);
3965 			goto out_fail;
3966 		}
3967 	}
3968 
3969 	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3970 		     V_ACTTOPREDLY(p->ActToPreDly) |
3971 		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3972 		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3973 		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3974 
3975 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3976 		     val | F_CLKEN | F_TERM150);
3977 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3978 
3979 	if (!slow)
3980 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3981 				 F_DLLENB);
3982 	udelay(1);
3983 
3984 	val = slow ? 3 : 6;
3985 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3986 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3987 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3988 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3989 		goto out_fail;
3990 
3991 	if (!slow) {
3992 		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3993 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
3994 				 F_DLLRST, 0);
3995 		udelay(5);
3996 	}
3997 
3998 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3999 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
4000 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
4001 	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
4002 		       mc7_mode[mem_type]) ||
4003 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
4004 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
4005 		goto out_fail;
4006 
4007 	/* clock value is in KHz */
4008 	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;  /* ns */
4009 	mc7_clock /= 1000000;                          /* KHz->MHz, ns->us */
4010 
4011 	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
4012 		     F_PERREFEN | V_PREREFDIV(mc7_clock));
4013 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
4014 
4015 	t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
4016 		     F_ECCGENEN | F_ECCCHKEN);
4017 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
4018 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
4019 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
4020 		     (mc7->size << width) - 1);
4021 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
4022 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
4023 
4024 	attempts = 50;
4025 	do {
4026 		msleep(250);
4027 		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
4028 	} while ((val & F_BUSY) && --attempts);
4029 	if (val & F_BUSY) {
4030 		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
4031 		goto out_fail;
4032 	}
4033 
4034 	/* Enable normal memory accesses. */
4035 	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
4036 	return 0;
4037 
4038  out_fail:
4039 	return -1;
4040 }
4041 
4042 static void config_pcie(adapter_t *adap)
4043 {
4044 	static const u16 ack_lat[4][6] = {
4045 		{ 237, 416, 559, 1071, 2095, 4143 },
4046 		{ 128, 217, 289, 545, 1057, 2081 },
4047 		{ 73, 118, 154, 282, 538, 1050 },
4048 		{ 67, 107, 86, 150, 278, 534 }
4049 	};
4050 	static const u16 rpl_tmr[4][6] = {
4051 		{ 711, 1248, 1677, 3213, 6285, 12429 },
4052 		{ 384, 651, 867, 1635, 3171, 6243 },
4053 		{ 219, 354, 462, 846, 1614, 3150 },
4054 		{ 201, 321, 258, 450, 834, 1602 }
4055 	};
4056 
4057 	u16 val, devid;
4058 	unsigned int log2_width, pldsize;
4059 	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
4060 
4061 	t3_os_pci_read_config_2(adap,
4062 				adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4063 				&val);
4064 	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
4065 
4066 	/*
4067 	 * Gen2 adapters require the minimum Max_Read_Request_Size for
4068 	 * PCIe bridge compatibility.
4069 	 */
4070 	t3_os_pci_read_config_2(adap, 0x2, &devid);
4071 	if (devid == 0x37) {
4072 		t3_os_pci_write_config_2(adap,
4073 		    adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4074 		    val & ~PCI_EXP_DEVCTL_READRQ & ~PCI_EXP_DEVCTL_PAYLOAD);
4075 		pldsize = 0;
4076 	}
4077 
4078 	t3_os_pci_read_config_2(adap,
4079 				adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
4080 				&val);
4081 
4082 	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
4083 	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
4084 			G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
4085 	log2_width = fls(adap->params.pci.width) - 1;
4086 	acklat = ack_lat[log2_width][pldsize];
4087 	if (val & 1)                            /* check LOsEnable */
4088 		acklat += fst_trn_tx * 4;
4089 	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
4090 
4091 	if (adap->params.rev == 0)
4092 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
4093 				 V_T3A_ACKLAT(M_T3A_ACKLAT),
4094 				 V_T3A_ACKLAT(acklat));
4095 	else
4096 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
4097 				 V_ACKLAT(acklat));
4098 
4099 	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
4100 			 V_REPLAYLMT(rpllmt));
4101 
4102 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
4103 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
4104 			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
4105 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
4106 }
4107 
4108 /**
4109  * 	t3_init_hw - initialize and configure T3 HW modules
4110  * 	@adapter: the adapter
4111  * 	@fw_params: initial parameters to pass to firmware (optional)
4112  *
4113  *	Initialize and configure T3 HW modules.  This performs the
4114  *	initialization steps that need to be done once after a card is reset.
4115  *	MAC and PHY initialization is handled separately whenever a port is
4116  *	enabled.
4117  *
4118  *	@fw_params are passed to FW and their value is platform dependent.
4119  *	Only the top 8 bits are available for use, the rest must be 0.
4120  */
4121 int t3_init_hw(adapter_t *adapter, u32 fw_params)
4122 {
4123 	int err = -EIO, attempts, i;
4124 	const struct vpd_params *vpd = &adapter->params.vpd;
4125 
4126 	if (adapter->params.rev > 0)
4127 		calibrate_xgm_t3b(adapter);
4128 	else if (calibrate_xgm(adapter))
4129 		goto out_err;
4130 
4131 	if (adapter->params.nports > 2)
4132 		t3_mac_init(&adap2pinfo(adapter, 0)->mac);
4133 
4134 	if (vpd->mclk) {
4135 		partition_mem(adapter, &adapter->params.tp);
4136 
4137 		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
4138 		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
4139 		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
4140 		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
4141 			        adapter->params.mc5.nfilters,
4142 			       	adapter->params.mc5.nroutes))
4143 			goto out_err;
4144 
4145 		for (i = 0; i < 32; i++)
4146 			if (clear_sge_ctxt(adapter, i, F_CQ))
4147 				goto out_err;
4148 	}
4149 
4150 	if (tp_init(adapter, &adapter->params.tp))
4151 		goto out_err;
4152 
4153 #ifdef CONFIG_CHELSIO_T3_CORE
4154 	t3_tp_set_coalescing_size(adapter,
4155 				  min(adapter->params.sge.max_pkt_size,
4156 				      MAX_RX_COALESCING_LEN), 1);
4157 	t3_tp_set_max_rxsize(adapter,
4158 			     min(adapter->params.sge.max_pkt_size, 16384U));
4159 	ulp_config(adapter, &adapter->params.tp);
4160 #endif
4161 	if (is_pcie(adapter))
4162 		config_pcie(adapter);
4163 	else
4164 		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
4165 				 F_DMASTOPEN | F_CLIDECEN);
4166 
4167 	if (adapter->params.rev == T3_REV_C)
4168 		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
4169 				 F_CFG_CQE_SOP_MASK);
4170 
4171 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
4172 	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
4173 	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
4174 	chan_init_hw(adapter, adapter->params.chan_map);
4175 	t3_sge_init(adapter, &adapter->params.sge);
4176 
4177 	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
4178 
4179 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
4180 	t3_write_reg(adapter, A_CIM_BOOT_CFG,
4181 		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
4182 	(void) t3_read_reg(adapter, A_CIM_BOOT_CFG);    /* flush */
4183 
4184 	attempts = 100;
4185 	do {                          /* wait for uP to initialize */
4186 		msleep(20);
4187 	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
4188 	if (!attempts) {
4189 		CH_ERR(adapter, "uP initialization timed out\n");
4190 		goto out_err;
4191 	}
4192 
4193 	err = 0;
4194  out_err:
4195 	return err;
4196 }
4197 
4198 /**
4199  *	get_pci_mode - determine a card's PCI mode
4200  *	@adapter: the adapter
4201  *	@p: where to store the PCI settings
4202  *
4203  *	Determines a card's PCI mode and associated parameters, such as speed
4204  *	and width.
4205  */
4206 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
4207 {
4208 	static unsigned short speed_map[] = { 33, 66, 100, 133 };
4209 	u32 pci_mode, pcie_cap;
4210 
4211 	pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4212 	if (pcie_cap) {
4213 		u16 val;
4214 
4215 		p->variant = PCI_VARIANT_PCIE;
4216 		p->pcie_cap_addr = pcie_cap;
4217 		t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
4218 					&val);
4219 		p->width = (val >> 4) & 0x3f;
4220 		return;
4221 	}
4222 
4223 	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
4224 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
4225 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
4226 	pci_mode = G_PCIXINITPAT(pci_mode);
4227 	if (pci_mode == 0)
4228 		p->variant = PCI_VARIANT_PCI;
4229 	else if (pci_mode < 4)
4230 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
4231 	else if (pci_mode < 8)
4232 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
4233 	else
4234 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
4235 }
4236 
4237 /**
4238  *	init_link_config - initialize a link's SW state
4239  *	@lc: structure holding the link state
4240  *	@caps: link capabilities
4241  *
4242  *	Initializes the SW state maintained for each link, including the link's
4243  *	capabilities and default speed/duplex/flow-control/autonegotiation
4244  *	settings.
4245  */
4246 static void __devinit init_link_config(struct link_config *lc,
4247 				       unsigned int caps)
4248 {
4249 	lc->supported = caps;
4250 	lc->requested_speed = lc->speed = SPEED_INVALID;
4251 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
4252 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4253 	if (lc->supported & SUPPORTED_Autoneg) {
4254 		lc->advertising = lc->supported;
4255 		lc->autoneg = AUTONEG_ENABLE;
4256 		lc->requested_fc |= PAUSE_AUTONEG;
4257 	} else {
4258 		lc->advertising = 0;
4259 		lc->autoneg = AUTONEG_DISABLE;
4260 	}
4261 }
4262 
4263 /**
4264  *	mc7_calc_size - calculate MC7 memory size
4265  *	@cfg: the MC7 configuration
4266  *
4267  *	Calculates the size of an MC7 memory in bytes from the value of its
4268  *	configuration register.
4269  */
4270 static unsigned int __devinit mc7_calc_size(u32 cfg)
4271 {
4272 	unsigned int width = G_WIDTH(cfg);
4273 	unsigned int banks = !!(cfg & F_BKS) + 1;
4274 	unsigned int org = !!(cfg & F_ORG) + 1;
4275 	unsigned int density = G_DEN(cfg);
4276 	unsigned int MBs = ((256 << density) * banks) / (org << width);
4277 
4278 	return MBs << 20;
4279 }
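
/*
 * Example with hypothetical field values: density 1, both banks
 * populated (F_BKS set), single organization (F_ORG clear) and width
 * code 2 give MBs = ((256 << 1) * 2) / (1 << 2) = 256, i.e., a 256 MB
 * part.
 */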
4280 
4281 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
4282 			       unsigned int base_addr, const char *name)
4283 {
4284 	u32 cfg;
4285 
4286 	mc7->adapter = adapter;
4287 	mc7->name = name;
4288 	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
4289 	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
4290 	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4291 	mc7->width = G_WIDTH(cfg);
4292 }
4293 
4294 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
4295 {
4296 	u16 devid;
4297 
4298 	mac->adapter = adapter;
4299 	mac->multiport = adapter->params.nports > 2;
4300 	if (mac->multiport) {
4301 		mac->ext_port = (unsigned char)index;
4302 		mac->nucast = 8;
4303 	} else
4304 		mac->nucast = 1;
4305 
4306 	/* Gen2 adapters use VPD xauicfg[] to tell the driver which MAC is
4307 	   connected to each port; they are supposed to use xgmac0 for both
4308 	   ports. */
4309 	t3_os_pci_read_config_2(adapter, 0x2, &devid);
4310 
4311 	if (mac->multiport ||
4312 	    (!adapter->params.vpd.xauicfg[1] && (devid == 0x37)))
4313 		index = 0;
4314 
4315 	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
4316 
4317 	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
4318 		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
4319 			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
4320 		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
4321 				 F_ENRGMII, 0);
4322 	}
4323 }
4324 
4325 /**
4326  *	early_hw_init - HW initialization done at card detection time
4327  *	@adapter: the adapter
4328  *	@ai: contains information about the adapter type and properties
4329  *
4330  *	Performs the part of HW initialization that is done early on when the
4331  *	driver first detects the card.  Most of the HW state is initialized
4332  *	lazily later on when a port or an offload function are first used.
4333  */
4334 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
4335 {
4336 	u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
4337 			      3 : 2);
4338 	u32 gpio_out = ai->gpio_out;
4339 
4340 	mi1_init(adapter, ai);
4341 	t3_write_reg(adapter, A_I2C_CFG,                  /* set for 80KHz */
4342 		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
4343 	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
4344 		     gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
4345 	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
4346 	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
4347 
4348 	if (adapter->params.rev == 0 || !uses_xaui(adapter))
4349 		val |= F_ENRGMII;
4350 
4351 	/* Enable MAC clocks so we can access the registers */
4352 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4353 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4354 
4355 	val |= F_CLKDIVRESET_;
4356 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4357 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4358 	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
4359 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4360 }
4361 
4362 /**
4363  *	t3_reset_adapter - reset the adapter
4364  *	@adapter: the adapter
4365  *
4366  * 	Reset the adapter.
4367  */
4368 int t3_reset_adapter(adapter_t *adapter)
4369 {
4370 	int i, save_and_restore_pcie =
4371 	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
4372 	uint16_t devid = 0;
4373 
4374 	if (save_and_restore_pcie)
4375 		t3_os_pci_save_state(adapter);
4376 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
4377 
4378 	/*
4379 	 * Delay to give the device some time to reset fully.
4380 	 * XXX The delay time should be tuned.
4381 	 */
4382 	for (i = 0; i < 10; i++) {
4383 		msleep(50);
4384 		t3_os_pci_read_config_2(adapter, 0x00, &devid);
4385 		if (devid == 0x1425)
4386 			break;
4387 	}
4388 
4389 	if (devid != 0x1425)
4390 		return -1;
4391 
4392 	if (save_and_restore_pcie)
4393 		t3_os_pci_restore_state(adapter);
4394 	return 0;
4395 }
4396 
4397 static int init_parity(adapter_t *adap)
4398 {
4399 	int i, err, addr;
4400 
4401 	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
4402 		return -EBUSY;
4403 
4404 	for (err = i = 0; !err && i < 16; i++)
4405 		err = clear_sge_ctxt(adap, i, F_EGRESS);
4406 	for (i = 0xfff0; !err && i <= 0xffff; i++)
4407 		err = clear_sge_ctxt(adap, i, F_EGRESS);
4408 	for (i = 0; !err && i < SGE_QSETS; i++)
4409 		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
4410 	if (err)
4411 		return err;
4412 
4413 	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
4414 	for (i = 0; i < 4; i++)
4415 		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
4416 			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
4417 				     F_IBQDBGWR | V_IBQDBGQID(i) |
4418 				     V_IBQDBGADDR(addr));
4419 			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
4420 					      F_IBQDBGBUSY, 0, 2, 1);
4421 			if (err)
4422 				return err;
4423 		}
4424 	return 0;
4425 }
4426 
4427 /**
4428  *	t3_prep_adapter - prepare SW and HW for operation
4429  *	@adapter: the adapter
4430  *	@ai: contains information about the adapter type and properties
4431  *
4432  *	Initialize adapter SW state for the various HW modules, set initial
4433  *	values for some adapter tunables, take PHYs out of reset, and
4434  *	initialize the MDIO interface.
4435  */
4436 int __devinit t3_prep_adapter(adapter_t *adapter,
4437 			      const struct adapter_info *ai, int reset)
4438 {
4439 	int ret;
4440 	unsigned int i, j = 0;
4441 
4442 	get_pci_mode(adapter, &adapter->params.pci);
4443 
4444 	adapter->params.info = ai;
4445 	adapter->params.nports = ai->nports0 + ai->nports1;
4446 	adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
4447 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
4448 
4449 	/*
4450 	 * We used to only run the "adapter check task" once a second if
4451 	 * we had PHYs which didn't support interrupts (we would check
4452 	 * their link status once a second).  Now we check other conditions
4453 	 * in that routine which would [potentially] impose a very high
4454 	 * interrupt load on the system.  As such, we now always scan the
4455 	 * adapter state once a second ...
4456 	 */
4457 	adapter->params.linkpoll_period = 10;
4458 
4459 	if (adapter->params.nports > 2)
4460 		adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
4461 	else
4462 		adapter->params.stats_update_period = is_10G(adapter) ?
4463 			MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
4464 	adapter->params.pci.vpd_cap_addr =
4465 		t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4466 
4467 	ret = get_vpd_params(adapter, &adapter->params.vpd);
4468 	if (ret < 0)
4469 		return ret;
4470 
4471 	if (reset && t3_reset_adapter(adapter))
4472 		return -1;
4473 
4474 	t3_sge_prep(adapter, &adapter->params.sge);
4475 
4476 	if (adapter->params.vpd.mclk) {
4477 		struct tp_params *p = &adapter->params.tp;
4478 
4479 		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
4480 		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
4481 		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
4482 
4483 		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
4484 		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
4485 		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
4486 		p->cm_size = t3_mc7_size(&adapter->cm);
4487 		p->chan_rx_size = p->pmrx_size / 2;     /* only 1 Rx channel */
4488 		p->chan_tx_size = p->pmtx_size / p->nchan;
4489 		p->rx_pg_size = 64 * 1024;
4490 		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
4491 		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
4492 		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
4493 		p->ntimer_qs = p->cm_size >= (128 << 20) ||
4494 			       adapter->params.rev > 0 ? 12 : 6;
4495 		p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
4496 			 1;
4497 		p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
4498 	}
4499 
4500 	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
4501 				  t3_mc7_size(&adapter->pmtx) &&
4502 				  t3_mc7_size(&adapter->cm);
4503 
4504 	if (is_offload(adapter)) {
4505 		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
4506 		/* PR 6487. TOE and filtering are mutually exclusive */
4507 		adapter->params.mc5.nfilters = 0;
4508 		adapter->params.mc5.nroutes = 0;
4509 		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
4510 
4511 #ifdef CONFIG_CHELSIO_T3_CORE
4512 		init_mtus(adapter->params.mtus);
4513 		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4514 #endif
4515 	}
4516 
4517 	early_hw_init(adapter, ai);
4518 	ret = init_parity(adapter);
4519 	if (ret)
4520 		return ret;
4521 
4522 	if (adapter->params.nports > 2 &&
4523 	    (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
4524 		return ret;
4525 
4526 	for_each_port(adapter, i) {
4527 		u8 hw_addr[6];
4528 		const struct port_type_info *pti;
4529 		struct port_info *p = adap2pinfo(adapter, i);
4530 
4531 		for (;;) {
4532 			unsigned port_type = adapter->params.vpd.port_type[j];
4533 			if (port_type) {
4534 				if (port_type < ARRAY_SIZE(port_types)) {
4535 					pti = &port_types[port_type];
4536 					break;
4537 				} else
4538 					return -EINVAL;
4539 			}
4540 			j++;
4541 			if (j >= ARRAY_SIZE(adapter->params.vpd.port_type))
4542 				return -EINVAL;
4543 		}
4544 		ret = pti->phy_prep(p, ai->phy_base_addr + j,
4545 				    ai->mdio_ops);
4546 		if (ret)
4547 			return ret;
4548 		mac_prep(&p->mac, adapter, j);
4549 		++j;
4550 
4551 		/*
4552 		 * The VPD EEPROM stores the base Ethernet address for the
4553 		 * card.  A port's address is derived from the base by adding
4554 		 * the port's index to the base's low octet.
4555 		 */
4556 		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
4557 		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
4558 
4559 		t3_os_set_hw_addr(adapter, i, hw_addr);
4560 		init_link_config(&p->link_config, p->phy.caps);
4561 		p->phy.ops->power_down(&p->phy, 1);
4562 
4563 		/*
4564 		 * If the PHY doesn't support interrupts for link status
4565 		 * changes, schedule a scan of the adapter links at least
4566 		 * once a second.
4567 		 */
4568 		if (!(p->phy.caps & SUPPORTED_IRQ) &&
4569 		    adapter->params.linkpoll_period > 10)
4570 			adapter->params.linkpoll_period = 10;
4571 	}
4572 
4573 	return 0;
4574 }
4575 
4576 /**
4577  *	t3_reinit_adapter - prepare HW for operation again
4578  *	@adapter: the adapter
4579  *
4580  *	Put HW in the same state as @t3_prep_adapter without any changes to
4581  *	SW state.  This is a cut down version of @t3_prep_adapter intended
4582  *	to be used after events that wipe out HW state but preserve SW state,
4583  *	e.g., EEH.  The device must be reset before calling this.
4584  */
4585 int t3_reinit_adapter(adapter_t *adap)
4586 {
4587 	unsigned int i;
4588 	int ret, j = 0;
4589 
4590 	early_hw_init(adap, adap->params.info);
4591 	ret = init_parity(adap);
4592 	if (ret)
4593 		return ret;
4594 
4595 	if (adap->params.nports > 2 &&
4596 	    (ret = t3_vsc7323_init(adap, adap->params.nports)))
4597 		return ret;
4598 
4599 	for_each_port(adap, i) {
4600 		const struct port_type_info *pti;
4601 		struct port_info *p = adap2pinfo(adap, i);
4602 
4603 		for (;;) {
4604 			unsigned port_type = adap->params.vpd.port_type[j];
4605 			if (port_type) {
4606 				if (port_type < ARRAY_SIZE(port_types)) {
4607 					pti = &port_types[port_type];
4608 					break;
4609 				} else
4610 					return -EINVAL;
4611 			}
4612 			j++;
4613 			if (j >= ARRAY_SIZE(adap->params.vpd.port_type))
4614 				return -EINVAL;
4615 		}
4616 		ret = pti->phy_prep(p, p->phy.addr, NULL);
4617 		if (ret)
4618 			return ret;
4619 		p->phy.ops->power_down(&p->phy, 1);
4620 	}
4621 	return 0;
4622 }
4623 
4624 void t3_led_ready(adapter_t *adapter)
4625 {
4626 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
4627 			 F_GPIO0_OUT_VAL);
4628 }
4629 
4630 void t3_port_failover(adapter_t *adapter, int port)
4631 {
4632 	u32 val;
4633 
4634 	val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4635 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4636 			 val);
4637 }
4638 
4639 void t3_failover_done(adapter_t *adapter, int port)
4640 {
4641 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4642 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
4643 }
4644 
4645 void t3_failover_clear(adapter_t *adapter)
4646 {
4647 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4648 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
4649 }
4650 
4651 static int t3_cim_hac_read(adapter_t *adapter, u32 addr, u32 *val)
4652 {
4653 	u32 v;
4654 
4655 	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4656 	if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4657 				F_HOSTBUSY, 0, 10, 10, &v))
4658 		return -EIO;
4659 
4660 	*val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);
4661 
4662 	return 0;
4663 }
4664 
4665 static int t3_cim_hac_write(adapter_t *adapter, u32 addr, u32 val)
4666 {
4667 	u32 v;
4668 
4669 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, val);
4670 
4671 	addr |= F_HOSTWRITE;
4672 	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4673 
4674 	if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4675 				F_HOSTBUSY, 0, 10, 5, &v))
4676 		return -EIO;
4677 	return 0;
4678 }
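
/*
 * These two helpers implement single-word accesses through the CIM host
 * access window: the target address (plus F_HOSTWRITE for writes) goes
 * into A_CIM_HOST_ACC_CTRL, completion is detected by F_HOSTBUSY
 * clearing, and the payload moves through A_CIM_HOST_ACC_DATA.
 * t3_get_up_la() and t3_get_up_ioqs() below use them to dump the uP
 * logic-analyzer and IOQ state.
 */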
4679 
4680 int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index,
4681 		 u32 *size, void *data)
4682 {
4683 	u32 v, *buf = data;
4684 	int i, cnt, ret;
4685 
4686 	if (*size < LA_ENTRIES * 4)
4687 		return -EINVAL;
4688 
4689 	ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4690 	if (ret)
4691 		goto out;
4692 
4693 	*stopped = !(v & 1);
4694 
4695 	/* Freeze LA */
4696 	if (!*stopped) {
4697 		ret = t3_cim_hac_write(adapter, LA_CTRL, 0);
4698 		if (ret)
4699 			goto out;
4700 	}
4701 
4702 	for (i = 0; i < LA_ENTRIES; i++) {
4703 		v = (i << 2) | (1 << 1);
4704 		ret = t3_cim_hac_write(adapter, LA_CTRL, v);
4705 		if (ret)
4706 			goto out;
4707 
4708 		ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4709 		if (ret)
4710 			goto out;
4711 
4712 		cnt = 20;
4713 		while ((v & (1 << 1)) && cnt) {
4714 			udelay(5);
4715 			--cnt;
4716 			ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4717 			if (ret)
4718 				goto out;
4719 		}
4720 
4721 		if (v & (1 << 1))
4722 			return -EIO;
4723 
4724 		ret = t3_cim_hac_read(adapter, LA_DATA, &v);
4725 		if (ret)
4726 			goto out;
4727 
4728 		*buf++ = v;
4729 	}
4730 
4731 	ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4732 	if (ret)
4733 		goto out;
4734 
4735 	*index = (v >> 16) + 4;
4736 	*size = LA_ENTRIES * 4;
4737 out:
4738 	/* Unfreeze LA */
4739 	t3_cim_hac_write(adapter, LA_CTRL, 1);
4740 	return ret;
4741 }
4742 
4743 int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data)
4744 {
4745 	u32 v, *buf = data;
4746 	int i, j, ret;
4747 
4748 	if (*size < IOQ_ENTRIES * sizeof(struct t3_ioq_entry))
4749 		return -EINVAL;
4750 
4751 	for (i = 0; i < 4; i++) {
4752 		ret = t3_cim_hac_read(adapter, (4 * i), &v);
4753 		if (ret)
4754 			goto out;
4755 
4756 		*buf++ = v;
4757 	}
4758 
4759 	for (i = 0; i < IOQ_ENTRIES; i++) {
4760 		u32 base_addr = 0x10 * (i + 1);
4761 
4762 		for (j = 0; j < 4; j++) {
4763 			ret = t3_cim_hac_read(adapter, base_addr + 4 * j, &v);
4764 			if (ret)
4765 				goto out;
4766 
4767 			*buf++ = v;
4768 		}
4769 	}
4770 
4771 	*size = IOQ_ENTRIES * sizeof(struct t3_ioq_entry);
4772 
4773 out:
4774 	return ret;
4775 }
4776 
4777