xref: /freebsd/sys/dev/cxgb/common/cxgb_t3_hw.c (revision 499fe48de8938d4c7b0a91e20eb6c16db9d55633)
/**************************************************************************
SPDX-License-Identifier: BSD-2-Clause-FreeBSD

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <cxgb_include.h>

#undef msleep
#define msleep t3_os_sleep

/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
			int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
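
/*
 * Usage sketch (illustrative only, not compiled into the driver): polling
 * the serial-flash BUSY bit and capturing the final register value.  The
 * register and field names are the ones used later in this file; use() and
 * bail() are placeholders for caller-specific handling.
 *
 *	u32 opval;
 *	int ret;
 *
 *	ret = t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0,
 *				  SF_ATTEMPTS, 10, &opval);
 *	if (ret == 0)
 *		use(opval);		// completed; opval holds A_SF_OP
 *	else
 *		bail(ret);		// ret is -EAGAIN: timed out
 */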

/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
		   unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	(void) t3_read_reg(adapter, addr);      /* flush */
}
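
/*
 * Usage sketch (mirrors a call made later in this file): switch the MI1
 * interface to clause-22 start bits by rewriting only the ST field of
 * A_MI1_CFG, leaving every other bit of the register intact:
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 *
 * The mask selects the bits to clear and the new value is ORed in; the
 * trailing read flushes the write to the hardware.
 */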

/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Reads @n 64-bit words from MC7 starting at word @start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
	static int shift[] = { 0, 0, 16, 24 };
	static int step[]  = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;  /* # of 64-bit words */
	adapter_t *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
				       start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						mc7->offset + A_MC7_BD_DATA0);
				val64 |= (u64)val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64)val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}

/*
 * Low-level I2C read and write routines.  These simply read and write a
 * single byte with the option of indicating a "continue" if another operation
 * is to be chained.  Generally most code will use higher-level routines to
 * read and write to I2C slave devices.
 */
#define I2C_ATTEMPTS 100

/*
 * Read an 8-bit value from the I2C bus.  If the "chained" parameter is
 * non-zero then a STOP bit will not be written after the read command.  On
 * error (the read timed out, etc.), a negative errno will be returned (e.g.
 * -EAGAIN, etc.).  On success, the 8-bit value read from the I2C bus is
 * stored into the buffer *valp and the value of the I2C ACK bit is returned
 * as a 0/1 value.
 */
int t3_i2c_read8(adapter_t *adapter, int chained, u8 *valp)
{
	int ret;
	u32 opval;
	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_I2C_OP,
		     F_I2C_READ | (chained ? F_I2C_CONT : 0));
	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
				  I2C_ATTEMPTS, 10, &opval);
	if (ret >= 0) {
		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
		*valp = G_I2C_DATA(t3_read_reg(adapter, A_I2C_DATA));
	}
	MDIO_UNLOCK(adapter);
	return ret;
}
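
/*
 * Illustrative sketch (assumed caller, not part of this file): reading two
 * consecutive bytes from an I2C slave by chaining, so no STOP bit is issued
 * between the two reads:
 *
 *	u8 hi, lo;
 *	int ack;
 *
 *	ack = t3_i2c_read8(adapter, 1, &hi);		// chained: bus held
 *	if (ack == 1)					// slave ACKed
 *		ack = t3_i2c_read8(adapter, 0, &lo);	// final read + STOP
 */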

/*
 * Write an 8-bit value to the I2C bus.  If the "chained" parameter is
 * non-zero, then a STOP bit will not be written after the write command.  On
 * error (the write timed out, etc.), a negative errno will be returned (e.g.
 * -EAGAIN, etc.).  On success, the value of the I2C ACK bit is returned as a
 * 0/1 value.
 */
int t3_i2c_write8(adapter_t *adapter, int chained, u8 val)
{
	int ret;
	u32 opval;
	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_I2C_DATA, V_I2C_DATA(val));
	t3_write_reg(adapter, A_I2C_OP,
		     F_I2C_WRITE | (chained ? F_I2C_CONT : 0));
	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
				  I2C_ATTEMPTS, 10, &opval);
	if (ret >= 0)
		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
	MDIO_UNLOCK(adapter);
	return ret;
}

/*
 * Initialize MI1.
 */
static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
		int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
	return ret;
}

int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
		 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	MDIO_UNLOCK(adapter);
	return ret;
}

static struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};
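
/*
 * PHY drivers reach these through the mdio_ops pointer installed at PHY
 * probe time.  Sketch, using the mdio_read() wrapper this file itself calls
 * in t3_mdio_change_bits() below; mmd_addr must be 0 for clause-22 devices:
 *
 *	unsigned int val;
 *
 *	if (!mdio_read(phy, 0, MII_BMCR, &val))	// dispatches to t3_mi1_read
 *		...
 */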

/*
 * MI1 read/write operations for clause 45 PHYs.
 */
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

static struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;        /* IEEE 802.3 selector field of MII_ADVERTISE */
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a fiber PHY's advertisement register to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);

	if (err)
		return err;
	return (status & 1) ? cphy_cause_link_change : 0;
}

static struct adapter_info t3_adap_info[] = {
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio PE9000" },
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio T302" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	  F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 1, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	  F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	  F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T320" },
	{ 4, 0, 0,
	  F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
	  F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
	  { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
	  &mi1_mdio_ops, "Chelsio T304" },
	{ 0 },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(pinfo_t *pinfo, int phy_addr,
			const struct mdio_ops *ops);
};

static struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ t3_mv88e1xxx_phy_prep },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ t3_tn1010_phy_prep },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[16];
	u8  vpdr_tag;
	u8  vpdr_len[2];
	VPD_ENTRY(pn, 16);                     /* part number */
	VPD_ENTRY(ec, ECNUM_LEN);              /* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);             /* serial number */
	VPD_ENTRY(na, 12);                     /* MAC address base */
	VPD_ENTRY(cclk, 6);                    /* core clock */
	VPD_ENTRY(mclk, 6);                    /* mem clock */
	VPD_ENTRY(uclk, 6);                    /* uP clk */
	VPD_ENTRY(mdc, 6);                     /* MDIO clk */
	VPD_ENTRY(mt, 2);                      /* mem timing */
	VPD_ENTRY(xaui0cfg, 6);                /* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);                /* XAUI1 config */
	VPD_ENTRY(port0, 2);                   /* PHY0 complex */
	VPD_ENTRY(port1, 2);                   /* PHY1 complex */
	VPD_ENTRY(port2, 2);                   /* PHY2 complex */
	VPD_ENTRY(port3, 2);                   /* PHY3 complex */
	VPD_ENTRY(rv, 1);                      /* csum */
	u32 pad;                  /* for multiple-of-4 sizing and alignment */
};
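
/*
 * For reference, VPD_ENTRY(sn, SERNUM_LEN) above expands to the layout of a
 * single VPD-R keyword entry:
 *
 *	u8 sn_kword[2];		// two-character keyword, e.g. "SN"
 *	u8 sn_len;		// length of the data that follows
 *	u8 sn_data[SERNUM_LEN];	// the keyword's payload
 */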

#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.
 */
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(adapter_t *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
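
/*
 * Typical update sequence (sketch of an assumed caller): drop the write
 * protection, rewrite a word, then protect the EEPROM again.
 *
 *	t3_seeprom_wp(adapter, 0);		// disable write protection
 *	t3_seeprom_write(adapter, addr, data);	// addr, data: caller's values
 *	t3_seeprom_wp(adapter, 1);		// re-enable it
 */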

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 *	get_desc_len - get the length of a VPD descriptor
 *	@adapter: the adapter
 *	@offset: first byte offset of the VPD descriptor
 *
 *	Retrieves the length of the small/large resource
 *	data type starting at @offset.
 */
static int get_desc_len(adapter_t *adapter, u32 offset)
{
	u32 read_offset, tmp, shift, len = 0;
	u8 tag, buf[8];
	int ret;

	read_offset = offset & 0xfffffffc;
	shift = offset & 0x03;

	ret = t3_seeprom_read(adapter, read_offset, &tmp);
	if (ret < 0)
		return ret;

	*((u32 *)buf) = cpu_to_le32(tmp);

	tag = buf[shift];
	if (tag & 0x80) {
		ret = t3_seeprom_read(adapter, read_offset + 4, &tmp);
		if (ret < 0)
			return ret;

		*((u32 *)(&buf[4])) = cpu_to_le32(tmp);
		len = (buf[shift + 1] & 0xff) +
		      ((buf[shift + 2] << 8) & 0xff00) + 3;
	} else
		len = (tag & 0x07) + 1;

	return len;
}

/**
 *	is_end_tag - check if a VPD tag is the end tag
 *	@adapter: the adapter
 *	@offset: first byte offset of the tag
 *
 *	Checks if the tag located at @offset is the end tag.
 */
static int is_end_tag(adapter_t *adapter, u32 offset)
{
	u32 read_offset, shift, tmp;
	u8 buf[4];
	int ret;

	read_offset = offset & 0xfffffffc;
	shift = offset & 0x03;

	ret = t3_seeprom_read(adapter, read_offset, &tmp);
	if (ret)
		return ret;
	*((u32 *)buf) = cpu_to_le32(tmp);

	if (buf[shift] == 0x78)
		return 1;
	else
		return 0;
}

/**
 *	t3_get_vpd_len - compute the length of a VPD structure
 *	@adapter: the adapter
 *	@vpd: contains the offset of the first byte of the VPD
 *
 *	Computes the length of the VPD structure starting at vpd->offset.
 */
int t3_get_vpd_len(adapter_t *adapter, struct generic_vpd *vpd)
{
	u32 len = 0, offset;
	int inc, ret;

	offset = vpd->offset;

	while (offset < (vpd->offset + MAX_VPD_BYTES)) {
		ret = is_end_tag(adapter, offset);
		if (ret < 0)
			return ret;
		else if (ret == 1)
			break;

		inc = get_desc_len(adapter, offset);
		if (inc < 0)
			return inc;
		len += inc;
		offset += inc;
	}
	return (len + 1);
}

/**
 *	t3_read_vpd - read the stream of bytes containing a VPD structure
 *	@adapter: the adapter
 *	@vpd: contains a buffer that would hold the stream of bytes
 *
 *	Reads the VPD structure starting at vpd->offset into vpd->data;
 *	the length of the byte stream to read is vpd->len.
 */
int t3_read_vpd(adapter_t *adapter, struct generic_vpd *vpd)
{
	u32 i;
	int ret;

	for (i = 0; i < vpd->len; i += 4) {
		ret = t3_seeprom_read(adapter, vpd->offset + i,
				      (u32 *) &(vpd->data[i]));
		if (ret)
			return ret;
	}

	return 0;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
	memcpy(p->ec, vpd.ec_data, ECNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
		p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
		p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
		p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* BIOS boot header */
typedef struct boot_header_s {
	u8	signature[2];	/* signature */
	u8	length;		/* image length (includes header) */
	u8	offset[4];	/* initialization vector */
	u8	reserved[19];	/* reserved */
	u8	exheader[2];	/* offset to expansion header */
} boot_header_t;

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,           /* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,   /* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,       /* program page */
	SF_WR_DISABLE   = 4,       /* disable writes */
	SF_RD_STATUS    = 5,       /* read status register */
	SF_WR_ENABLE    = 6,       /* enable writes */
	SF_RD_DATA_FAST = 0xb,     /* read flash */
	SF_ERASE_SECTOR = 0xd8,    /* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
	FW_VERS_ADDR_PRE8 = 0x77ffc,/* flash address holding FW version pre8 */
	FW_MIN_SIZE = 8,           /* at least version and csum */
	FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
	FW_MAX_SIZE_PRE8 = FW_VERS_ADDR_PRE8 - FW_FLASH_BOOT_ADDR,

	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
	BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC /* 1 byte * length increment */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
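
/*
 * The two primitives above are chained (via the cont flag) into complete
 * flash commands.  A status poll, for example, is one chained opcode write
 * followed by a one-byte read, exactly as flash_wait_op() below does:
 *
 *	sf1_write(adapter, 1, 1, SF_RD_STATUS);	// issue opcode, chained
 *	sf1_read(adapter, 1, 0, &status);	// clock out the status byte
 */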

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
		  u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 *	If @byte_oriented is set the write data is stored as a 32-bit
 *	big-endian array, otherwise in the processor's native endianness.
 */
static int t3_write_flash(adapter_t *adapter, unsigned int addr,
			  unsigned int n, const u8 *data,
			  int byte_oriented)
{
	int ret;
	u32 buf[64];
	unsigned int c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		val = *(const u32*)data;
		data += c;
		if (byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}

/**
 *	t3_get_tp_version - read the TP SRAM version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol SRAM version from SRAM.
 */
int t3_get_tp_version(adapter_t *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 *	t3_check_tpsram_version - check the TP SRAM version
 *	@adapter: the adapter
 *
 *	Reads the protocol SRAM version and compares it against the version
 *	this driver was compiled for.  Returns 0 if they match, a negative
 *	error otherwise.
 */
int t3_check_tpsram_version(adapter_t *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	CH_ERR(adapter, "found wrong TP version (%u.%u), "
	       "driver compiled for version %u.%u\n", major, minor,
	       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	return -EINVAL;
}

/**
 *	t3_check_tpsram - check if provided protocol SRAM
 *			  is compatible with this driver
 *	@adapter: the adapter
 *	@tp_sram: the firmware image to write
 *	@size: image size
 *
 *	Checks if an adapter's TP SRAM is compatible with the driver.
 *	Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
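
/*
 * The image is valid when its 32-bit big-endian words sum, modulo 2^32, to
 * 0xffffffff; i.e. the final word carries the one's complement of the sum
 * of everything before it.  Sketch of how host-side tooling (not this
 * driver) would produce such a trailer for an nwords-long image p:
 *
 *	u32 csum = 0;
 *	for (i = 0; i < nwords - 1; i++)
 *		csum += ntohl(p[i]);
 *	p[nwords - 1] = htonl(0xffffffff - csum);	// total: 0xffffffff
 */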

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.  Note that we had to move the version
 *	due to FW size.  If we don't find a valid FW version in the new
 *	location we fall back and read the old location.
 */
int t3_get_fw_version(adapter_t *adapter, u32 *vers)
{
	int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
	if (!ret && *vers != 0xffffffff)
		return 0;
	else
		return t3_read_flash(adapter, FW_VERS_ADDR_PRE8, 1, vers, 0);
}

/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(adapter_t *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
		        "driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
		        "driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
{
	u32 version, csum, fw_version_addr;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size - 8 > FW_MAX_SIZE)
		return -EFBIG;

	version = ntohl(*(const u32 *)(fw_data + size - 8));
	if (G_FW_VERSION_MAJOR(version) < 8) {
		fw_version_addr = FW_VERS_ADDR_PRE8;
		if (size - 8 > FW_MAX_SIZE_PRE8)
			return -EFBIG;
	} else
		fw_version_addr = FW_VERS_ADDR;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;  /* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, fw_version_addr, 4, fw_data, 1);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}

/**
 *	t3_load_boot - download boot flash
 *	@adapter: the adapter
 *	@boot_data: the boot image to write
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
{
	boot_header_t *header = (boot_header_t *)boot_data;
	int ret;
	unsigned int addr;
	unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
	unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adapter, "boot image too small/large\n");
		return -EFBIG;
	}
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adapter, "boot image missing signature\n");
		return -EINVAL;
	}
	if (header->length * BOOT_SIZE_INC != size) {
		CH_ERR(adapter, "boot image header length != image length\n");
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
	if (ret)
		goto out;

	for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
		if (ret)
			goto out;

		addr += chunk_size;
		boot_data += chunk_size;
		size -= chunk_size;
	}

out:
	if (ret)
		CH_ERR(adapter, "boot image download failed, error %d\n", ret);
	return ret;
}

#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
			unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG + mac->offset);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG + mac->offset,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH +
	    mac->offset);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW +
	    mac->offset);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG + mac->offset,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset,
	    rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset,
	    rx_hash_low);
}

static int t3_detect_link_fault(adapter_t *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	uint32_t rx_cfg, rx_hash_high, rx_hash_low;
	int link_fault;

	/* stop rx */
	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);

	/* clear status and make sure intr is enabled */
	(void) t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
	t3_xgm_intr_enable(adapter, port_id);

	/* restart rx */
	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
	return (link_fault & F_LINKFAULTCHANGE ? 1 : 0);
}

static void t3_clear_faults(adapter_t *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;

	if (adapter->params.nports <= 2) {
		t3_xgm_intr_disable(adapter, pi->port_id);
		t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, F_XGM_INT);
		t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adapter, pi->port_id);
	}
}

/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(adapter_t *adapter, int port_id)
{
	int link_ok, speed, duplex, fc, link_fault, link_state;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;
	link_fault = 0;

	phy->ops->get_link_status(phy, &link_state, &speed, &duplex, &fc);
	link_ok = (link_state == PHY_LINK_UP);
	if (link_state != PHY_LINK_PARTIAL)
		phy->rst = 0;
	else if (++phy->rst == 3) {
		phy->ops->reset(phy, 0);
		phy->rst = 0;
	}

	if (link_ok == 0)
		pi->link_fault = LF_NO;

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	/* Update mac speed before checking for link fault. */
	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE &&
	    (speed != lc->speed || duplex != lc->duplex || fc != lc->fc))
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);

	/*
	 * Check for link faults if any of these is true:
	 * a) A link fault is suspected, and PHY says link ok
	 * b) PHY link transitioned from down -> up
	 */
	if (adapter->params.nports <= 2 &&
	    ((pi->link_fault && link_ok) || (!lc->link_ok && link_ok))) {

		link_fault = t3_detect_link_fault(adapter, port_id);
		if (link_fault) {
			if (pi->link_fault != LF_YES) {
				mac->stats.link_faults++;
				pi->link_fault = LF_YES;
			}

			if (uses_xaui(adapter)) {
				if (adapter->params.rev >= T3_REV_C)
					t3c_pcs_force_los(mac);
				else
					t3b_pcs_reset(mac);
			}

			/* Don't report link up */
			link_ok = 0;
		} else {
			/* clear faults here if this was a false alarm. */
			if (pi->link_fault == LF_MAYBE &&
			    link_ok && lc->link_ok)
				t3_clear_faults(adapter, port_id);

			pi->link_fault = LF_NO;
		}
	}

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;                            /* nothing changed */

	lc->link_ok = (unsigned char)link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	lc->fc = fc;

	if (link_ok) {

		/* down -> up, or up -> up with changed settings */

		if (adapter->params.rev > 0 && uses_xaui(adapter)) {

			if (adapter->params.rev >= T3_REV_C)
				t3c_pcs_force_los(mac);
			else
				t3b_pcs_reset(mac);

			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);
		}

		/* disable TX FIFO drain */
		t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset,
				 F_ENDROPPKT, 0);

		t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
		t3_set_reg_field(adapter, A_XGM_STAT_CTRL + mac->offset,
				 F_CLRSTATS, 1);
		t3_clear_faults(adapter, port_id);

	} else {

		/* up -> down */

		if (adapter->params.rev > 0 && uses_xaui(adapter)) {
			t3_write_reg(adapter,
				     A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
		}

		t3_xgm_intr_disable(adapter, pi->port_id);
		if (adapter->params.nports <= 2) {
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + mac->offset,
					 F_XGM_INT, 0);

			t3_mac_disable(mac, MAC_DIRECTION_RX);

			/*
			 * Make sure Tx FIFO continues to drain, even as rxen is
			 * left high to help detect and indicate remote faults.
			 */
			t3_set_reg_field(adapter,
			    A_XGM_TXFIFO_CFG + mac->offset, 0, F_ENDROPPKT);
			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
			t3_write_reg(adapter,
			    A_XGM_TX_CTRL + mac->offset, F_TXEN);
			t3_write_reg(adapter,
			    A_XGM_RX_CTRL + mac->offset, F_RXEN);
		}
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc,
	    mac->was_reset);
	mac->was_reset = 0;
}

/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}

		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			/* PR 5666. Power phy up when doing an ifup */
			if (!is_10G(phy->adapter))
				phy->ops->power_down(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
1724 
1725 struct intr_info {
1726 	unsigned int mask;       /* bits to check in interrupt status */
1727 	const char *msg;         /* message to print or NULL */
1728 	short stat_idx;          /* stat counter to increment or -1 */
1729 	unsigned short fatal;    /* whether the condition reported is fatal */
1730 };
1731 
1732 /**
1733  *	t3_handle_intr_status - table driven interrupt handler
1734  *	@adapter: the adapter that generated the interrupt
1735  *	@reg: the interrupt status register to process
1736  *	@mask: a mask to apply to the interrupt status
1737  *	@acts: table of interrupt actions
1738  *	@stats: statistics counters tracking interrupt occurrences
1739  *
1740  *	A table driven interrupt handler that applies a set of masks to an
1741  *	interrupt status word and performs the corresponding actions if the
1742  *	interrupts described by the mask have occurred.  The actions include
1743  *	optionally printing a warning or alert message, and optionally
1744  *	incrementing a stat counter.  The table is terminated by an entry
1745  *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1746  */
1747 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1748 				 unsigned int mask,
1749 				 const struct intr_info *acts,
1750 				 unsigned long *stats)
1751 {
1752 	int fatal = 0;
1753 	unsigned int status = t3_read_reg(adapter, reg) & mask;
1754 
1755 	for ( ; acts->mask; ++acts) {
1756 		if (!(status & acts->mask)) continue;
1757 		if (acts->fatal) {
1758 			fatal++;
1759 			CH_ALERT(adapter, "%s (0x%x)\n",
1760 				 acts->msg, status & acts->mask);
1761 			status &= ~acts->mask;
1762 		} else if (acts->msg)
1763 			CH_WARN(adapter, "%s (0x%x)\n",
1764 				acts->msg, status & acts->mask);
1765 		if (acts->stat_idx >= 0)
1766 			stats[acts->stat_idx]++;
1767 	}
1768 	if (status)                           /* clear processed interrupts */
1769 		t3_write_reg(adapter, reg, status);
1770 	return fatal;
1771 }
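
/*
 * A minimal sketch of a table for the handler above; the mask bit, message,
 * and stat index are hypothetical (the real tables follow below):
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{ F_SOMEFATALERR, "example fatal error", -1, 1 },
 *		{ F_SOMEEVENT, "example benign event", STAT_EXAMPLE, 0 },
 *		{ 0 }	(a mask of 0 terminates the table)
 *	};
 *
 *	if (t3_handle_intr_status(adapter, A_EXAMPLE_INT_CAUSE, 0xffffffff,
 *				  example_intr_info, adapter->irq_stats))
 *		t3_fatal_err(adapter);
 */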
1772 
1773 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1774 		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1775 		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1776 		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1777 		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1778 		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1779 		       F_HIRCQPARITYERROR)
1780 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1781 		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1782 		       F_NFASRCHFAIL)
1783 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1784 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1785 		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1786 		       F_TXFIFO_UNDERRUN)
1787 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1788 			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1789 			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1790 			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1791 			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1792 			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1793 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1794 			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1795 			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1796 			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1797 			F_TXPARERR | V_BISTERR(M_BISTERR))
1798 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1799 			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1800 			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1801 #define ULPTX_INTR_MASK 0xfc
1802 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1803 			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1804 			 F_ZERO_SWITCH_ERROR)
1805 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1806 		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1807 		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1808 		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1809 		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1810 		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1811 		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1812 		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1813 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1814 			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1815 			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1816 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1817 			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1818 			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1819 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1820 		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1821 		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1822 		       V_MCAPARERRENB(M_MCAPARERRENB))
1823 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1824 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1825 		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1826 		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1827 		      F_MPS0 | F_CPL_SWITCH)
1828 /*
1829  * Interrupt handler for the PCIX1 module.
1830  */
1831 static void pci_intr_handler(adapter_t *adapter)
1832 {
1833 	static const struct intr_info pcix1_intr_info[] = {
1834 		{ F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1835 		{ F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1836 		{ F_RCVTARABT, "PCI received target abort", -1, 1 },
1837 		{ F_RCVMSTABT, "PCI received master abort", -1, 1 },
1838 		{ F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1839 		{ F_DETPARERR, "PCI detected parity error", -1, 1 },
1840 		{ F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1841 		{ F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1842 		{ F_RCVSPLCMPERR, "PCI received split completion error", -1,
1843 		  1 },
1844 		{ F_DETCORECCERR, "PCI correctable ECC error",
1845 		  STAT_PCI_CORR_ECC, 0 },
1846 		{ F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1847 		{ F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1848 		{ V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1849 		  1 },
1850 		{ V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1851 		  1 },
1852 		{ V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1853 		  1 },
1854 		{ V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1855 		  "error", -1, 1 },
1856 		{ 0 }
1857 	};
1858 
1859 	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1860 				  pcix1_intr_info, adapter->irq_stats))
1861 		t3_fatal_err(adapter);
1862 }
1863 
1864 /*
1865  * Interrupt handler for the PCIE module.
1866  */
1867 static void pcie_intr_handler(adapter_t *adapter)
1868 {
1869 	static const struct intr_info pcie_intr_info[] = {
1870 		{ F_PEXERR, "PCI PEX error", -1, 1 },
1871 		{ F_UNXSPLCPLERRR,
1872 		  "PCI unexpected split completion DMA read error", -1, 1 },
1873 		{ F_UNXSPLCPLERRC,
1874 		  "PCI unexpected split completion DMA command error", -1, 1 },
1875 		{ F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1876 		{ F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1877 		{ F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1878 		{ F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1879 		{ V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1880 		  "PCI MSI-X table/PBA parity error", -1, 1 },
1881 		{ F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1882 		{ F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1883 		{ F_RXPARERR, "PCI Rx parity error", -1, 1 },
1884 		{ F_TXPARERR, "PCI Tx parity error", -1, 1 },
1885 		{ V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1886 		{ 0 }
1887 	};
1888 
1889 	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1890 		CH_ALERT(adapter, "PEX error code 0x%x\n",
1891 			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1892 
1893 	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1894 				  pcie_intr_info, adapter->irq_stats))
1895 		t3_fatal_err(adapter);
1896 }
1897 
1898 /*
1899  * TP interrupt handler.
1900  */
1901 static void tp_intr_handler(adapter_t *adapter)
1902 {
1903 	static const struct intr_info tp_intr_info[] = {
1904 		{ 0xffffff,  "TP parity error", -1, 1 },
1905 		{ 0x1000000, "TP out of Rx pages", -1, 1 },
1906 		{ 0x2000000, "TP out of Tx pages", -1, 1 },
1907 		{ 0 }
1908 	};
1909 	static const struct intr_info tp_intr_info_t3c[] = {
1910 		{ 0x1fffffff,  "TP parity error", -1, 1 },
1911 		{ F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1912 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1913 		{ 0 }
1914 	};
1915 
1916 	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1917 				  adapter->params.rev < T3_REV_C ?
1918 					tp_intr_info : tp_intr_info_t3c, NULL))
1919 		t3_fatal_err(adapter);
1920 }
1921 
1922 /*
1923  * CIM interrupt handler.
1924  */
1925 static void cim_intr_handler(adapter_t *adapter)
1926 {
1927 	static const struct intr_info cim_intr_info[] = {
1928 		{ F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1929 		{ F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1930 		{ F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1931 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1932 		{ F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1933 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1934 		{ F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1935 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1936 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1937 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1938 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1939 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1940 		{ F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1941 		{ F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1942 		{ F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1943 		{ F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1944 		{ F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1945 		{ F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1946 		{ F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1947 		{ F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1948 		{ F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1949 		{ F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1950 		{ F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1951 		{ F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1952 		{ 0 }
1953 	};
1954 
1955 	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1956 				  cim_intr_info, NULL))
1957 		t3_fatal_err(adapter);
1958 }
1959 
1960 /*
1961  * ULP RX interrupt handler.
1962  */
1963 static void ulprx_intr_handler(adapter_t *adapter)
1964 {
1965 	static const struct intr_info ulprx_intr_info[] = {
1966 		{ F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1967 		{ F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1968 		{ F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1969 		{ F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1970 		{ F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1971 		{ F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1972 		{ F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1973 		{ F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1974 		{ 0 }
1975 	};
1976 
1977 	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1978 				  ulprx_intr_info, NULL))
1979 		t3_fatal_err(adapter);
1980 }
1981 
1982 /*
1983  * ULP TX interrupt handler.
1984  */
1985 static void ulptx_intr_handler(adapter_t *adapter)
1986 {
1987 	static const struct intr_info ulptx_intr_info[] = {
1988 		{ F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1989 		  STAT_ULP_CH0_PBL_OOB, 0 },
1990 		{ F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1991 		  STAT_ULP_CH1_PBL_OOB, 0 },
1992 		{ 0xfc, "ULP TX parity error", -1, 1 },
1993 		{ 0 }
1994 	};
1995 
1996 	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1997 				  ulptx_intr_info, adapter->irq_stats))
1998 		t3_fatal_err(adapter);
1999 }
2000 
2001 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
2002 	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
2003 	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
2004 	F_ICSPI1_TX_FRAMING_ERROR)
2005 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
2006 	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
2007 	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
2008 	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
2009 
2010 /*
2011  * PM TX interrupt handler.
2012  */
2013 static void pmtx_intr_handler(adapter_t *adapter)
2014 {
2015 	static const struct intr_info pmtx_intr_info[] = {
2016 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2017 		{ ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
2018 		{ OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
2019 		{ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
2020 		  "PMTX ispi parity error", -1, 1 },
2021 		{ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
2022 		  "PMTX ospi parity error", -1, 1 },
2023 		{ 0 }
2024 	};
2025 
2026 	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
2027 				  pmtx_intr_info, NULL))
2028 		t3_fatal_err(adapter);
2029 }
2030 
2031 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
2032 	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
2033 	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
2034 	F_IESPI1_TX_FRAMING_ERROR)
2035 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
2036 	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
2037 	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
2038 	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
2039 
2040 /*
2041  * PM RX interrupt handler.
2042  */
2043 static void pmrx_intr_handler(adapter_t *adapter)
2044 {
2045 	static const struct intr_info pmrx_intr_info[] = {
2046 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2047 		{ IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
2048 		{ OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
2049 		{ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
2050 		  "PMRX ispi parity error", -1, 1 },
2051 		{ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
2052 		  "PMRX ospi parity error", -1, 1 },
2053 		{ 0 }
2054 	};
2055 
2056 	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
2057 				  pmrx_intr_info, NULL))
2058 		t3_fatal_err(adapter);
2059 }
2060 
2061 /*
2062  * CPL switch interrupt handler.
2063  */
2064 static void cplsw_intr_handler(adapter_t *adapter)
2065 {
2066 	static const struct intr_info cplsw_intr_info[] = {
2067 		{ F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
2068 		{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
2069 		{ F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
2070 		{ F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
2071 		{ F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
2072 		{ F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
2073 		{ 0 }
2074 	};
2075 
2076 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
2077 				  cplsw_intr_info, NULL))
2078 		t3_fatal_err(adapter);
2079 }
2080 
2081 /*
2082  * MPS interrupt handler.
2083  */
2084 static void mps_intr_handler(adapter_t *adapter)
2085 {
2086 	static const struct intr_info mps_intr_info[] = {
2087 		{ 0x1ff, "MPS parity error", -1, 1 },
2088 		{ 0 }
2089 	};
2090 
2091 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
2092 				  mps_intr_info, NULL))
2093 		t3_fatal_err(adapter);
2094 }
2095 
2096 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
2097 
2098 /*
2099  * MC7 interrupt handler.
2100  */
2101 static void mc7_intr_handler(struct mc7 *mc7)
2102 {
2103 	adapter_t *adapter = mc7->adapter;
2104 	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
2105 
2106 	if (cause & F_CE) {
2107 		mc7->stats.corr_err++;
2108 		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
2109 			"data 0x%x 0x%x 0x%x\n", mc7->name,
2110 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
2111 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
2112 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
2113 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
2114 	}
2115 
2116 	if (cause & F_UE) {
2117 		mc7->stats.uncorr_err++;
2118 		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
2119 			 "data 0x%x 0x%x 0x%x\n", mc7->name,
2120 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
2121 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
2122 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
2123 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
2124 	}
2125 
2126 	if (G_PE(cause)) {
2127 		mc7->stats.parity_err++;
2128 		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
2129 			 mc7->name, G_PE(cause));
2130 	}
2131 
2132 	if (cause & F_AE) {
2133 		u32 addr = 0;
2134 
2135 		if (adapter->params.rev > 0)
2136 			addr = t3_read_reg(adapter,
2137 					   mc7->offset + A_MC7_ERR_ADDR);
2138 		mc7->stats.addr_err++;
2139 		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
2140 			 mc7->name, addr);
2141 	}
2142 
2143 	if (cause & MC7_INTR_FATAL)
2144 		t3_fatal_err(adapter);
2145 
2146 	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
2147 }
2148 
2149 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
2150 			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
2151 /*
2152  * XGMAC interrupt handler.
2153  */
2154 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
2155 {
2156 	u32 cause;
2157 	struct port_info *pi;
2158 	struct cmac *mac;
2159 
2160 	idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
2161 	pi = adap2pinfo(adap, idx);
2162 	mac = &pi->mac;
2163 
2164 	/*
2165 	 * We mask out interrupt causes for which we're not taking interrupts.
2166 	 * This allows us to use polling logic to monitor some of the other
2167 	 * conditions when taking interrupts would impose too much load on the
2168 	 * system.
2169 	 */
2170 	cause = (t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset)
2171 		 & ~(F_RXFIFO_OVERFLOW));
2172 
2173 	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
2174 		mac->stats.tx_fifo_parity_err++;
2175 		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
2176 	}
2177 	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
2178 		mac->stats.rx_fifo_parity_err++;
2179 		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
2180 	}
2181 	if (cause & F_TXFIFO_UNDERRUN)
2182 		mac->stats.tx_fifo_urun++;
2183 	if (cause & F_RXFIFO_OVERFLOW)
2184 		mac->stats.rx_fifo_ovfl++;
2185 	if (cause & V_SERDES_LOS(M_SERDES_LOS))
2186 		mac->stats.serdes_signal_loss++;
2187 	if (cause & F_XAUIPCSCTCERR)
2188 		mac->stats.xaui_pcs_ctc_err++;
2189 	if (cause & F_XAUIPCSALIGNCHANGE)
2190 		mac->stats.xaui_pcs_align_change++;
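	/*
	 * Service F_XGM_INT only while it is enabled in A_XGM_INT_ENABLE;
	 * it is masked below so it cannot refire while the suspected link
	 * fault is being resolved.
	 */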
2191 	if (cause & F_XGM_INT &
2192 	    t3_read_reg(adap, A_XGM_INT_ENABLE + mac->offset)) {
2193 		t3_set_reg_field(adap, A_XGM_INT_ENABLE + mac->offset,
2194 		    F_XGM_INT, 0);
2195 
2196 		/* link fault suspected */
2197 		pi->link_fault = LF_MAYBE;
2198 		t3_os_link_intr(pi);
2199 	}
2200 
2201 	if (cause & XGM_INTR_FATAL)
2202 		t3_fatal_err(adap);
2203 
2204 	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
2205 	return cause != 0;
2206 }
2207 
2208 /*
2209  * Interrupt handler for PHY events.
2210  */
2211 static int phy_intr_handler(adapter_t *adapter)
2212 {
2213 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
2214 
2215 	for_each_port(adapter, i) {
2216 		struct port_info *p = adap2pinfo(adapter, i);
2217 
2218 		if (!(p->phy.caps & SUPPORTED_IRQ))
2219 			continue;
2220 
2221 		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
2222 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
2223 
2224 			if (phy_cause & cphy_cause_link_change)
2225 				t3_os_link_intr(p);
2226 			if (phy_cause & cphy_cause_fifo_error)
2227 				p->phy.fifo_errors++;
2228 			if (phy_cause & cphy_cause_module_change)
2229 				t3_os_phymod_changed(adapter, i);
2230 			if (phy_cause & cphy_cause_alarm)
2231 				CH_WARN(adapter, "Operation affected by "
2232 				    "adverse environment.  Check the spec "
2233 				    "sheet for corrective action.\n");
2234 		}
2235 	}
2236 
2237 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
2238 	return 0;
2239 }
2240 
2241 /**
2242  *	t3_slow_intr_handler - control path interrupt handler
2243  *	@adapter: the adapter
2244  *
2245  *	T3 interrupt handler for non-data interrupt events, e.g., errors.
2246  *	The designation 'slow' is because it involves register reads, while
2247  *	data interrupts typically don't involve any MMIOs.
2248  */
2249 int t3_slow_intr_handler(adapter_t *adapter)
2250 {
2251 	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
2252 
2253 	cause &= adapter->slow_intr_mask;
2254 	if (!cause)
2255 		return 0;
2256 	if (cause & F_PCIM0) {
2257 		if (is_pcie(adapter))
2258 			pcie_intr_handler(adapter);
2259 		else
2260 			pci_intr_handler(adapter);
2261 	}
2262 	if (cause & F_SGE3)
2263 		t3_sge_err_intr_handler(adapter);
2264 	if (cause & F_MC7_PMRX)
2265 		mc7_intr_handler(&adapter->pmrx);
2266 	if (cause & F_MC7_PMTX)
2267 		mc7_intr_handler(&adapter->pmtx);
2268 	if (cause & F_MC7_CM)
2269 		mc7_intr_handler(&adapter->cm);
2270 	if (cause & F_CIM)
2271 		cim_intr_handler(adapter);
2272 	if (cause & F_TP1)
2273 		tp_intr_handler(adapter);
2274 	if (cause & F_ULP2_RX)
2275 		ulprx_intr_handler(adapter);
2276 	if (cause & F_ULP2_TX)
2277 		ulptx_intr_handler(adapter);
2278 	if (cause & F_PM1_RX)
2279 		pmrx_intr_handler(adapter);
2280 	if (cause & F_PM1_TX)
2281 		pmtx_intr_handler(adapter);
2282 	if (cause & F_CPL_SWITCH)
2283 		cplsw_intr_handler(adapter);
2284 	if (cause & F_MPS0)
2285 		mps_intr_handler(adapter);
2286 	if (cause & F_MC5A)
2287 		t3_mc5_intr_handler(&adapter->mc5);
2288 	if (cause & F_XGMAC0_0)
2289 		mac_intr_handler(adapter, 0);
2290 	if (cause & F_XGMAC0_1)
2291 		mac_intr_handler(adapter, 1);
2292 	if (cause & F_T3DBG)
2293 		phy_intr_handler(adapter);
2294 
2295 	/* Clear the interrupts just processed. */
2296 	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
2297 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2298 	return 1;
2299 }
2300 
2301 static unsigned int calc_gpio_intr(adapter_t *adap)
2302 {
2303 	unsigned int i, gpi_intr = 0;
2304 
2305 	for_each_port(adap, i)
2306 		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
2307 		    adapter_info(adap)->gpio_intr[i])
2308 			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
2309 	return gpi_intr;
2310 }
2311 
2312 /**
2313  *	t3_intr_enable - enable interrupts
2314  *	@adapter: the adapter whose interrupts should be enabled
2315  *
2316  *	Enable interrupts by setting the interrupt enable registers of the
2317  *	various HW modules and then enabling the top-level interrupt
2318  *	concentrator.
2319  */
2320 void t3_intr_enable(adapter_t *adapter)
2321 {
2322 	static const struct addr_val_pair intr_en_avp[] = {
2323 		{ A_MC7_INT_ENABLE, MC7_INTR_MASK },
2324 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2325 			MC7_INTR_MASK },
2326 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2327 			MC7_INTR_MASK },
2328 		{ A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
2329 		{ A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
2330 		{ A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
2331 		{ A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
2332 		{ A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
2333 		{ A_MPS_INT_ENABLE, MPS_INTR_MASK },
2334 	};
2335 
2336 	adapter->slow_intr_mask = PL_INTR_MASK;
2337 
2338 	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
2339 	t3_write_reg(adapter, A_TP_INT_ENABLE,
2340 		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
2341 	t3_write_reg(adapter, A_SG_INT_ENABLE, SGE_INTR_MASK);
2342 
2343 	if (adapter->params.rev > 0) {
2344 		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
2345 			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
2346 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
2347 			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
2348 			     F_PBL_BOUND_ERR_CH1);
2349 	} else {
2350 		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
2351 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
2352 	}
2353 
2354 	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
2355 
2356 	if (is_pcie(adapter))
2357 		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2358 	else
2359 		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2360 	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2361 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);          /* flush */
2362 }
2363 
2364 /**
2365  *	t3_intr_disable - disable a card's interrupts
2366  *	@adapter: the adapter whose interrupts should be disabled
2367  *
2368  *	Disable interrupts.  We only disable the top-level interrupt
2369  *	concentrator and the SGE data interrupts.
2370  */
2371 void t3_intr_disable(adapter_t *adapter)
2372 {
2373 	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2374 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);  /* flush */
2375 	adapter->slow_intr_mask = 0;
2376 }
2377 
2378 /**
2379  *	t3_intr_clear - clear all interrupts
2380  *	@adapter: the adapter whose interrupts should be cleared
2381  *
2382  *	Clears all interrupts.
2383  */
2384 void t3_intr_clear(adapter_t *adapter)
2385 {
2386 	static const unsigned int cause_reg_addr[] = {
2387 		A_SG_INT_CAUSE,
2388 		A_SG_RSPQ_FL_STATUS,
2389 		A_PCIX_INT_CAUSE,
2390 		A_MC7_INT_CAUSE,
2391 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2392 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2393 		A_CIM_HOST_INT_CAUSE,
2394 		A_TP_INT_CAUSE,
2395 		A_MC5_DB_INT_CAUSE,
2396 		A_ULPRX_INT_CAUSE,
2397 		A_ULPTX_INT_CAUSE,
2398 		A_CPL_INTR_CAUSE,
2399 		A_PM1_TX_INT_CAUSE,
2400 		A_PM1_RX_INT_CAUSE,
2401 		A_MPS_INT_CAUSE,
2402 		A_T3DBG_INT_CAUSE,
2403 	};
2404 	unsigned int i;
2405 
2406 	/* Clear PHY and MAC interrupts for each port. */
2407 	for_each_port(adapter, i)
2408 		t3_port_intr_clear(adapter, i);
2409 
2410 	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2411 		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2412 
2413 	if (is_pcie(adapter))
2414 		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2415 	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2416 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0);          /* flush */
2417 }
2418 
2419 void t3_xgm_intr_enable(adapter_t *adapter, int idx)
2420 {
2421 	struct port_info *pi = adap2pinfo(adapter, idx);
2422 
2423 	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2424 		     XGM_EXTRA_INTR_MASK);
2425 }
2426 
2427 void t3_xgm_intr_disable(adapter_t *adapter, int idx)
2428 {
2429 	struct port_info *pi = adap2pinfo(adapter, idx);
2430 
2431 	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2432 		     0x7ff);
2433 }
2434 
2435 /**
2436  *	t3_port_intr_enable - enable port-specific interrupts
2437  *	@adapter: associated adapter
2438  *	@idx: index of port whose interrupts should be enabled
2439  *
2440  *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2441  *	adapter port.
2442  */
2443 void t3_port_intr_enable(adapter_t *adapter, int idx)
2444 {
2445 	struct port_info *pi = adap2pinfo(adapter, idx);
2446 
2447 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2448 	pi->phy.ops->intr_enable(&pi->phy);
2449 }
2450 
2451 /**
2452  *	t3_port_intr_disable - disable port-specific interrupts
2453  *	@adapter: associated adapter
2454  *	@idx: index of port whose interrupts should be disabled
2455  *
2456  *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2457  *	adapter port.
2458  */
2459 void t3_port_intr_disable(adapter_t *adapter, int idx)
2460 {
2461 	struct port_info *pi = adap2pinfo(adapter, idx);
2462 
2463 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2464 	pi->phy.ops->intr_disable(&pi->phy);
2465 }
2466 
2467 /**
2468  *	t3_port_intr_clear - clear port-specific interrupts
2469  *	@adapter: associated adapter
2470  *	@idx: index of port whose interrupts to clear
2471  *
2472  *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2473  *	adapter port.
2474  */
2475 void t3_port_intr_clear(adapter_t *adapter, int idx)
2476 {
2477 	struct port_info *pi = adap2pinfo(adapter, idx);
2478 
2479 	t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2480 	pi->phy.ops->intr_clear(&pi->phy);
2481 }
2482 
2483 #define SG_CONTEXT_CMD_ATTEMPTS 100
2484 
2485 /**
2486  * 	t3_sge_write_context - write an SGE context
2487  * 	@adapter: the adapter
2488  * 	@id: the context id
2489  * 	@type: the context type
2490  *
2491  * 	Program an SGE context with the values already loaded in the
2492  * 	CONTEXT_DATA? registers.
2493  */
2494 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
2495 				unsigned int type)
2496 {
2497 	if (type == F_RESPONSEQ) {
2498 		/*
2499 		 * Can't write the Response Queue Context bits for
2500 		 * Interrupt Armed or the Reserve bits after the chip
2501 		 * has been initialized out of reset.  Writing to these
2502 		 * bits can confuse the hardware.
2503 		 */
2504 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2505 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2506 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2507 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2508 	} else {
2509 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2510 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2511 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2512 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2513 	}
2514 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2515 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2516 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2517 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2518 }
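
/*
 * In short, a context update is a three-step protocol: load the
 * CONTEXT_DATA registers, set the per-bit write-enable MASK registers,
 * then issue an opcode-1 command for the target (type, id) and poll
 * F_CONTEXT_CMD_BUSY until the hardware has absorbed the update.
 */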
2519 
2520 /**
2521  *	clear_sge_ctxt - completely clear an SGE context
2522  *	@adapter: the adapter
2523  *	@id: the context id
2524  *	@type: the context type
2525  *
2526  *	Completely clear an SGE context.  Used predominantly at post-reset
2527  *	initialization.  Note in particular that we don't skip writing to any
2528  *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2529  *	does ...
2530  */
2531 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
2532 {
2533 	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2534 	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2535 	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2536 	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2537 	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2538 	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2539 	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2540 	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2541 	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2542 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2543 	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2544 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2545 }
2546 
2547 /**
2548  *	t3_sge_init_ecntxt - initialize an SGE egress context
2549  *	@adapter: the adapter to configure
2550  *	@id: the context id
2551  *	@gts_enable: whether to enable GTS for the context
2552  *	@type: the egress context type
2553  *	@respq: associated response queue
2554  *	@base_addr: base address of queue
2555  *	@size: number of queue entries
2556  *	@token: uP token
2557  *	@gen: initial generation value for the context
2558  *	@cidx: consumer pointer
2559  *
2560  *	Initialize an SGE egress context and make it ready for use.  If the
2561  *	platform allows concurrent context operations, the caller is
2562  *	responsible for appropriate locking.
2563  */
2564 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2565 		       enum sge_context_type type, int respq, u64 base_addr,
2566 		       unsigned int size, unsigned int token, int gen,
2567 		       unsigned int cidx)
2568 {
2569 	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2570 
2571 	if (base_addr & 0xfff)     /* must be 4K aligned */
2572 		return -EINVAL;
2573 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2574 		return -EBUSY;
2575 
2576 	base_addr >>= 12;
2577 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2578 		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2579 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2580 		     V_EC_BASE_LO((u32)base_addr & 0xffff));
2581 	base_addr >>= 16;
2582 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2583 	base_addr >>= 32;
2584 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2585 		     V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2586 		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2587 		     F_EC_VALID);
2588 	return t3_sge_write_context(adapter, id, F_EGRESS);
2589 }
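
/*
 * Note how the 4KB-aligned base address is spread across the data words
 * above: after the 12-bit page shift, page-number bits [15:0] land in
 * DATA1 (EC_BASE_LO), bits [47:16] in DATA2, and bits [51:48] in DATA3
 * (EC_BASE_HI), together covering a full 64-bit bus address.
 */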
2590 
2591 /**
2592  *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2593  *	@adapter: the adapter to configure
2594  *	@id: the context id
2595  *	@gts_enable: whether to enable GTS for the context
2596  *	@base_addr: base address of queue
2597  *	@size: number of queue entries
2598  *	@bsize: size of each buffer for this queue
2599  *	@cong_thres: threshold to signal congestion to upstream producers
2600  *	@gen: initial generation value for the context
2601  *	@cidx: consumer pointer
2602  *
2603  *	Initialize an SGE free list context and make it ready for use.  The
2604  *	caller is responsible for ensuring only one context operation occurs
2605  *	at a time.
2606  */
2607 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2608 			u64 base_addr, unsigned int size, unsigned int bsize,
2609 			unsigned int cong_thres, int gen, unsigned int cidx)
2610 {
2611 	if (base_addr & 0xfff)     /* must be 4K aligned */
2612 		return -EINVAL;
2613 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2614 		return -EBUSY;
2615 
2616 	base_addr >>= 12;
2617 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2618 	base_addr >>= 32;
2619 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2620 		     V_FL_BASE_HI((u32)base_addr) |
2621 		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2622 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2623 		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2624 		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2625 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2626 		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2627 		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2628 	return t3_sge_write_context(adapter, id, F_FREELIST);
2629 }
2630 
2631 /**
2632  *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2633  *	@adapter: the adapter to configure
2634  *	@id: the context id
2635  *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2636  *	@base_addr: base address of queue
2637  *	@size: number of queue entries
2638  *	@fl_thres: threshold for selecting the normal or jumbo free list
2639  *	@gen: initial generation value for the context
2640  *	@cidx: consumer pointer
2641  *
2642  *	Initialize an SGE response queue context and make it ready for use.
2643  *	The caller is responsible for ensuring only one context operation
2644  *	occurs at a time.
2645  */
2646 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2647 			 u64 base_addr, unsigned int size,
2648 			 unsigned int fl_thres, int gen, unsigned int cidx)
2649 {
2650 	unsigned int ctrl, intr = 0;
2651 
2652 	if (base_addr & 0xfff)     /* must be 4K aligned */
2653 		return -EINVAL;
2654 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2655 		return -EBUSY;
2656 
2657 	base_addr >>= 12;
2658 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2659 		     V_CQ_INDEX(cidx));
2660 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2661 	base_addr >>= 32;
2662         ctrl = t3_read_reg(adapter, A_SG_CONTROL);
2663 	ctrl = t3_read_reg(adapter, A_SG_CONTROL);
2664 	if (irq_vec_idx > 0 ||
2665 	    (irq_vec_idx == 0 && !(ctrl & F_ONEINTMULTQ)))
2666 		intr = F_RQ_INTR_EN;
2667 	if (irq_vec_idx >= 0)
2668 		intr |= V_RQ_MSI_VEC(irq_vec_idx);
2669 		     V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2670 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2671 	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2672 }
2673 
2674 /**
2675  *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2676  *	@adapter: the adapter to configure
2677  *	@id: the context id
2678  *	@base_addr: base address of queue
2679  *	@size: number of queue entries
2680  *	@rspq: response queue for async notifications
2681  *	@ovfl_mode: CQ overflow mode
2682  *	@credits: completion queue credits
2683  *	@credit_thres: the credit threshold
2684  *
2685  *	Initialize an SGE completion queue context and make it ready for use.
2686  *	The caller is responsible for ensuring only one context operation
2687  *	occurs at a time.
2688  */
2689 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2690 			unsigned int size, int rspq, int ovfl_mode,
2691 			unsigned int credits, unsigned int credit_thres)
2692 {
2693 	if (base_addr & 0xfff)     /* must be 4K aligned */
2694 		return -EINVAL;
2695 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2696 		return -EBUSY;
2697 
2698 	base_addr >>= 12;
2699 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2700 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2701 	base_addr >>= 32;
2702 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2703 		     V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2704 		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2705 		     V_CQ_ERR(ovfl_mode));
2706 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2707 		     V_CQ_CREDIT_THRES(credit_thres));
2708 	return t3_sge_write_context(adapter, id, F_CQ);
2709 }
2710 
2711 /**
2712  *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2713  *	@adapter: the adapter
2714  *	@id: the egress context id
2715  *	@enable: enable (1) or disable (0) the context
2716  *
2717  *	Enable or disable an SGE egress context.  The caller is responsible for
2718  *	ensuring only one context operation occurs at a time.
2719  */
2720 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2721 {
2722 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2723 		return -EBUSY;
2724 
2725 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2726 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2727 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2728 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2729 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2730 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2731 		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2732 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2733 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2734 }
2735 
2736 /**
2737  *	t3_sge_disable_fl - disable an SGE free-buffer list
2738  *	@adapter: the adapter
2739  *	@id: the free list context id
2740  *
2741  *	Disable an SGE free-buffer list.  The caller is responsible for
2742  *	ensuring only one context operation occurs at a time.
2743  */
2744 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2745 {
2746 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2747 		return -EBUSY;
2748 
2749 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2750 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2751 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2752 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2753 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2754 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2755 		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2756 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2757 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2758 }
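
/*
 * Note the disable idiom used here and in the two routines that follow:
 * only the queue-size field is unmasked and its data word is written as
 * zero, so disabling amounts to programming a zero-sized queue.
 */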
2759 
2760 /**
2761  *	t3_sge_disable_rspcntxt - disable an SGE response queue
2762  *	@adapter: the adapter
2763  *	@id: the response queue context id
2764  *
2765  *	Disable an SGE response queue.  The caller is responsible for
2766  *	ensuring only one context operation occurs at a time.
2767  */
2768 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2769 {
2770 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2771 		return -EBUSY;
2772 
2773 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2774 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2775 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2776 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2777 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2778 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2779 		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2780 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2781 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2782 }
2783 
2784 /**
2785  *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2786  *	@adapter: the adapter
2787  *	@id: the completion queue context id
2788  *
2789  *	Disable an SGE completion queue.  The caller is responsible for
2790  *	ensuring only one context operation occurs at a time.
2791  */
2792 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2793 {
2794 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2795 		return -EBUSY;
2796 
2797 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2798 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2799 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2800 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2801 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2802 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2803 		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2804 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2805 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2806 }
2807 
2808 /**
2809  *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2810  *	@adapter: the adapter
2811  *	@id: the context id
2812  *	@op: the operation to perform
2813  *	@credits: credits to return to the CQ
2814  *
2815  *	Perform the selected operation on an SGE completion queue context.
2816  *	The caller is responsible for ensuring only one context operation
2817  *	occurs at a time.
2818  *
2819  *	For most operations the function returns the current HW position in
2820  *	the completion queue.
2821  */
2822 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2823 		      unsigned int credits)
2824 {
2825 	u32 val;
2826 
2827 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2828 		return -EBUSY;
2829 
2830 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2831 	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2832 		     V_CONTEXT(id) | F_CQ);
2833 	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2834 				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2835 		return -EIO;
2836 
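	/*
	 * Ops 2 through 6 report the current CQ index.  Rev > 0 parts return
	 * it directly in the command read-back above; rev 0 parts need an
	 * explicit context read (opcode 0) to fetch it, as done below.
	 */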
2837 	if (op >= 2 && op < 7) {
2838 		if (adapter->params.rev > 0)
2839 			return G_CQ_INDEX(val);
2840 
2841 		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2842 			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2843 		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2844 				    F_CONTEXT_CMD_BUSY, 0,
2845 				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2846 			return -EIO;
2847 		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2848 	}
2849 	return 0;
2850 }
2851 
2852 /**
2853  * 	t3_sge_read_context - read an SGE context
2854  * 	@type: the context type
2855  * 	@adapter: the adapter
2856  * 	@id: the context id
2857  * 	@data: holds the retrieved context
2858  *
2859  * 	Read an SGE context.  The caller is responsible for ensuring
2860  * 	only one context operation occurs at a time.
2861  */
2862 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2863 			       unsigned int id, u32 data[4])
2864 {
2865 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2866 		return -EBUSY;
2867 
2868 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2869 		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2870 	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2871 			    SG_CONTEXT_CMD_ATTEMPTS, 1))
2872 		return -EIO;
2873 	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2874 	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2875 	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2876 	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2877 	return 0;
2878 }
2879 
2880 /**
2881  * 	t3_sge_read_ecntxt - read an SGE egress context
2882  * 	@adapter: the adapter
2883  * 	@id: the context id
2884  * 	@data: holds the retrieved context
2885  *
2886  * 	Read an SGE egress context.  The caller is responsible for ensuring
2887  * 	only one context operation occurs at a time.
2888  */
2889 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2890 {
2891 	if (id >= 65536)
2892 		return -EINVAL;
2893 	return t3_sge_read_context(F_EGRESS, adapter, id, data);
2894 }
2895 
2896 /**
2897  * 	t3_sge_read_cq - read an SGE CQ context
2898  * 	@adapter: the adapter
2899  * 	@id: the context id
2900  * 	@data: holds the retrieved context
2901  *
2902  * 	Read an SGE CQ context.  The caller is responsible for ensuring
2903  * 	only one context operation occurs at a time.
2904  */
2905 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2906 {
2907 	if (id >= 65536)
2908 		return -EINVAL;
2909 	return t3_sge_read_context(F_CQ, adapter, id, data);
2910 }
2911 
2912 /**
2913  * 	t3_sge_read_fl - read an SGE free-list context
2914  * 	@adapter: the adapter
2915  * 	@id: the context id
2916  * 	@data: holds the retrieved context
2917  *
2918  * 	Read an SGE free-list context.  The caller is responsible for ensuring
2919  * 	only one context operation occurs at a time.
2920  */
2921 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2922 {
2923 	if (id >= SGE_QSETS * 2)
2924 		return -EINVAL;
2925 	return t3_sge_read_context(F_FREELIST, adapter, id, data);
2926 }
2927 
2928 /**
2929  * 	t3_sge_read_rspq - read an SGE response queue context
2930  * 	@adapter: the adapter
2931  * 	@id: the context id
2932  * 	@data: holds the retrieved context
2933  *
2934  * 	Read an SGE response queue context.  The caller is responsible for
2935  * 	ensuring only one context operation occurs at a time.
2936  */
2937 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2938 {
2939 	if (id >= SGE_QSETS)
2940 		return -EINVAL;
2941 	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2942 }
2943 
2944 /**
2945  *	t3_config_rss - configure Rx packet steering
2946  *	@adapter: the adapter
2947  *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2948  *	@cpus: values for the CPU lookup table (0xff terminated)
2949  *	@rspq: values for the response queue lookup table (0xffff terminated)
2950  *
2951  *	Programs the receive packet steering logic.  @cpus and @rspq provide
2952  *	the values for the CPU and response queue lookup tables.  If they
2953  *	provide fewer values than the size of the tables the supplied values
2954  *	are used repeatedly until the tables are fully populated.
2955  */
2956 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2957 		   const u16 *rspq)
2958 {
2959 	int i, j, cpu_idx = 0, q_idx = 0;
2960 
2961 	if (cpus)
2962 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2963 			u32 val = i << 16;
2964 
2965 			for (j = 0; j < 2; ++j) {
2966 				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2967 				if (cpus[cpu_idx] == 0xff)
2968 					cpu_idx = 0;
2969 			}
2970 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2971 		}
2972 
2973 	if (rspq)
2974 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2975 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2976 				     (i << 16) | rspq[q_idx++]);
2977 			if (rspq[q_idx] == 0xffff)
2978 				q_idx = 0;
2979 		}
2980 
2981 	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2982 }
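
/*
 * Minimal usage sketch (illustrative values; rss_config_flags stands in
 * for whatever TP_RSS_CONFIG settings the platform needs): steer all
 * traffic to CPU 0 and response queue 0.  The 0xff/0xffff terminators
 * make the supplied values recycle until both tables are fully populated.
 *
 *	static const u8 cpus[] = { 0, 0xff };
 *	static const u16 rspq[] = { 0, 0xffff };
 *
 *	t3_config_rss(adapter, rss_config_flags, cpus, rspq);
 */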
2983 
2984 /**
2985  *	t3_read_rss - read the contents of the RSS tables
2986  *	@adapter: the adapter
2987  *	@lkup: holds the contents of the RSS lookup table
2988  *	@map: holds the contents of the RSS map table
2989  *
2990  *	Reads the contents of the receive packet steering tables.
2991  */
2992 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2993 {
2994 	int i;
2995 	u32 val;
2996 
2997 	if (lkup)
2998 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2999 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
3000 				     0xffff0000 | i);
3001 			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
3002 			if (!(val & 0x80000000))
3003 				return -EAGAIN;
3004 			*lkup++ = (u8)val;
3005 			*lkup++ = (u8)(val >> 8);
3006 		}
3007 
3008 	if (map)
3009 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
3010 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
3011 				     0xffff0000 | i);
3012 			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
3013 			if (!(val & 0x80000000))
3014 				return -EAGAIN;
3015 			*map++ = (u16)val;
3016 		}
3017 	return 0;
3018 }
3019 
3020 /**
3021  *	t3_tp_set_offload_mode - put TP in NIC/offload mode
3022  *	@adap: the adapter
3023  *	@enable: 1 to select offload mode, 0 for regular NIC
3024  *
3025  *	Switches TP to NIC/offload mode.
3026  */
3027 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
3028 {
3029 	if (is_offload(adap) || !enable)
3030 		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
3031 				 V_NICMODE(!enable));
3032 }
3033 
3034 /**
3035  *	tp_wr_bits_indirect - set/clear bits in an indirect TP register
3036  *	@adap: the adapter
3037  *	@addr: the indirect TP register address
3038  *	@mask: specifies the field within the register to modify
3039  *	@val: new value for the field
3040  *
3041  *	Sets a field of an indirect TP register to the given value.
3042  */
3043 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
3044 				unsigned int mask, unsigned int val)
3045 {
3046 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3047 	val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3048 	t3_write_reg(adap, A_TP_PIO_DATA, val);
3049 }
3050 
3051 /**
3052  *	t3_enable_filters - enable the HW filters
3053  *	@adap: the adapter
3054  *
3055  *	Enables the HW filters for NIC traffic.
3056  */
3057 void t3_enable_filters(adapter_t *adap)
3058 {
3059 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
3060 	t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
3061 	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
3062 	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
3063 }
3064 
3065 /**
3066  *	t3_disable_filters - disable the HW filters
3067  *	@adap: the adapter
3068  *
3069  *	Disables the HW filters for NIC traffic.
3070  */
3071 void t3_disable_filters(adapter_t *adap)
3072 {
3073 	/* note that we don't want to revert to NIC-only mode */
3074 	t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_FILTEREN, 0);
3075 	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG,
3076 			 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 0);
3077 	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, F_LOOKUPEVERYPKT, 0);
3078 }
3079 
3080 /**
3081  *	pm_num_pages - calculate the number of pages of the payload memory
3082  *	@mem_size: the size of the payload memory
3083  *	@pg_size: the size of each payload memory page
3084  *
3085  *	Calculate the number of pages, each of the given size, that fit in a
3086  *	memory of the specified size, respecting the HW requirement that the
3087  *	number of pages must be a multiple of 24.
3088  */
3089 static inline unsigned int pm_num_pages(unsigned int mem_size,
3090 					unsigned int pg_size)
3091 {
3092 	unsigned int n = mem_size / pg_size;
3093 
3094 	return n - n % 24;
3095 }
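
/*
 * For example, a 64MB payload memory with 64KB pages gives n = 1024,
 * which rounds down to 1008, the largest multiple of 24 that fits.
 */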
3096 
3097 #define mem_region(adap, start, size, reg) do { \
3098 	t3_write_reg((adap), A_ ## reg, (start)); \
3099 	(start) += (size); } while (0)
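
/*
 * mem_region() advances @start by @size as a side effect, so consecutive
 * invocations in partition_mem() below carve out back-to-back regions
 * while programming each region's base-address register.
 */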
3100 
3101 /**
3102  *	partition_mem - partition memory and configure TP memory settings
3103  *	@adap: the adapter
3104  *	@p: the TP parameters
3105  *
3106  *	Partitions context and payload memory and configures TP's memory
3107  *	registers.
3108  */
3109 static void partition_mem(adapter_t *adap, const struct tp_params *p)
3110 {
3111 	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
3112 	unsigned int timers = 0, timers_shift = 22;
3113 
3114 	if (adap->params.rev > 0) {
3115 		if (tids <= 16 * 1024) {
3116 			timers = 1;
3117 			timers_shift = 16;
3118 		} else if (tids <= 64 * 1024) {
3119 			timers = 2;
3120 			timers_shift = 18;
3121 		} else if (tids <= 256 * 1024) {
3122 			timers = 3;
3123 			timers_shift = 20;
3124 		}
3125 	}
3126 
3127 	t3_write_reg(adap, A_TP_PMM_SIZE,
3128 		     p->chan_rx_size | (p->chan_tx_size >> 16));
3129 
3130 	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
3131 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
3132 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
3133 	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
3134 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
3135 
3136 	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
3137 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
3138 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
3139 
3140 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
3141 	/* Add a bit of headroom and make multiple of 24 */
3142 	pstructs += 48;
3143 	pstructs -= pstructs % 24;
3144 	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
3145 
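	/*
	 * Lay out context memory in ascending order: TCBs, egress contexts,
	 * CQ contexts, timer queues, pstructs and their free list, then the
	 * Rx/Tx page free lists; whatever remains past the next 4KB boundary
	 * is handed to CIM SDRAM.
	 */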
3146 	m = tids * TCB_SIZE;
3147 	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
3148 	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
3149 	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
3150 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
3151 	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
3152 	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
3153 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
3154 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
3155 
3156 	m = (m + 4095) & ~0xfff;
3157 	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
3158 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
3159 
3160 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
3161 	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
3162 	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
3163 	if (tids < m)
3164 		adap->params.mc5.nservers += m - tids;
3165 }
3166 
3167 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
3168 {
3169 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3170 	t3_write_reg(adap, A_TP_PIO_DATA, val);
3171 }
3172 
3173 static inline u32 tp_rd_indirect(adapter_t *adap, unsigned int addr)
3174 {
3175 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3176 	return t3_read_reg(adap, A_TP_PIO_DATA);
3177 }
3178 
3179 static void tp_config(adapter_t *adap, const struct tp_params *p)
3180 {
3181 	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
3182 		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
3183 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
3184 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
3185 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
3186 		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
3187 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
3188 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
3189 		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
3190 		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
3191 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
3192 			 F_IPV6ENABLE | F_NICMODE);
3193 	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
3194 	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
3195 	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
3196 			 adap->params.rev > 0 ? F_ENABLEESND :
3197 						F_T3A_ENABLEESND);
3198 	t3_set_reg_field(adap, A_TP_PC_CONFIG,
3199 			 F_ENABLEEPCMDAFULL,
3200 			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
3201 			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
3202 	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
3203 			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
3204 			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
3205 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
3206 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
3207 
3208 	if (adap->params.rev > 0) {
3209 		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
3210 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
3211 				 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
3212 		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
3213 		tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
3214 		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
3215 		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
3216 	} else
3217 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
3218 
3219 	if (adap->params.rev == T3_REV_C)
3220 		t3_set_reg_field(adap, A_TP_PC_CONFIG,
3221 				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
3222 				 V_TABLELATENCYDELTA(4));
3223 
3224 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
3225 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
3226 	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
3227 	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
3228 
3229 	if (adap->params.nports > 2) {
3230 		t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
3231 				 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
3232 				 F_ENABLERXPORTFROMADDR);
3233 		tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
3234 				    V_RXMAPMODE(M_RXMAPMODE), 0);
3235 		tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
3236 			       V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
3237 			       F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
3238 			       F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
3239 		tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
3240 		tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
3241 		tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
3242 	}
3243 }
3244 
3245 /* TCP timer values in ms */
3246 #define TP_DACK_TIMER 50
3247 #define TP_RTO_MIN    250
3248 
3249 /**
3250  *	tp_set_timers - set TP timing parameters
3251  *	@adap: the adapter to set
3252  *	@core_clk: the core clock frequency in Hz
3253  *
3254  *	Set TP's timing parameters, such as the various timer resolutions and
3255  *	the TCP timer values.
3256  */
3257 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
3258 {
3259 	unsigned int tre = adap->params.tp.tre;
3260 	unsigned int dack_re = adap->params.tp.dack_re;
3261 	unsigned int tstamp_re = fls(core_clk / 1000);     /* 1ms, at least */
3262 	unsigned int tps = core_clk >> tre;
3263 
3264 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
3265 		     V_DELAYEDACKRESOLUTION(dack_re) |
3266 		     V_TIMESTAMPRESOLUTION(tstamp_re));
3267 	t3_write_reg(adap, A_TP_DACK_TIMER,
3268 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
3269 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
3270 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
3271 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
3272 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
3273 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
3274 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
3275 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
3276 		     V_KEEPALIVEMAX(9));
3277 
3278 #define SECONDS * tps
3279 
3280 	t3_write_reg(adap, A_TP_MSL,
3281 		     adap->params.rev > 0 ? 0 : 2 SECONDS);
3282 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
3283 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
3284 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
3285 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
3286 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
3287 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
3288 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
3289 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
3290 
3291 #undef SECONDS
3292 }
3293 
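/*
 * Worked example for the SECONDS macro above (illustrative values): a
 * value of "n SECONDS" expands to n * tps, where tps = core_clk >> tre is
 * the number of TP timer ticks per second.  With a 200 MHz core clock and
 * tre = 4, tps = 12500000, so "2 SECONDS" programs 25000000 ticks.
 */
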
3294 /**
3295  *	t3_tp_set_coalescing_size - set receive coalescing size
3296  *	@adap: the adapter
3297  *	@size: the receive coalescing size
3298  *	@psh: whether a set PSH bit should deliver coalesced data
3299  *
3300  *	Set the receive coalescing size and PSH bit handling.
3301  */
3302 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
3303 {
3304 	u32 val;
3305 
3306 	if (size > MAX_RX_COALESCING_LEN)
3307 		return -EINVAL;
3308 
3309 	val = t3_read_reg(adap, A_TP_PARA_REG3);
3310 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
3311 
3312 	if (size) {
3313 		val |= F_RXCOALESCEENABLE;
3314 		if (psh)
3315 			val |= F_RXCOALESCEPSHEN;
3316 		size = min(MAX_RX_COALESCING_LEN, size);
3317 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
3318 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
3319 	}
3320 	t3_write_reg(adap, A_TP_PARA_REG3, val);
3321 	return 0;
3322 }
3323 
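/*
 * Illustrative usage sketch (hypothetical caller): enable receive
 * coalescing up to the HW limit with PSH-triggered delivery, then pass a
 * size of 0 to turn coalescing off again.
 */
#if 0
static int rx_coalescing_example(adapter_t *adap)
{
	int err = t3_tp_set_coalescing_size(adap, MAX_RX_COALESCING_LEN, 1);

	if (err)
		return err;
	return t3_tp_set_coalescing_size(adap, 0, 0);	/* disable */
}
#endif
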
3324 /**
3325  *	t3_tp_set_max_rxsize - set the max receive size
3326  *	@adap: the adapter
3327  *	@size: the max receive size
3328  *
3329  *	Set TP's max receive size.  This is the limit that applies when
3330  *	receive coalescing is disabled.
3331  */
3332 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
3333 {
3334 	t3_write_reg(adap, A_TP_PARA_REG7,
3335 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
3336 }
3337 
3338 static void __devinit init_mtus(unsigned short mtus[])
3339 {
3340 	/*
3341 	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
3342 	 * it can accommodate max size TCP/IP headers when SACK and timestamps
3343 	 * are enabled and still have at least 8 bytes of payload.
3344 	 */
3345 	mtus[0] = 88;
3346 	mtus[1] = 88;
3347 	mtus[2] = 256;
3348 	mtus[3] = 512;
3349 	mtus[4] = 576;
3350 	mtus[5] = 1024;
3351 	mtus[6] = 1280;
3352 	mtus[7] = 1492;
3353 	mtus[8] = 1500;
3354 	mtus[9] = 2002;
3355 	mtus[10] = 2048;
3356 	mtus[11] = 4096;
3357 	mtus[12] = 4352;
3358 	mtus[13] = 8192;
3359 	mtus[14] = 9000;
3360 	mtus[15] = 9600;
3361 }
3362 
3363 /**
3364  *	init_cong_ctrl - initialize congestion control parameters
3365  *	@a: the alpha values for congestion control
3366  *	@b: the beta values for congestion control
3367  *
3368  *	Initialize the congestion control parameters.
3369  */
3370 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3371 {
3372 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3373 	a[9] = 2;
3374 	a[10] = 3;
3375 	a[11] = 4;
3376 	a[12] = 5;
3377 	a[13] = 6;
3378 	a[14] = 7;
3379 	a[15] = 8;
3380 	a[16] = 9;
3381 	a[17] = 10;
3382 	a[18] = 14;
3383 	a[19] = 17;
3384 	a[20] = 21;
3385 	a[21] = 25;
3386 	a[22] = 30;
3387 	a[23] = 35;
3388 	a[24] = 45;
3389 	a[25] = 60;
3390 	a[26] = 80;
3391 	a[27] = 100;
3392 	a[28] = 200;
3393 	a[29] = 300;
3394 	a[30] = 400;
3395 	a[31] = 500;
3396 
3397 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3398 	b[9] = b[10] = 1;
3399 	b[11] = b[12] = 2;
3400 	b[13] = b[14] = b[15] = b[16] = 3;
3401 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3402 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3403 	b[28] = b[29] = 6;
3404 	b[30] = b[31] = 7;
3405 }
3406 
3407 /* The minimum additive increment value for the congestion control table */
3408 #define CC_MIN_INCR 2U
3409 
3410 /**
3411  *	t3_load_mtus - write the MTU and congestion control HW tables
3412  *	@adap: the adapter
3413  *	@mtus: the unrestricted values for the MTU table
3414  *	@alpha: the values for the congestion control alpha parameter
3415  *	@beta: the values for the congestion control beta parameter
3416  *	@mtu_cap: the maximum permitted effective MTU
3417  *
3418  *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
3419  *	Update the high-speed congestion control table with the supplied alpha,
3420  *	beta, and MTUs.
3421  */
3422 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
3423 		  unsigned short alpha[NCCTRL_WIN],
3424 		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
3425 {
3426 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3427 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3428 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3429 		28672, 40960, 57344, 81920, 114688, 163840, 229376 };
3430 
3431 	unsigned int i, w;
3432 
3433 	for (i = 0; i < NMTUS; ++i) {
3434 		unsigned int mtu = min(mtus[i], mtu_cap);
3435 		unsigned int log2 = fls(mtu);
3436 
3437 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3438 			log2--;
3439 		t3_write_reg(adap, A_TP_MTU_TABLE,
3440 			     (i << 24) | (log2 << 16) | mtu);
3441 
3442 		for (w = 0; w < NCCTRL_WIN; ++w) {
3443 			unsigned int inc;
3444 
3445 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3446 				  CC_MIN_INCR);
3447 
3448 			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3449 				     (w << 16) | (beta[w] << 13) | inc);
3450 		}
3451 	}
3452 }
3453 
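/*
 * Worked example for the log2 rounding above (illustrative): for
 * mtu = 1500, fls(1500) = 11 and the "round" test checks bit 9 (512).
 * Since 1500 < 1536 that bit is clear and log2 drops to 10, i.e. 1500 is
 * treated as nearest to 2^10; an MTU of 1536 or more keeps log2 = 11.
 */
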
3454 /**
3455  *	t3_read_hw_mtus - returns the values in the HW MTU table
3456  *	@adap: the adapter
3457  *	@mtus: where to store the HW MTU values
3458  *
3459  *	Reads the HW MTU table.
3460  */
3461 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3462 {
3463 	int i;
3464 
3465 	for (i = 0; i < NMTUS; ++i) {
3466 		unsigned int val;
3467 
3468 		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3469 		val = t3_read_reg(adap, A_TP_MTU_TABLE);
3470 		mtus[i] = val & 0x3fff;
3471 	}
3472 }
3473 
3474 /**
3475  *	t3_get_cong_cntl_tab - reads the congestion control table
3476  *	@adap: the adapter
3477  *	@incr: where to store the additive increment values
3478  *
3479  *	Reads the additive increments programmed into the HW congestion
3480  *	control table.
3481  */
3482 void t3_get_cong_cntl_tab(adapter_t *adap,
3483 			  unsigned short incr[NMTUS][NCCTRL_WIN])
3484 {
3485 	unsigned int mtu, w;
3486 
3487 	for (mtu = 0; mtu < NMTUS; ++mtu)
3488 		for (w = 0; w < NCCTRL_WIN; ++w) {
3489 			t3_write_reg(adap, A_TP_CCTRL_TABLE,
3490 				     0xffff0000 | (mtu << 5) | w);
3491 			incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3492 				        A_TP_CCTRL_TABLE) & 0x1fff;
3493 		}
3494 }
3495 
3496 /**
3497  *	t3_tp_get_mib_stats - read TP's MIB counters
3498  *	@adap: the adapter
3499  *	@tps: holds the returned counter values
3500  *
3501  *	Returns the values of TP's MIB counters.
3502  */
3503 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
3504 {
3505 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3506 			 sizeof(*tps) / sizeof(u32), 0);
3507 }
3508 
3509 /**
3510  *	t3_read_pace_tbl - read the pace table
3511  *	@adap: the adapter
3512  *	@pace_vals: holds the returned values
3513  *
3514  *	Returns the values of TP's pace table in nanoseconds.
3515  */
3516 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
3517 {
3518 	unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3519 
3520 	for (i = 0; i < NTX_SCHED; i++) {
3521 		t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3522 		pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3523 	}
3524 }
3525 
3526 /**
3527  *	t3_set_pace_tbl - set the pace table
3528  *	@adap: the adapter
3529  *	@pace_vals: the pace values in nanoseconds
3530  *	@start: index of the first entry in the HW pace table to set
3531  *	@n: how many entries to set
3532  *
3533  *	Sets (a subset of the) HW pace table.
3534  *	Sets (a subset of) the HW pace table.
3535 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3536 		     unsigned int start, unsigned int n)
3537 {
3538 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3539 
3540 	for ( ; n; n--, start++, pace_vals++)
3541 		t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3542 			     ((*pace_vals + tick_ns / 2) / tick_ns));
3543 }
3544 
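/*
 * Illustrative round-trip sketch (hypothetical helper): pace values read
 * back with t3_read_pace_tbl() can be adjusted and rewritten with
 * t3_set_pace_tbl(); values are rounded to the nearest DACK tick on write.
 */
#if 0
static void pace_tbl_example(adapter_t *adap)
{
	unsigned int vals[NTX_SCHED];

	t3_read_pace_tbl(adap, vals);		/* values in nanoseconds */
	vals[0] += 1000;			/* slow scheduler 0 by 1 us */
	t3_set_pace_tbl(adap, vals, 0, 1);	/* rewrite entry 0 only */
}
#endif
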
3545 #define ulp_region(adap, name, start, len) \
3546 	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3547 	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3548 		     (start) + (len) - 1); \
3549 	start += len
3550 
3551 #define ulptx_region(adap, name, start, len) \
3552 	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3553 	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3554 		     (start) + (len) - 1)
3555 
3556 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3557 {
3558 	unsigned int m = p->chan_rx_size;
3559 
3560 	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3561 	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3562 	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3563 	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3564 	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3565 	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3566 	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3567 	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3568 }
3569 
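/*
 * Layout note (illustrative): ulp_region() advances m past each region
 * while ulptx_region() does not, so the TPT/STAG and PBL/PBL pairs above
 * intentionally describe the same memory from the ULP Tx and Rx sides.
 */
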
3570 
3571 /**
3572  *	t3_set_proto_sram - set the contents of the protocol SRAM
3573  *	@adap: the adapter
3574  *	@data: the protocol image
3575  *
3576  *	Write the contents of the protocol SRAM.
3577  */
3578 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3579 {
3580 	int i;
3581 	const u32 *buf = (const u32 *)data;
3582 
3583 	for (i = 0; i < PROTO_SRAM_LINES; i++) {
3584 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3585 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3586 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3587 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3588 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3589 
3590 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3591 		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3592 			return -EIO;
3593 	}
3594 	return 0;
3595 }
3596 
3597 /**
3598  *	t3_config_trace_filter - configure one of the tracing filters
3599  *	@adapter: the adapter
3600  *	@tp: the desired trace filter parameters
3601  *	@filter_index: which filter to configure
3602  *	@invert: if set non-matching packets are traced instead of matching ones
3603  *	@invert: if set, non-matching packets are traced instead of matching ones
3604  *
3605  *	Configures one of the tracing filters available in HW.
3606  */
3607 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3608 			    int filter_index, int invert, int enable)
3609 {
3610 	u32 addr, key[4], mask[4];
3611 
3612 	key[0] = tp->sport | (tp->sip << 16);
3613 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
3614 	key[2] = tp->dip;
3615 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3616 
3617 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3618 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3619 	mask[2] = tp->dip_mask;
3620 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3621 
3622 	if (invert)
3623 		key[3] |= (1 << 29);
3624 	if (enable)
3625 		key[3] |= (1 << 28);
3626 
3627 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3628 	tp_wr_indirect(adapter, addr++, key[0]);
3629 	tp_wr_indirect(adapter, addr++, mask[0]);
3630 	tp_wr_indirect(adapter, addr++, key[1]);
3631 	tp_wr_indirect(adapter, addr++, mask[1]);
3632 	tp_wr_indirect(adapter, addr++, key[2]);
3633 	tp_wr_indirect(adapter, addr++, mask[2]);
3634 	tp_wr_indirect(adapter, addr++, key[3]);
3635 	tp_wr_indirect(adapter, addr,   mask[3]);
3636 	(void) t3_read_reg(adapter, A_TP_PIO_DATA);
3637 }
3638 
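/*
 * Illustrative usage sketch (hypothetical values, assuming a set mask bit
 * means the corresponding key bit must match): trace Tx TCP packets with
 * destination port 80 on filter 0, leaving all other fields wildcarded.
 */
#if 0
static void trace_filter_example(adapter_t *adapter)
{
	struct trace_params tp = { 0 };

	tp.proto = 6;			/* TCP */
	tp.proto_mask = 0xff;
	tp.dport = 80;
	tp.dport_mask = 0xffff;
	t3_config_trace_filter(adapter, &tp, 0, 0, 1);	/* enable, no invert */
}
#endif
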
3639 /**
3640  *	t3_query_trace_filter - query a tracing filter
3641  *	@adapter: the adapter
3642  *	@tp: where to store the current trace filter parameters
3643  *	@filter_index: which filter to query
3644  *	@inverted: where to store whether the filter is inverted
3645  *	@enabled: where to store whether the filter is enabled
3646  *
3647  *	Returns the current settings of the specified HW tracing filter.
3648  */
3649 void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp,
3650 			   int filter_index, int *inverted, int *enabled)
3651 {
3652 	u32 addr, key[4], mask[4];
3653 
3654 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3655 	key[0]  = tp_rd_indirect(adapter, addr++);
3656 	mask[0] = tp_rd_indirect(adapter, addr++);
3657 	key[1]  = tp_rd_indirect(adapter, addr++);
3658 	mask[1] = tp_rd_indirect(adapter, addr++);
3659 	key[2]  = tp_rd_indirect(adapter, addr++);
3660 	mask[2] = tp_rd_indirect(adapter, addr++);
3661 	key[3]  = tp_rd_indirect(adapter, addr++);
3662 	mask[3] = tp_rd_indirect(adapter, addr);
3663 
3664 	tp->sport = key[0] & 0xffff;
3665 	tp->sip   = (key[0] >> 16) | ((key[1] & 0xffff) << 16);
3666 	tp->dport = key[1] >> 16;
3667 	tp->dip   = key[2];
3668 	tp->proto = key[3] & 0xff;
3669 	tp->vlan  = key[3] >> 8;
3670 	tp->intf  = key[3] >> 20;
3671 
3672 	tp->sport_mask = mask[0] & 0xffff;
3673 	tp->sip_mask   = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16);
3674 	tp->dport_mask = mask[1] >> 16;
3675 	tp->dip_mask   = mask[2];
3676 	tp->proto_mask = mask[3] & 0xff;
3677 	tp->vlan_mask  = mask[3] >> 8;
3678 	tp->intf_mask  = mask[3] >> 20;
3679 
3680 	*inverted = key[3] & (1 << 29);
3681 	*enabled  = key[3] & (1 << 28);
3682 }
3683 
3684 /**
3685  *	t3_config_sched - configure a HW traffic scheduler
3686  *	@adap: the adapter
3687  *	@kbps: target rate in Kbps
3688  *	@sched: the scheduler index
3689  *
3690  *	Configure a Tx HW scheduler for the target rate.
3691  */
3692 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3693 {
3694 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3695 	unsigned int clk = adap->params.vpd.cclk * 1000;
3696 	unsigned int selected_cpt = 0, selected_bpt = 0;
3697 
3698 	if (kbps > 0) {
3699 		kbps *= 125;     /* -> bytes */
3700 		for (cpt = 1; cpt <= 255; cpt++) {
3701 			tps = clk / cpt;
3702 			bpt = (kbps + tps / 2) / tps;
3703 			if (bpt > 0 && bpt <= 255) {
3704 				v = bpt * tps;
3705 				delta = v >= kbps ? v - kbps : kbps - v;
3706 				if (delta < mindelta) {
3707 					mindelta = delta;
3708 					selected_cpt = cpt;
3709 					selected_bpt = bpt;
3710 				}
3711 			} else if (selected_cpt)
3712 				break;
3713 		}
3714 		if (!selected_cpt)
3715 			return -EINVAL;
3716 	}
3717 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3718 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3719 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3720 	if (sched & 1)
3721 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3722 	else
3723 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3724 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3725 	return 0;
3726 }
3727 
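/*
 * Worked example for the rate search above (illustrative): with a 200 MHz
 * core clock and kbps = 10000000 (10 Gbps), the target is 1250000000
 * bytes/s.  The search lands on cpt = 4, bpt = 25: tps = 50000000 ticks/s
 * and 25 * 50000000 = 1250000000 bytes/s, i.e. zero error.
 */
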
3728 /**
3729  *	t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3730  *	@adap: the adapter
3731  *	@sched: the scheduler index
3732  *	@ipg: the interpacket delay in tenths of nanoseconds
3733  *
3734  *	Set the interpacket delay for a HW packet rate scheduler.
3735  */
3736 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3737 {
3738 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3739 
3740 	/* convert ipg to nearest number of core clocks */
3741 	ipg *= core_ticks_per_usec(adap);
3742 	ipg = (ipg + 5000) / 10000;
3743 	if (ipg > 0xffff)
3744 		return -EINVAL;
3745 
3746 	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3747 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3748 	if (sched & 1)
3749 		v = (v & 0xffff) | (ipg << 16);
3750 	else
3751 		v = (v & 0xffff0000) | ipg;
3752 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3753 	t3_read_reg(adap, A_TP_TM_PIO_DATA);
3754 	return 0;
3755 }
3756 
3757 /**
3758  *	t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3759  *	@adap: the adapter
3760  *	@sched: the scheduler index
3761  *	@kbps: where to store the rate in Kbps
3762  *	@ipg: where to store the interpacket delay in tenths of nanoseconds
3763  *
3764  *	Return the current configuration of a HW Tx scheduler.
3765  */
3766 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3767 		     unsigned int *ipg)
3768 {
3769 	unsigned int v, addr, bpt, cpt;
3770 
3771 	if (kbps) {
3772 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3773 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3774 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3775 		if (sched & 1)
3776 			v >>= 16;
3777 		bpt = (v >> 8) & 0xff;
3778 		cpt = v & 0xff;
3779 		if (!cpt)
3780 			*kbps = 0;        /* scheduler disabled */
3781 		else {
3782 			v = (adap->params.vpd.cclk * 1000) / cpt;
3783 			*kbps = (v * bpt) / 125;
3784 		}
3785 	}
3786 	if (ipg) {
3787 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3788 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3789 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3790 		if (sched & 1)
3791 			v >>= 16;
3792 		v &= 0xffff;
3793 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3794 	}
3795 }
3796 
3797 /**
3798  *	tp_init - configure TP
3799  *	@adap: the adapter
3800  *	@p: TP configuration parameters
3801  *
3802  *	Initializes the TP HW module.
3803  */
3804 static int tp_init(adapter_t *adap, const struct tp_params *p)
3805 {
3806 	int busy = 0;
3807 
3808 	tp_config(adap, p);
3809 	t3_set_vlan_accel(adap, 3, 0);
3810 
3811 	if (is_offload(adap)) {
3812 		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3813 		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3814 		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3815 				       0, 1000, 5);
3816 		if (busy)
3817 			CH_ERR(adap, "TP initialization timed out\n");
3818 	}
3819 
3820 	if (!busy)
3821 		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3822 	return busy;
3823 }
3824 
3825 /**
3826  *	t3_mps_set_active_ports - configure port failover
3827  *	@adap: the adapter
3828  *	@port_mask: bitmap of active ports
3829  *
3830  *	Sets the active ports according to the supplied bitmap.
3831  */
3832 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3833 {
3834 	if (port_mask & ~((1 << adap->params.nports) - 1))
3835 		return -EINVAL;
3836 	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3837 			 port_mask << S_PORT0ACTIVE);
3838 	return 0;
3839 }
3840 
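/*
 * Illustrative usage sketch: on a two-port adapter, fail all traffic over
 * to port 0 by making it the only active port.
 */
#if 0
static int failover_to_port0_example(adapter_t *adap)
{
	return t3_mps_set_active_ports(adap, 1 << 0);	/* port 0 only */
}
#endif
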
3841 /**
3842  * 	chan_init_hw - channel-dependent HW initialization
3843  *	@adap: the adapter
3844  *	@chan_map: bitmap of Tx channels being used
3845  *
3846  *	Perform the bits of HW initialization that are dependent on the Tx
3847  *	channels being used.
3848  */
3849 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3850 {
3851 	int i;
3852 
3853 	if (chan_map != 3) {                                 /* one channel */
3854 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3855 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3856 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3857 			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3858 					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3859 		t3_write_reg(adap, A_PM1_TX_CFG,
3860 			     chan_map == 1 ? 0xffffffff : 0);
3861 		if (chan_map == 2)
3862 			t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3863 				     V_TX_MOD_QUEUE_REQ_MAP(0xff));
3864 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3865 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3866 	} else {                                             /* two channels */
3867 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3868 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3869 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3870 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3871 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3872 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3873 			     F_ENFORCEPKT);
3874 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3875 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3876 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3877 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3878 		for (i = 0; i < 16; i++)
3879 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3880 				     (i << 16) | 0x1010);
3881 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3882 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
3883 	}
3884 }
3885 
3886 static int calibrate_xgm(adapter_t *adapter)
3887 {
3888 	if (uses_xaui(adapter)) {
3889 		unsigned int v, i;
3890 
3891 		for (i = 0; i < 5; ++i) {
3892 			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3893 			(void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3894 			msleep(1);
3895 			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3896 			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3897 				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3898 					     V_XAUIIMP(G_CALIMP(v) >> 2));
3899 				return 0;
3900 			}
3901 		}
3902 		CH_ERR(adapter, "MAC calibration failed\n");
3903 		return -1;
3904 	} else {
3905 		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3906 			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3907 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3908 				 F_XGM_IMPSETUPDATE);
3909 	}
3910 	return 0;
3911 }
3912 
3913 static void calibrate_xgm_t3b(adapter_t *adapter)
3914 {
3915 	if (!uses_xaui(adapter)) {
3916 		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3917 			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3918 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3919 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3920 				 F_XGM_IMPSETUPDATE);
3921 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3922 				 0);
3923 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3924 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3925 	}
3926 }
3927 
3928 struct mc7_timing_params {
3929 	unsigned char ActToPreDly;
3930 	unsigned char ActToRdWrDly;
3931 	unsigned char PreCyc;
3932 	unsigned char RefCyc[5];
3933 	unsigned char BkCyc;
3934 	unsigned char WrToRdDly;
3935 	unsigned char RdToWrDly;
3936 };
3937 
3938 /*
3939  * Write a value to a register and check that the write completed.  These
3940  * writes normally complete in a cycle or two, so one read should suffice.
3941  * The very first read exists to flush the posted write to the device.
3942  */
3943 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3944 {
3945 	t3_write_reg(adapter, addr, val);
3946 	(void) t3_read_reg(adapter, addr);                   /* flush */
3947 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3948 		return 0;
3949 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3950 	return -EIO;
3951 }
3952 
3953 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3954 {
3955 	static const unsigned int mc7_mode[] = {
3956 		0x632, 0x642, 0x652, 0x432, 0x442
3957 	};
3958 	static const struct mc7_timing_params mc7_timings[] = {
3959 		{ 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3960 		{ 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3961 		{ 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3962 		{ 9,  3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3963 		{ 9,  4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3964 	};
3965 
3966 	u32 val;
3967 	unsigned int width, density, slow, attempts;
3968 	adapter_t *adapter = mc7->adapter;
3969 	const struct mc7_timing_params *p = &mc7_timings[mem_type];
3970 
3971 	if (!mc7->size)
3972 		return 0;
3973 
3974 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3975 	slow = val & F_SLOW;
3976 	width = G_WIDTH(val);
3977 	density = G_DEN(val);
3978 
3979 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3980 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */
3981 	msleep(1);
3982 
3983 	if (!slow) {
3984 		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3985 		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3986 		msleep(1);
3987 		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3988 		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3989 			CH_ERR(adapter, "%s MC7 calibration timed out\n",
3990 			       mc7->name);
3991 			goto out_fail;
3992 		}
3993 	}
3994 
3995 	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3996 		     V_ACTTOPREDLY(p->ActToPreDly) |
3997 		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3998 		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3999 		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
4000 
4001 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
4002 		     val | F_CLKEN | F_TERM150);
4003 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
4004 
4005 	if (!slow)
4006 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
4007 				 F_DLLENB);
4008 	udelay(1);
4009 
4010 	val = slow ? 3 : 6;
4011 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
4012 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
4013 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
4014 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
4015 		goto out_fail;
4016 
4017 	if (!slow) {
4018 		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
4019 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
4020 				 F_DLLRST, 0);
4021 		udelay(5);
4022 	}
4023 
4024 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
4025 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
4026 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
4027 	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
4028 		       mc7_mode[mem_type]) ||
4029 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
4030 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
4031 		goto out_fail;
4032 
4033 	/* mc7_clock is in KHz; compute ticks per 7.8125 us refresh interval */
4034 	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;  /* KHz * 7812.5 */
4035 	mc7_clock /= 1000000;                          /* fixed-point scale down */
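	/*
	 * Worked example (illustrative): at mc7_clock = 200000 KHz the
	 * expression above yields 1562500000 / 1000000 = 1562 ticks,
	 * matching the 7.8125 us refresh period (200e6 * 7.8125e-6 = 1562.5).
	 */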
4036 
4037 	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
4038 		     F_PERREFEN | V_PREREFDIV(mc7_clock));
4039 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
4040 
4041 	t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
4042 		     F_ECCGENEN | F_ECCCHKEN);
4043 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
4044 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
4045 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
4046 		     (mc7->size << width) - 1);
4047 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
4048 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
4049 
4050 	attempts = 50;
4051 	do {
4052 		msleep(250);
4053 		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
4054 	} while ((val & F_BUSY) && --attempts);
4055 	if (val & F_BUSY) {
4056 		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
4057 		goto out_fail;
4058 	}
4059 
4060 	/* Enable normal memory accesses. */
4061 	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
4062 	return 0;
4063 
4064  out_fail:
4065 	return -1;
4066 }
4067 
4068 static void config_pcie(adapter_t *adap)
4069 {
4070 	static const u16 ack_lat[4][6] = {
4071 		{ 237, 416, 559, 1071, 2095, 4143 },
4072 		{ 128, 217, 289, 545, 1057, 2081 },
4073 		{ 73, 118, 154, 282, 538, 1050 },
4074 		{ 67, 107, 86, 150, 278, 534 }
4075 	};
4076 	static const u16 rpl_tmr[4][6] = {
4077 		{ 711, 1248, 1677, 3213, 6285, 12429 },
4078 		{ 384, 651, 867, 1635, 3171, 6243 },
4079 		{ 219, 354, 462, 846, 1614, 3150 },
4080 		{ 201, 321, 258, 450, 834, 1602 }
4081 	};
4082 
4083 	u16 val, devid;
4084 	unsigned int log2_width, pldsize;
4085 	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
4086 
4087 	t3_os_pci_read_config_2(adap,
4088 				adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4089 				&val);
4090 	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
4091 
4092 	/*
4093 	 * For PCIe bridge compatibility, Gen2 adapters require the minimum
4094 	 * Max_Read_Request_Size.
4095 	 */
4096 	t3_os_pci_read_config_2(adap, 0x2, &devid);
4097 	if (devid == 0x37) {
4098 		t3_os_pci_write_config_2(adap,
4099 		    adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4100 		    val & ~PCI_EXP_DEVCTL_READRQ & ~PCI_EXP_DEVCTL_PAYLOAD);
4101 		pldsize = 0;
4102 	}
4103 
4104 	t3_os_pci_read_config_2(adap,
4105 				adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
4106 			       	&val);
4107 
4108 	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
4109 	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
4110 			G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
4111 	log2_width = fls(adap->params.pci.width) - 1;
4112 	acklat = ack_lat[log2_width][pldsize];
4113 	if (val & 1)                            /* check LOsEnable */
4114 		acklat += fst_trn_tx * 4;
4115 	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
4116 
4117 	if (adap->params.rev == 0)
4118 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
4119 				 V_T3A_ACKLAT(M_T3A_ACKLAT),
4120 				 V_T3A_ACKLAT(acklat));
4121 	else
4122 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
4123 				 V_ACKLAT(acklat));
4124 
4125 	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
4126 			 V_REPLAYLMT(rpllmt));
4127 
4128 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
4129 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
4130 			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
4131 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
4132 }
4133 
4134 /**
4135  * 	t3_init_hw - initialize and configure T3 HW modules
4136  * 	@adapter: the adapter
4137  * 	@fw_params: initial parameters to pass to firmware (optional)
4138  *
4139  *	Initialize and configure T3 HW modules.  This performs the
4140  *	initialization steps that need to be done once after a card is reset.
4141  *	MAC and PHY initialization is handled separately whenever a port is
4142  *	enabled.
4143  *
4144  *	@fw_params are passed to FW and their value is platform dependent.
4145  *	Only the top 8 bits are available for use, the rest must be 0.
4146  */
4147 int t3_init_hw(adapter_t *adapter, u32 fw_params)
4148 {
4149 	int err = -EIO, attempts, i;
4150 	const struct vpd_params *vpd = &adapter->params.vpd;
4151 
4152 	if (adapter->params.rev > 0)
4153 		calibrate_xgm_t3b(adapter);
4154 	else if (calibrate_xgm(adapter))
4155 		goto out_err;
4156 
4157 	if (adapter->params.nports > 2)
4158 		t3_mac_init(&adap2pinfo(adapter, 0)->mac);
4159 
4160 	if (vpd->mclk) {
4161 		partition_mem(adapter, &adapter->params.tp);
4162 
4163 		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
4164 		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
4165 		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
4166 		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
4167 			        adapter->params.mc5.nfilters,
4168 			       	adapter->params.mc5.nroutes))
4169 			goto out_err;
4170 
4171 		for (i = 0; i < 32; i++)
4172 			if (clear_sge_ctxt(adapter, i, F_CQ))
4173 				goto out_err;
4174 	}
4175 
4176 	if (tp_init(adapter, &adapter->params.tp))
4177 		goto out_err;
4178 
4179 	t3_tp_set_coalescing_size(adapter,
4180 				  min(adapter->params.sge.max_pkt_size,
4181 				      MAX_RX_COALESCING_LEN), 1);
4182 	t3_tp_set_max_rxsize(adapter,
4183 			     min(adapter->params.sge.max_pkt_size, 16384U));
4184 	ulp_config(adapter, &adapter->params.tp);
4185 	if (is_pcie(adapter))
4186 		config_pcie(adapter);
4187 	else
4188 		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
4189 				 F_DMASTOPEN | F_CLIDECEN);
4190 
4191 	if (adapter->params.rev == T3_REV_C)
4192 		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
4193 				 F_CFG_CQE_SOP_MASK);
4194 
4195 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
4196 	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
4197 	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
4198 	chan_init_hw(adapter, adapter->params.chan_map);
4199 	t3_sge_init(adapter, &adapter->params.sge);
4200 	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
4201 
4202 	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
4203 
4204 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
4205 	t3_write_reg(adapter, A_CIM_BOOT_CFG,
4206 		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
4207 	(void) t3_read_reg(adapter, A_CIM_BOOT_CFG);    /* flush */
4208 
4209 	attempts = 100;
4210 	do {                          /* wait for uP to initialize */
4211 		msleep(20);
4212 	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
4213 	if (!attempts) {
4214 		CH_ERR(adapter, "uP initialization timed out\n");
4215 		goto out_err;
4216 	}
4217 
4218 	err = 0;
4219  out_err:
4220 	return err;
4221 }
4222 
4223 /**
4224  *	get_pci_mode - determine a card's PCI mode
4225  *	@adapter: the adapter
4226  *	@p: where to store the PCI settings
4227  *
4228  *	Determines a card's PCI mode and associated parameters, such as speed
4229  *	and width.
4230  */
4231 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
4232 {
4233 	static unsigned short speed_map[] = { 33, 66, 100, 133 };
4234 	u32 pci_mode, pcie_cap;
4235 
4236 	pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4237 	if (pcie_cap) {
4238 		u16 val;
4239 
4240 		p->variant = PCI_VARIANT_PCIE;
4241 		p->pcie_cap_addr = pcie_cap;
4242 		t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
4243 					&val);
4244 		p->width = (val >> 4) & 0x3f;
4245 		return;
4246 	}
4247 
4248 	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
4249 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
4250 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
4251 	pci_mode = G_PCIXINITPAT(pci_mode);
4252 	if (pci_mode == 0)
4253 		p->variant = PCI_VARIANT_PCI;
4254 	else if (pci_mode < 4)
4255 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
4256 	else if (pci_mode < 8)
4257 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
4258 	else
4259 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
4260 }
4261 
4262 /**
4263  *	init_link_config - initialize a link's SW state
4264  *	@lc: structure holding the link state
4265  *	@caps: link capabilities
4266  *
4267  *	Initializes the SW state maintained for each link, including the link's
4268  *	capabilities and default speed/duplex/flow-control/autonegotiation
4269  *	settings.
4270  */
4271 static void __devinit init_link_config(struct link_config *lc,
4272 				       unsigned int caps)
4273 {
4274 	lc->supported = caps;
4275 	lc->requested_speed = lc->speed = SPEED_INVALID;
4276 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
4277 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4278 	if (lc->supported & SUPPORTED_Autoneg) {
4279 		lc->advertising = lc->supported;
4280 		lc->autoneg = AUTONEG_ENABLE;
4281 		lc->requested_fc |= PAUSE_AUTONEG;
4282 	} else {
4283 		lc->advertising = 0;
4284 		lc->autoneg = AUTONEG_DISABLE;
4285 	}
4286 }
4287 
4288 /**
4289  *	mc7_calc_size - calculate MC7 memory size
4290  *	@cfg: the MC7 configuration
4291  *
4292  *	Calculates the size of an MC7 memory in bytes from the value of its
4293  *	configuration register.
4294  */
4295 static unsigned int __devinit mc7_calc_size(u32 cfg)
4296 {
4297 	unsigned int width = G_WIDTH(cfg);
4298 	unsigned int banks = !!(cfg & F_BKS) + 1;
4299 	unsigned int org = !!(cfg & F_ORG) + 1;
4300 	unsigned int density = G_DEN(cfg);
4301 	unsigned int MBs = ((256 << density) * banks) / (org << width);
4302 
4303 	return MBs << 20;
4304 }
4305 
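/*
 * Worked example for the size formula above (illustrative): with
 * density = 1, two banks (F_BKS set), org = 2 and width code 1, the size
 * is ((256 << 1) * 2) / (2 << 1) = 256 MB, returned as 256 << 20 bytes.
 */
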
4306 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
4307 			       unsigned int base_addr, const char *name)
4308 {
4309 	u32 cfg;
4310 
4311 	mc7->adapter = adapter;
4312 	mc7->name = name;
4313 	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
4314 	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
4315 	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4316 	mc7->width = G_WIDTH(cfg);
4317 }
4318 
4319 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
4320 {
4321 	u16 devid;
4322 
4323 	mac->adapter = adapter;
4324 	mac->multiport = adapter->params.nports > 2;
4325 	if (mac->multiport) {
4326 		mac->ext_port = (unsigned char)index;
4327 		mac->nucast = 8;
4328 	} else
4329 		mac->nucast = 1;
4330 
4331 	/* A Gen2 adapter uses VPD xauicfg[] to tell the driver which MAC is
4332 	 * connected to each port; it is supposed to use xgmac0 for both ports.
4333 	 */
4334 	t3_os_pci_read_config_2(adapter, 0x2, &devid);
4335 
4336 	if (mac->multiport ||
4337 	    (!adapter->params.vpd.xauicfg[1] && (devid == 0x37)))
4338 		index = 0;
4339 
4340 	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
4341 
4342 	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
4343 		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
4344 			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
4345 		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
4346 				 F_ENRGMII, 0);
4347 	}
4348 }
4349 
4350 /**
4351  *	early_hw_init - HW initialization done at card detection time
4352  *	@adapter: the adapter
4353  *	@ai: contains information about the adapter type and properties
4354  *
4355  *	Performs the part of HW initialization that is done early on when the
4356  *	driver first detects the card.  Most of the HW state is initialized
4357  *	lazily later on when a port or an offload function are first used.
4358  */
4359 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
4360 {
4361 	u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
4362 			      3 : 2);
4363 	u32 gpio_out = ai->gpio_out;
4364 
4365 	mi1_init(adapter, ai);
4366 	t3_write_reg(adapter, A_I2C_CFG,                  /* set for 80KHz */
4367 		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
4368 	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
4369 		     gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
4370 	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
4371 	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
4372 
4373 	if (adapter->params.rev == 0 || !uses_xaui(adapter))
4374 		val |= F_ENRGMII;
4375 
4376 	/* Enable MAC clocks so we can access the registers */
4377 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4378 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4379 
4380 	val |= F_CLKDIVRESET_;
4381 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4382 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4383 	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
4384 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4385 }
4386 
4387 /**
4388  *	t3_reset_adapter - reset the adapter
4389  *	@adapter: the adapter
4390  *
4391  * 	Reset the adapter.
4392  */
4393 int t3_reset_adapter(adapter_t *adapter)
4394 {
4395 	int i, save_and_restore_pcie =
4396 	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
4397 	uint16_t devid = 0;
4398 
4399 	if (save_and_restore_pcie)
4400 		t3_os_pci_save_state(adapter);
4401 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
4402 
4403 	/*
4404 	 * Delay.  Give the device some time to reset fully.
4405 	 * XXX The delay time should be modified.
4406 	 */
4407 	for (i = 0; i < 10; i++) {
4408 		msleep(50);
4409 		t3_os_pci_read_config_2(adapter, 0x00, &devid);
4410 		if (devid == 0x1425)
4411 			break;
4412 	}
4413 
4414 	if (devid != 0x1425)
4415 		return -1;
4416 
4417 	if (save_and_restore_pcie)
4418 		t3_os_pci_restore_state(adapter);
4419 	return 0;
4420 }
4421 
4422 static int init_parity(adapter_t *adap)
4423 {
4424 	int i, err, addr;
4425 
4426 	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
4427 		return -EBUSY;
4428 
4429 	for (err = i = 0; !err && i < 16; i++)
4430 		err = clear_sge_ctxt(adap, i, F_EGRESS);
4431 	for (i = 0xfff0; !err && i <= 0xffff; i++)
4432 		err = clear_sge_ctxt(adap, i, F_EGRESS);
4433 	for (i = 0; !err && i < SGE_QSETS; i++)
4434 		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
4435 	if (err)
4436 		return err;
4437 
4438 	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
4439 	for (i = 0; i < 4; i++)
4440 		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
4441 			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
4442 				     F_IBQDBGWR | V_IBQDBGQID(i) |
4443 				     V_IBQDBGADDR(addr));
4444 			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
4445 					      F_IBQDBGBUSY, 0, 2, 1);
4446 			if (err)
4447 				return err;
4448 		}
4449 	return 0;
4450 }
4451 
4452 /**
4453  *	t3_prep_adapter - prepare SW and HW for operation
4454  *	@adapter: the adapter
4455  *	@reset: whether to reset the adapter
4456  *
4457  *	Initialize adapter SW state for the various HW modules, set initial
4458  *	values for some adapter tunables, take PHYs out of reset, and
4459  *	initialize the MDIO interface.
4460  */
4461 int __devinit t3_prep_adapter(adapter_t *adapter,
4462 			      const struct adapter_info *ai, int reset)
4463 {
4464 	int ret;
4465 	unsigned int i, j = 0;
4466 
4467 	get_pci_mode(adapter, &adapter->params.pci);
4468 
4469 	adapter->params.info = ai;
4470 	adapter->params.nports = ai->nports0 + ai->nports1;
4471 	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
4472 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
4473 
4474 	/*
4475 	 * We used to only run the "adapter check task" once a second if
4476 	 * we had PHYs which didn't support interrupts (we would check
4477 	 * their link status once a second).  Now we check other conditions
4478 	 * in that routine which would [potentially] impose a very high
4479 	 * interrupt load on the system.  As such, we now always scan the
4480 	 * adapter state once a second ...
4481 	 */
4482 	adapter->params.linkpoll_period = 10;
4483 
4484 	if (adapter->params.nports > 2)
4485 		adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
4486 	else
4487 		adapter->params.stats_update_period = is_10G(adapter) ?
4488 			MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
4489 	adapter->params.pci.vpd_cap_addr =
4490 		t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4491 
4492 	ret = get_vpd_params(adapter, &adapter->params.vpd);
4493 	if (ret < 0)
4494 		return ret;
4495 
4496 	if (reset && t3_reset_adapter(adapter))
4497 		return -1;
4498 
4499 	if (adapter->params.vpd.mclk) {
4500 		struct tp_params *p = &adapter->params.tp;
4501 
4502 		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
4503 		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
4504 		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
4505 
4506 		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
4507 		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
4508 		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
4509 		p->cm_size = t3_mc7_size(&adapter->cm);
4510 		p->chan_rx_size = p->pmrx_size / 2;     /* only 1 Rx channel */
4511 		p->chan_tx_size = p->pmtx_size / p->nchan;
4512 		p->rx_pg_size = 64 * 1024;
4513 		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
4514 		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
4515 		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
4516 		p->ntimer_qs = p->cm_size >= (128 << 20) ||
4517 			       adapter->params.rev > 0 ? 12 : 6;
4518 		p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
4519 			 1;
4520 		p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
4521 	}
4522 
4523 	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
4524 				  t3_mc7_size(&adapter->pmtx) &&
4525 				  t3_mc7_size(&adapter->cm);
4526 
4527 	t3_sge_prep(adapter, &adapter->params.sge);
4528 
4529 	if (is_offload(adapter)) {
4530 		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
4531 		/* PR 6487. TOE and filtering are mutually exclusive */
4532 		adapter->params.mc5.nfilters = 0;
4533 		adapter->params.mc5.nroutes = 0;
4534 		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
4535 
4536 		init_mtus(adapter->params.mtus);
4537 		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4538 	}
4539 
4540 	early_hw_init(adapter, ai);
4541 	ret = init_parity(adapter);
4542 	if (ret)
4543 		return ret;
4544 
4545 	if (adapter->params.nports > 2 &&
4546 	    (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
4547 		return ret;
4548 
4549 	for_each_port(adapter, i) {
4550 		u8 hw_addr[6];
4551 		const struct port_type_info *pti;
4552 		struct port_info *p = adap2pinfo(adapter, i);
4553 
4554 		for (;;) {
4555 			unsigned port_type = adapter->params.vpd.port_type[j];
4556 			if (port_type) {
4557 				if (port_type < ARRAY_SIZE(port_types)) {
4558 					pti = &port_types[port_type];
4559 					break;
4560 				} else
4561 					return -EINVAL;
4562 			}
4563 			j++;
4564 			if (j >= ARRAY_SIZE(adapter->params.vpd.port_type))
4565 				return -EINVAL;
4566 		}
4567 		ret = pti->phy_prep(p, ai->phy_base_addr + j,
4568 				    ai->mdio_ops);
4569 		if (ret)
4570 			return ret;
4571 		mac_prep(&p->mac, adapter, j);
4572 		++j;
4573 
4574 		/*
4575 		 * The VPD EEPROM stores the base Ethernet address for the
4576 		 * card.  A port's address is derived from the base by adding
4577 		 * the port's index to the base's low octet.
4578 		 */
4579 		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
4580 		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
4581 
4582 		t3_os_set_hw_addr(adapter, i, hw_addr);
4583 		init_link_config(&p->link_config, p->phy.caps);
4584 		p->phy.ops->power_down(&p->phy, 1);
4585 
4586 		/*
4587 		 * If the PHY doesn't support interrupts for link status
4588 		 * changes, schedule a scan of the adapter links at least
4589 		 * once a second.
4590 		 */
4591 		if (!(p->phy.caps & SUPPORTED_IRQ) &&
4592 		    adapter->params.linkpoll_period > 10)
4593 			adapter->params.linkpoll_period = 10;
4594 	}
4595 
4596 	return 0;
4597 }
4598 
4599 /**
4600  *	t3_reinit_adapter - prepare HW for operation again
4601  *	@adapter: the adapter
4602  *
4603  *	Put HW in the same state as t3_prep_adapter() without any changes to
4604  *	SW state.  This is a cut-down version of t3_prep_adapter() intended
4605  *	to be used after events that wipe out HW state but preserve SW state,
4606  *	e.g., EEH.  The device must be reset before calling this.
4607  */
4608 int t3_reinit_adapter(adapter_t *adap)
4609 {
4610 	unsigned int i;
4611 	int ret, j = 0;
4612 
4613 	early_hw_init(adap, adap->params.info);
4614 	ret = init_parity(adap);
4615 	if (ret)
4616 		return ret;
4617 
4618 	if (adap->params.nports > 2 &&
4619 	    (ret = t3_vsc7323_init(adap, adap->params.nports)))
4620 		return ret;
4621 
4622 	for_each_port(adap, i) {
4623 		const struct port_type_info *pti;
4624 		struct port_info *p = adap2pinfo(adap, i);
4625 
4626 		for (;;) {
4627 			unsigned port_type = adap->params.vpd.port_type[j];
4628 			if (port_type) {
4629 				if (port_type < ARRAY_SIZE(port_types)) {
4630 					pti = &port_types[port_type];
4631 					break;
4632 				} else
4633 					return -EINVAL;
4634 			}
4635 			j++;
4636 			if (j >= ARRAY_SIZE(adap->params.vpd.port_type))
4637 				return -EINVAL;
4638 		}
4639 		ret = pti->phy_prep(p, p->phy.addr, NULL);
4640 		if (ret)
4641 			return ret;
4642 		p->phy.ops->power_down(&p->phy, 1);
4643 	}
4644 	return 0;
4645 }
4646 
4647 void t3_led_ready(adapter_t *adapter)
4648 {
4649 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
4650 			 F_GPIO0_OUT_VAL);
4651 }
4652 
4653 void t3_port_failover(adapter_t *adapter, int port)
4654 {
4655 	u32 val;
4656 
4657 	val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4658 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4659 			 val);
4660 }
4661 
4662 void t3_failover_done(adapter_t *adapter, int port)
4663 {
4664 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4665 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
4666 }
4667 
4668 void t3_failover_clear(adapter_t *adapter)
4669 {
4670 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4671 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
4672 }
4673 
4674 static int t3_cim_hac_read(adapter_t *adapter, u32 addr, u32 *val)
4675 {
4676 	u32 v;
4677 
4678 	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4679 	if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4680 				F_HOSTBUSY, 0, 10, 10, &v))
4681 		return -EIO;
4682 
4683 	*val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);
4684 
4685 	return 0;
4686 }
4687 
4688 static int t3_cim_hac_write(adapter_t *adapter, u32 addr, u32 val)
4689 {
4690 	u32 v;
4691 
4692 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, val);
4693 
4694 	addr |= F_HOSTWRITE;
4695 	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4696 
4697 	if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4698 				F_HOSTBUSY, 0, 10, 5, &v))
4699 		return -EIO;
4700 	return 0;
4701 }
4702 
4703 int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index,
4704 		 u32 *size, void *data)
4705 {
4706 	u32 v, *buf = data;
4707 	int i, cnt, ret;
4708 
4709 	if (*size < LA_ENTRIES * 4)
4710 		return -EINVAL;
4711 
4712 	ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4713 	if (ret)
4714 		goto out;
4715 
4716 	*stopped = !(v & 1);
4717 
4718 	/* Freeze LA */
4719 	if (!*stopped) {
4720 		ret = t3_cim_hac_write(adapter, LA_CTRL, 0);
4721 		if (ret)
4722 			goto out;
4723 	}
4724 
4725 	for (i = 0; i < LA_ENTRIES; i++) {
4726 		v = (i << 2) | (1 << 1);
4727 		ret = t3_cim_hac_write(adapter, LA_CTRL, v);
4728 		if (ret)
4729 			goto out;
4730 
4731 		ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4732 		if (ret)
4733 			goto out;
4734 
4735 		cnt = 20;
4736 		while ((v & (1 << 1)) && cnt) {
4737 			udelay(5);
4738 			--cnt;
4739 			ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4740 			if (ret)
4741 				goto out;
4742 		}
4743 
4744 		if (v & (1 << 1))
4745 			return -EIO;
4746 
4747 		ret = t3_cim_hac_read(adapter, LA_DATA, &v);
4748 		if (ret)
4749 			goto out;
4750 
4751 		*buf++ = v;
4752 	}
4753 
4754 	ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4755 	if (ret)
4756 		goto out;
4757 
4758 	*index = (v >> 16) + 4;
4759 	*size = LA_ENTRIES * 4;
4760 out:
4761 	/* Unfreeze LA */
4762 	t3_cim_hac_write(adapter, LA_CTRL, 1);
4763 	return ret;
4764 }
4765 
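/*
 * Illustrative usage sketch (hypothetical caller): snapshot the uP logic
 * analyzer.  The buffer must hold at least LA_ENTRIES * 4 bytes; *size is
 * validated and updated by t3_get_up_la().
 */
#if 0
static int dump_up_la_example(adapter_t *adapter)
{
	u32 stopped, index, size = LA_ENTRIES * 4;
	u32 buf[LA_ENTRIES];
	int ret = t3_get_up_la(adapter, &stopped, &index, &size, buf);

	if (!ret)
		CH_ERR(adapter, "LA %s, next index %u\n",
		       stopped ? "stopped" : "running", index);
	return ret;
}
#endif
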
4766 int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data)
4767 {
4768 	u32 v, *buf = data;
4769 	int i, j, ret;
4770 
4771 	if (*size < IOQ_ENTRIES * sizeof(struct t3_ioq_entry))
4772 		return -EINVAL;
4773 
4774 	for (i = 0; i < 4; i++) {
4775 		ret = t3_cim_hac_read(adapter, (4 * i), &v);
4776 		if (ret)
4777 			goto out;
4778 
4779 		*buf++ = v;
4780 	}
4781 
4782 	for (i = 0; i < IOQ_ENTRIES; i++) {
4783 		u32 base_addr = 0x10 * (i + 1);
4784 
4785 		for (j = 0; j < 4; j++) {
4786 			ret = t3_cim_hac_read(adapter, base_addr + 4 * j, &v);
4787 			if (ret)
4788 				goto out;
4789 
4790 			*buf++ = v;
4791 		}
4792 	}
4793 
4794 	*size = IOQ_ENTRIES * sizeof(struct t3_ioq_entry);
4795 
4796 out:
4797 	return ret;
4798 }
4799 
4800