xref: /freebsd/sys/dev/cxgb/common/cxgb_t3_hw.c (revision f0a75d274af375d15b97b830966b99a02b7db911)
1 /**************************************************************************
2 
3 Copyright (c) 2007, Chelsio Inc.
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15 
16  3. Neither the name of the Chelsio Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19 
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31 
32 ***************************************************************************/
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <dev/cxgb/common/cxgb_common.h>
38 #include <dev/cxgb/common/cxgb_regs.h>
39 #include <dev/cxgb/common/cxgb_sge_defs.h>
40 #include <dev/cxgb/common/cxgb_firmware_exports.h>
41 
42 /**
43  *	t3_wait_op_done_val - wait until an operation is completed
44  *	@adapter: the adapter performing the operation
45  *	@reg: the register to check for completion
46  *	@mask: a single-bit field within @reg that indicates completion
47  *	@polarity: the value of the field when the operation is completed
48  *	@attempts: number of check iterations
49  *	@delay: delay in usecs between iterations
50  *	@valp: where to store the value of the register at completion time
51  *
52  *	Wait until an operation is completed by checking a bit in a register
53  *	up to @attempts times.  If @valp is not NULL the value of the register
54  *	at the time it indicated completion is stored there.  Returns 0 if the
55  *	operation completes and	-EAGAIN	otherwise.
56  */
57 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
58 			int attempts, int delay, u32 *valp)
59 {
60 	while (1) {
61 		u32 val = t3_read_reg(adapter, reg);
62 
63 		if (!!(val & mask) == polarity) {
64 			if (valp)
65 				*valp = val;
66 			return 0;
67 		}
68 		if (--attempts == 0)
69 			return -EAGAIN;
70 		if (delay)
71 			udelay(delay);
72 	}
73 }
74 
75 /**
76  *	t3_write_regs - write a bunch of registers
77  *	@adapter: the adapter to program
78  *	@p: an array of register address/register value pairs
79  *	@n: the number of address/value pairs
80  *	@offset: register address offset
81  *
82  *	Takes an array of register address/register value pairs and writes each
83  *	value to the corresponding register.  Register addresses are adjusted
84  *	by the supplied offset.
85  */
86 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
87 		   unsigned int offset)
88 {
89 	while (n--) {
90 		t3_write_reg(adapter, p->reg_addr + offset, p->val);
91 		p++;
92 	}
93 }
94 
95 /**
96  *	t3_set_reg_field - set a register field to a value
97  *	@adapter: the adapter to program
98  *	@addr: the register address
99  *	@mask: specifies the portion of the register to modify
100  *	@val: the new value for the register field
101  *
102  *	Sets a register field specified by the supplied mask to the
103  *	given value.
104  */
105 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
106 {
107 	u32 v = t3_read_reg(adapter, addr) & ~mask;
108 
109 	t3_write_reg(adapter, addr, v | val);
110 	(void) t3_read_reg(adapter, addr);      /* flush */
111 }
112 
/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		/* select the register, then fetch its value */
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}
135 
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.  Returns 0 on success, -EINVAL for an out-of-range request,
 *	or -EIO if the backdoor engine stays busy.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
	/* per-width tables: how far DATA1 must be shifted and how many
	 * bits each backdoor read contributes, indexed by mc7->width */
	static int shift[] = { 0, 0, 16, 24 };
	static int step[]  = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;  /* # of 64-bit words */
	adapter_t *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* scale the 64-bit word index into the backdoor address unit */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		/* each 64-bit word takes (1 << width) backdoor reads */
		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
				       start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			/* poll up to 10 times for the engine to go idle */
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* full 64 bits arrive in DATA0/DATA1 */
				val64 = t3_read_reg(adap,
						mc7->offset + A_MC7_BD_DATA0);
				val64 |= (u64)val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64)val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
193 
194 /*
195  * Initialize MI1.
196  */
197 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
198 {
199         u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
200         u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
201 		  V_CLKDIV(clkdiv);
202 
203 	if (!(ai->caps & SUPPORTED_10000baseT_Full))
204 		val |= V_ST(1);
205         t3_write_reg(adap, A_MI1_CFG, val);
206 }
207 
208 #define MDIO_ATTEMPTS 10
209 
210 /*
211  * MI1 read/write operations for direct-addressed PHYs.
212  */
/* Read a directly-addressed PHY register; value is returned via *valp. */
static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	/* direct-addressed PHYs have no MMDs */
	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));	/* kick off a read */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
	return ret;
}
231 
/* Write @val to a directly-addressed PHY register. */
static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	/* direct-addressed PHYs have no MMDs */
	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* kick off a write */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	MDIO_UNLOCK(adapter);
	return ret;
}
249 
/* MDIO operation table for direct-addressed PHYs */
static struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};
254 
255 /*
256  * MI1 read/write operations for indirect-addressed PHYs.
257  */
/*
 * Read an indirect-addressed PHY register: first issue the register
 * address (via A_MI1_DATA and op 0), then a read cycle (op 3).
 */
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);	/* the indirect address */
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));	/* read cycle */
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}
279 
/*
 * Write an indirect-addressed PHY register: address cycle (op 0) with the
 * register address in A_MI1_DATA, then a write cycle (op 1) with the value.
 */
static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);	/* the indirect address */
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* write cycle */
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}
300 
/* MDIO operation table for indirect-addressed PHYs */
static struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
305 
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Read-modify-write helper: reads @reg, clears the bits in @clear,
 *	ORs in @set and writes the result back.  Returns the first error
 *	encountered, or 0 on success.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int val;
	int ret = mdio_read(phy, mmd, reg, &val);

	if (ret)
		return ret;
	return mdio_write(phy, mmd, reg, (val & ~clear) | set);
}
330 
/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	/* assert RESET and clear PDOWN in a single register update */
	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	/* poll BMCR once per ms until the self-clearing RESET bit drops */
	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			t3_os_sleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;	/* -1 if the reset never completed */
}
361 
/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	/* gigabit abilities live in MII_CTRL1000; preserve its other bits */
	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;	/* selector field of MII_ADVERTISE: 1 = IEEE 802.3 */
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}
404 
/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed (a negative value leaves speed unchanged)
 *	@duplex: requested PHY duplex (a negative value leaves duplex unchanged)
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		/* forcing a speed implies turning autoneg off */
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}
439 
/*
 * Static per-model adapter descriptions, looked up by index through
 * t3_get_adapter_info().  Initializers are positional; see struct
 * adapter_info for the field meanings (GPIO settings, capability bits,
 * MDIO operation table, and the board name).
 */
static struct adapter_info t3_adap_info[] = {
	{ 2, 0, 0, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	  SUPPORTED_OFFLOAD,
	  &mi1_mdio_ops, "Chelsio PE9000" },
	{ 2, 0, 0, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	  SUPPORTED_OFFLOAD,
	  &mi1_mdio_ops, "Chelsio T302" },
	{ 1, 0, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	  F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 2, 0, 0, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	  F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	  F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
	  &mi1_mdio_ext_ops, "Chelsio T320" },
};
463 
464 /*
465  * Return the adapter_info structure with a given index.  Out-of-range indices
466  * return NULL.
467  */
468 const struct adapter_info *t3_get_adapter_info(unsigned int id)
469 {
470 	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
471 }
472 
#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

/*
 * Per-port-type PHY preparation handlers, capability bits and
 * human-readable names.  NOTE(review): presumably indexed by the port
 * type codes read from the VPD (see get_vpd_params()) -- confirm against
 * callers.  Entry 0 is a placeholder.
 */
static struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	  "10GBASE-XR" },
	{ t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	  "10/100/1000BASE-T" },
	{ t3_mv88e1xxx_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	  "10/100/1000BASE-T" },
	{ t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
	{ NULL, CAPS_10G, "10GBASE-KX4" },
	{ t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
	{ t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	  "10GBASE-SR" },
	{ NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
};

#undef CAPS_1G
#undef CAPS_10G
495 
/*
 * VPD_ENTRY expands to a VPD keyword record: a 2-byte keyword name,
 * a 1-byte length, then @len value bytes.
 */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[16];
	u8  vpdr_tag;
	u8  vpdr_len[2];
	VPD_ENTRY(pn, 16);                     /* part number */
	VPD_ENTRY(ec, 16);                     /* EC level */
	VPD_ENTRY(sn, 16);                     /* serial number */
	VPD_ENTRY(na, 12);                     /* MAC address base */
	VPD_ENTRY(cclk, 6);                    /* core clock */
	VPD_ENTRY(mclk, 6);                    /* mem clock */
	VPD_ENTRY(uclk, 6);                    /* uP clk */
	VPD_ENTRY(mdc, 6);                     /* MDIO clk */
	VPD_ENTRY(mt, 2);                      /* mem timing */
	VPD_ENTRY(xaui0cfg, 6);                /* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);                /* XAUI1 config */
	VPD_ENTRY(port0, 2);                   /* PHY0 complex */
	VPD_ENTRY(port1, 2);                   /* PHY1 complex */
	VPD_ENTRY(port2, 2);                   /* PHY2 complex */
	VPD_ENTRY(port3, 2);                   /* PHY3 complex */
	VPD_ENTRY(rv, 1);                      /* csum */
	u32 pad;                  /* for multiple-of-4 sizing and alignment */
};
527 
#define EEPROM_MAX_POLL   4	/* max polls of the VPD address flag bit */
#define EEPROM_STAT_ADDR  0x4000	/* status word toggled by t3_seeprom_wp() */
#define VPD_BASE          0xc00	/* usual EEPROM offset of the VPD data */
531 
/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* address must be in range (or the status word) and 4-byte aligned */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	/* poll until the hardware sets the flag bit, 10us per iteration */
	do {
		udelay(10);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);	/* VPD data register is little-endian */
	return 0;
}
566 
/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  The flag bit is set when the address is written;
 *	the hardware clears it when the write completes.
 */
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* address must be in range (or the status word) and 4-byte aligned */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	/* poll until the hardware clears the flag bit, 1ms per iteration */
	do {
		t3_os_sleep(1);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
600 
/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM by writing
 *	0xc (or 0 to unprotect) to the EEPROM status word.
 */
int t3_seeprom_wp(adapter_t *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
612 
/*
 * Convert one hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its numeric value.
 */
static unsigned int hex2int(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	return toupper(c) - 'A' + 10;
}
620 
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.  Returns 0 on success
 *	or the error from the underlying EEPROM read.
 */
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;	/* 0x82 = VPD ID string tag */

	/* read the whole structure, one 32-bit word at a time */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* clock parameters are stored as decimal ASCII strings */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
		p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* the MAC address base is stored as 12 hex digits */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
671 
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,           /* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,   /* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,       /* program page */
	SF_WR_DISABLE   = 4,       /* disable writes */
	SF_RD_STATUS    = 5,       /* read status register */
	SF_WR_ENABLE    = 6,       /* enable writes */
	SF_RD_DATA_FAST = 0xb,     /* read flash */
	SF_ERASE_SECTOR = 0xd8,    /* erase sector */

	/* firmware image layout in flash (byte addresses) */
	FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,    /* flash address holding FW version */
	FW_MIN_SIZE = 8            /* at least version and csum */
};
690 
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* BYTECNT is encoded as count - 1; CONT chains the next operation */
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}
717 
/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	/* V_OP(1) selects a write; BYTECNT is encoded as count - 1 */
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
741 
742 /**
743  *	flash_wait_op - wait for a flash operation to complete
744  *	@adapter: the adapter
745  *	@attempts: max number of polls of the status register
746  *	@delay: delay between polls in ms
747  *
748  *	Wait for a flash operation to complete by polling the status register.
749  */
750 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
751 {
752 	int ret;
753 	u32 status;
754 
755 	while (1) {
756 		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
757 		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
758 			return ret;
759 		if (!(status & 1))
760 			return 0;
761 		if (--attempts == 0)
762 			return -EAGAIN;
763 		if (delay)
764 			t3_os_sleep(delay);
765 	}
766 }
767 
/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianess.
 */
int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
		  u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	/* command word: opcode in the low byte, address byte-swapped */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* issue the command; the 1-byte read consumes the dummy cycle
	 * required by the fast-read command */
	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* keep CONT set until the last word */
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
804 
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address, then reads the page back to verify it.
 */
static int t3_write_flash(adapter_t *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* must fit in the flash and within a single 256-byte page */
	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* feed the data up to 4 bytes at a time, most significant first */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* data was advanced past the written bytes in the loop above */
	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}
852 
/* values of the FW version type field (see G_FW_VERSION_TYPE) */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
857 
/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version word from its fixed flash address.
 */
int t3_get_fw_version(adapter_t *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
869 
/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(adapter_t *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	/* unpack the type/major/minor fields of the version word */
	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	/* an exact major.minor match of the T3 flavor is required */
	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	CH_ERR(adapter, "found wrong FW version(%u.%u), "
	    "driver needs version %d.%d\n", major, minor,
	    FW_VERSION_MAJOR, FW_VERSION_MINOR);
	return -EINVAL;
}
900 
901 /**
902  *	t3_flash_erase_sectors - erase a range of flash sectors
903  *	@adapter: the adapter
904  *	@start: the first sector to erase
905  *	@end: the last sector to erase
906  *
907  *	Erases the sectors in the given range.
908  */
909 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
910 {
911 	while (start <= end) {
912 		int ret;
913 
914 		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
915 		    (ret = sf1_write(adapter, 4, 0,
916 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
917 		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
918 			return ret;
919 		start++;
920 	}
921 	return 0;
922 }
923 
/*
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || (size < FW_MIN_SIZE))
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	/* the 1's-complement sum of all image words must be 0xffffffff */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;  /* trim off version and checksum */
	/* program the body in 256-byte (one flash page) chunks */
	for (addr = FW_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	/* write the version word last */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
978 
979 #define CIM_CTL_BASE 0x2000
980 
981 /**
982  *	t3_cim_ctl_blk_read - read a block from CIM control region
983  *
984  *	@adap: the adapter
985  *	@addr: the start address within the CIM control region
986  *	@n: number of words to read
987  *	@valp: where to store the result
988  *
989  *	Reads a block of 4-byte words from the CIM control region.
990  */
991 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
992 			unsigned int *valp)
993 {
994 	int ret = 0;
995 
996 	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
997 		return -EBUSY;
998 
999 	for ( ; !ret && n--; addr += 4) {
1000 		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1001 		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1002 				      0, 5, 2);
1003 		if (!ret)
1004 			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1005 	}
1006 	return ret;
1007 }
1008 
1009 /**
1010  *	t3_link_changed - handle interface link changes
1011  *	@adapter: the adapter
1012  *	@port_id: the port index that changed link state
1013  *
1014  *	Called when a port's link settings change to propagate the new values
1015  *	to the associated PHY and MAC.  After performing the common tasks it
1016  *	invokes an OS-specific handler.
1017  */
1018 void t3_link_changed(adapter_t *adapter, int port_id)
1019 {
1020 	int link_ok, speed, duplex, fc;
1021 	struct cphy *phy = &adapter->port[port_id].phy;
1022 	struct cmac *mac = &adapter->port[port_id].mac;
1023 	struct link_config *lc = &adapter->port[port_id].link_config;
1024 
1025 	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1026 
1027 	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1028 	    uses_xaui(adapter)) {
1029 		if (link_ok)
1030 			t3b_pcs_reset(mac);
1031 		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1032 			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
1033 	}
1034 	lc->link_ok = (unsigned char)link_ok;
1035 	lc->speed = speed < 0 ? SPEED_INVALID : speed;
1036 	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1037 	if (lc->requested_fc & PAUSE_AUTONEG)
1038 		fc &= lc->requested_fc;
1039 	else
1040 		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1041 
1042 	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1043 		/* Set MAC speed, duplex, and flow control to match PHY. */
1044 		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1045 		lc->fc = (unsigned char)fc;
1046 	}
1047 
1048 	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1049 }
1050 
1051 /**
1052  *	t3_link_start - apply link configuration to MAC/PHY
1053  *	@phy: the PHY to setup
1054  *	@mac: the MAC to setup
1055  *	@lc: the requested link configuration
1056  *
1057  *	Set up a port's MAC and PHY according to a desired link configuration.
1058  *	- If the PHY can auto-negotiate first decide what to advertise, then
1059  *	  enable/disable auto-negotiation as desired, and reset.
1060  *	- If the PHY does not auto-negotiate just reset it.
1061  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1062  *	  otherwise do it later based on the outcome of auto-negotiation.
1063  */
1064 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1065 {
1066 	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1067 
1068 	lc->link_ok = 0;
1069 	if (lc->supported & SUPPORTED_Autoneg) {
1070 		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1071 		if (fc) {
1072 			lc->advertising |= ADVERTISED_Asym_Pause;
1073 			if (fc & PAUSE_RX)
1074 				lc->advertising |= ADVERTISED_Pause;
1075 		}
1076 		phy->ops->advertise(phy, lc->advertising);
1077 
1078 		if (lc->autoneg == AUTONEG_DISABLE) {
1079 			lc->speed = lc->requested_speed;
1080 			lc->duplex = lc->requested_duplex;
1081 			lc->fc = (unsigned char)fc;
1082 			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1083 						   fc);
1084 			/* Also disables autoneg */
1085 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1086 			phy->ops->reset(phy, 0);
1087 		} else
1088 			phy->ops->autoneg_enable(phy);
1089 	} else {
1090 		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1091 		lc->fc = (unsigned char)fc;
1092 		phy->ops->reset(phy, 0);
1093 	}
1094 	return 0;
1095 }
1096 
1097 /**
1098  *	t3_set_vlan_accel - control HW VLAN extraction
1099  *	@adapter: the adapter
1100  *	@ports: bitmap of adapter ports to operate on
1101  *	@on: enable (1) or disable (0) HW VLAN extraction
1102  *
1103  *	Enables or disables HW extraction of VLAN tags for the given port.
1104  */
1105 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1106 {
1107 	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1108 			 ports << S_VLANEXTRACTIONENABLE,
1109 			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1110 }
1111 
/*
 * One entry of an interrupt-cause dispatch table as consumed by
 * t3_handle_intr_status().  Tables are terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal:1;  /* whether the condition reported is fatal */
};
1118 
1119 /**
1120  *	t3_handle_intr_status - table driven interrupt handler
1121  *	@adapter: the adapter that generated the interrupt
1122  *	@reg: the interrupt status register to process
1123  *	@mask: a mask to apply to the interrupt status
1124  *	@acts: table of interrupt actions
1125  *	@stats: statistics counters tracking interrupt occurences
1126  *
1127  *	A table driven interrupt handler that applies a set of masks to an
1128  *	interrupt status word and performs the corresponding actions if the
1129  *	interrupts described by the mask have occured.  The actions include
1130  *	optionally printing a warning or alert message, and optionally
1131  *	incrementing a stat counter.  The table is terminated by an entry
1132  *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1133  */
1134 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1135 				 unsigned int mask,
1136 				 const struct intr_info *acts,
1137 				 unsigned long *stats)
1138 {
1139 	int fatal = 0;
1140 	unsigned int status = t3_read_reg(adapter, reg) & mask;
1141 
1142 	for ( ; acts->mask; ++acts) {
1143 		if (!(status & acts->mask)) continue;
1144 		if (acts->fatal) {
1145 			fatal++;
1146 			CH_ALERT(adapter, "%s (0x%x)\n",
1147 				 acts->msg, status & acts->mask);
1148 		} else if (acts->msg)
1149 			CH_WARN(adapter, "%s (0x%x)\n",
1150 				acts->msg, status & acts->mask);
1151 		if (acts->stat_idx >= 0)
1152 			stats[acts->stat_idx]++;
1153 	}
1154 	if (status)                           /* clear processed interrupts */
1155 		t3_write_reg(adapter, reg, status);
1156 	return fatal;
1157 }
1158 
/*
 * Per-module interrupt cause masks used by the enable logic and the
 * handlers below.  Bits left inside comments are deliberately excluded.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1200 
1201 /*
1202  * Interrupt handler for the PCIX1 module.
1203  */
1204 static void pci_intr_handler(adapter_t *adapter)
1205 {
1206 	static struct intr_info pcix1_intr_info[] = {
1207 		{ F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1208 		{ F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1209 		{ F_RCVTARABT, "PCI received target abort", -1, 1 },
1210 		{ F_RCVMSTABT, "PCI received master abort", -1, 1 },
1211 		{ F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1212 		{ F_DETPARERR, "PCI detected parity error", -1, 1 },
1213 		{ F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1214 		{ F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1215 		{ F_RCVSPLCMPERR, "PCI received split completion error", -1,
1216 		  1 },
1217 		{ F_DETCORECCERR, "PCI correctable ECC error",
1218 		  STAT_PCI_CORR_ECC, 0 },
1219 		{ F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1220 		{ F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1221 		{ V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1222 		  1 },
1223 		{ V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1224 		  1 },
1225 		{ V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1226 		  1 },
1227 		{ V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1228 		  "error", -1, 1 },
1229 		{ 0 }
1230 	};
1231 
1232 	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1233 				  pcix1_intr_info, adapter->irq_stats))
1234 		t3_fatal_err(adapter);
1235 }
1236 
1237 /*
1238  * Interrupt handler for the PCIE module.
1239  */
1240 static void pcie_intr_handler(adapter_t *adapter)
1241 {
1242 	static struct intr_info pcie_intr_info[] = {
1243 		{ F_PEXERR, "PCI PEX error", -1, 1 },
1244 		{ F_UNXSPLCPLERRR,
1245 		  "PCI unexpected split completion DMA read error", -1, 1 },
1246 		{ F_UNXSPLCPLERRC,
1247 		  "PCI unexpected split completion DMA command error", -1, 1 },
1248 		{ F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1249 		{ F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1250 		{ F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1251 		{ F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1252 		{ V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1253 		  "PCI MSI-X table/PBA parity error", -1, 1 },
1254 		{ V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1255 		{ 0 }
1256 	};
1257 
1258 	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1259 				  pcie_intr_info, adapter->irq_stats))
1260 		t3_fatal_err(adapter);
1261 }
1262 
1263 /*
1264  * TP interrupt handler.
1265  */
1266 static void tp_intr_handler(adapter_t *adapter)
1267 {
1268 	static struct intr_info tp_intr_info[] = {
1269 		{ 0xffffff,  "TP parity error", -1, 1 },
1270 		{ 0x1000000, "TP out of Rx pages", -1, 1 },
1271 		{ 0x2000000, "TP out of Tx pages", -1, 1 },
1272 		{ 0 }
1273 	};
1274 
1275 	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1276 				  tp_intr_info, NULL))
1277 		t3_fatal_err(adapter);
1278 }
1279 
1280 /*
1281  * CIM interrupt handler.
1282  */
1283 static void cim_intr_handler(adapter_t *adapter)
1284 {
1285 	static struct intr_info cim_intr_info[] = {
1286 		{ F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1287 		{ F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1288 		{ F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1289 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1290 		{ F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1291 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1292 		{ F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1293 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1294 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1295 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1296 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1297 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1298 		{ 0 }
1299         };
1300 
1301 	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1302 				  cim_intr_info, NULL))
1303 		t3_fatal_err(adapter);
1304 }
1305 
1306 /*
1307  * ULP RX interrupt handler.
1308  */
1309 static void ulprx_intr_handler(adapter_t *adapter)
1310 {
1311 	static struct intr_info ulprx_intr_info[] = {
1312 		{ F_PARERR, "ULP RX parity error", -1, 1 },
1313 		{ 0 }
1314         };
1315 
1316 	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1317 				  ulprx_intr_info, NULL))
1318 		t3_fatal_err(adapter);
1319 }
1320 
1321 /*
1322  * ULP TX interrupt handler.
1323  */
1324 static void ulptx_intr_handler(adapter_t *adapter)
1325 {
1326 	static struct intr_info ulptx_intr_info[] = {
1327 		{ F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1328 		  STAT_ULP_CH0_PBL_OOB, 0 },
1329 		{ F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1330 		  STAT_ULP_CH1_PBL_OOB, 0 },
1331 		{ 0 }
1332         };
1333 
1334 	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1335 				  ulptx_intr_info, adapter->irq_stats))
1336 		t3_fatal_err(adapter);
1337 }
1338 
/*
 * Aggregated PM TX framing-error bits, grouped so a single intr_info entry
 * in pmtx_intr_handler() matches any of them.
 */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1347 
1348 /*
1349  * PM TX interrupt handler.
1350  */
1351 static void pmtx_intr_handler(adapter_t *adapter)
1352 {
1353 	static struct intr_info pmtx_intr_info[] = {
1354 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1355 		{ ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1356 		{ OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1357 		{ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1358 		  "PMTX ispi parity error", -1, 1 },
1359 		{ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1360 		  "PMTX ospi parity error", -1, 1 },
1361 		{ 0 }
1362         };
1363 
1364 	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1365 				  pmtx_intr_info, NULL))
1366 		t3_fatal_err(adapter);
1367 }
1368 
/*
 * Aggregated PM RX framing-error bits, grouped so a single intr_info entry
 * in pmrx_intr_handler() matches any of them.
 */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1377 
1378 /*
1379  * PM RX interrupt handler.
1380  */
1381 static void pmrx_intr_handler(adapter_t *adapter)
1382 {
1383 	static struct intr_info pmrx_intr_info[] = {
1384 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1385 		{ IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
1386 		{ OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
1387 		{ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1388 		  "PMRX ispi parity error", -1, 1 },
1389 		{ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1390 		  "PMRX ospi parity error", -1, 1 },
1391 		{ 0 }
1392         };
1393 
1394 	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1395 				  pmrx_intr_info, NULL))
1396 		t3_fatal_err(adapter);
1397 }
1398 
1399 /*
1400  * CPL switch interrupt handler.
1401  */
1402 static void cplsw_intr_handler(adapter_t *adapter)
1403 {
1404 	static struct intr_info cplsw_intr_info[] = {
1405 //		{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
1406 		{ F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
1407 		{ F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1408 		{ F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
1409 		{ F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
1410 		{ 0 }
1411         };
1412 
1413 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1414 				  cplsw_intr_info, NULL))
1415 		t3_fatal_err(adapter);
1416 }
1417 
1418 /*
1419  * MPS interrupt handler.
1420  */
1421 static void mps_intr_handler(adapter_t *adapter)
1422 {
1423 	static struct intr_info mps_intr_info[] = {
1424 		{ 0x1ff, "MPS parity error", -1, 1 },
1425 		{ 0 }
1426 	};
1427 
1428 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1429 				  mps_intr_info, NULL))
1430 		t3_fatal_err(adapter);
1431 }
1432 
/* Causes that require taking the adapter down. */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.  Reports correctable/uncorrectable/parity/address
 * errors with their error-capture registers, updates the per-MC7 stats,
 * and escalates fatal causes.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	adapter_t *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		/* Correctable ECC error: warn with the captured address/data. */
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		/* Uncorrectable ECC error: fatal (part of MC7_INTR_FATAL). */
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* The error-address register only exists on rev > 0 parts. */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	/* Acknowledge everything we just processed. */
	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1485 
/* XGMAC causes that require taking the adapter down. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.  Updates the MAC statistics for each asserted
 * cause, acknowledges the causes, and escalates fatal FIFO parity errors.
 * Returns non-zero iff any cause bit was set.
 */
static int mac_intr_handler(adapter_t *adap, unsigned int idx)
{
	struct cmac *mac = &adap->port[idx].mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	/* Clear the causes before possibly declaring a fatal error. */
	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
1520 
1521 /*
1522  * Interrupt handler for PHY events.
1523  */
1524 int t3_phy_intr_handler(adapter_t *adapter)
1525 {
1526 	u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1527 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1528 
1529 	for_each_port(adapter, i) {
1530 		struct port_info *p = &adapter->port[i];
1531 
1532 		mask = gpi - (gpi & (gpi - 1));
1533 		gpi -= mask;
1534 
1535 		if (!(p->port_type->caps & SUPPORTED_IRQ))
1536 			continue;
1537 
1538 		if (cause & mask) {
1539 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
1540 
1541 			if (phy_cause & cphy_cause_link_change)
1542 				t3_link_changed(adapter, i);
1543 			if (phy_cause & cphy_cause_fifo_error)
1544 				p->phy.fifo_errors++;
1545 		}
1546 	}
1547 
1548 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1549 	return 0;
1550 }
1551 
1552 /*
1553  * T3 slow path (non-data) interrupt handler.
1554  */
1555 int t3_slow_intr_handler(adapter_t *adapter)
1556 {
1557 	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1558 
1559 	cause &= adapter->slow_intr_mask;
1560 	if (!cause)
1561 		return 0;
1562 
1563 	if (cause & F_PCIM0) {
1564 		if (is_pcie(adapter))
1565 			pcie_intr_handler(adapter);
1566 		else
1567 			pci_intr_handler(adapter);
1568 	}
1569 	if (cause & F_SGE3)
1570 		t3_sge_err_intr_handler(adapter);
1571 	if (cause & F_MC7_PMRX)
1572 		mc7_intr_handler(&adapter->pmrx);
1573 	if (cause & F_MC7_PMTX)
1574 		mc7_intr_handler(&adapter->pmtx);
1575 	if (cause & F_MC7_CM)
1576 		mc7_intr_handler(&adapter->cm);
1577 	if (cause & F_CIM)
1578 		cim_intr_handler(adapter);
1579 	if (cause & F_TP1)
1580 		tp_intr_handler(adapter);
1581 	if (cause & F_ULP2_RX)
1582 		ulprx_intr_handler(adapter);
1583 	if (cause & F_ULP2_TX)
1584 		ulptx_intr_handler(adapter);
1585 	if (cause & F_PM1_RX)
1586 		pmrx_intr_handler(adapter);
1587 	if (cause & F_PM1_TX)
1588 		pmtx_intr_handler(adapter);
1589 	if (cause & F_CPL_SWITCH)
1590 		cplsw_intr_handler(adapter);
1591 	if (cause & F_MPS0)
1592 		mps_intr_handler(adapter);
1593 	if (cause & F_MC5A)
1594 		t3_mc5_intr_handler(&adapter->mc5);
1595 	if (cause & F_XGMAC0_0)
1596 		mac_intr_handler(adapter, 0);
1597 	if (cause & F_XGMAC0_1)
1598 		mac_intr_handler(adapter, 1);
1599 	if (cause & F_T3DBG)
1600 		t3_os_ext_intr_handler(adapter);
1601 
1602 	/* Clear the interrupts just processed. */
1603 	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1604 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1605 	return 1;
1606 }
1607 
1608 /**
1609  *	t3_intr_enable - enable interrupts
1610  *	@adapter: the adapter whose interrupts should be enabled
1611  *
1612  *	Enable interrupts by setting the interrupt enable registers of the
1613  *	various HW modules and then enabling the top-level interrupt
1614  *	concentrator.
1615  */
1616 void t3_intr_enable(adapter_t *adapter)
1617 {
1618 	static struct addr_val_pair intr_en_avp[] = {
1619 		{ A_SG_INT_ENABLE, SGE_INTR_MASK },
1620 		{ A_MC7_INT_ENABLE, MC7_INTR_MASK },
1621 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1622 			MC7_INTR_MASK },
1623 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1624 			MC7_INTR_MASK },
1625 		{ A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
1626 		{ A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
1627 		{ A_TP_INT_ENABLE, 0x3bfffff },
1628 		{ A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
1629 		{ A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
1630 		{ A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
1631 		{ A_MPS_INT_ENABLE, MPS_INTR_MASK },
1632 	};
1633 
1634 	adapter->slow_intr_mask = PL_INTR_MASK;
1635 
1636 	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1637 
1638 	if (adapter->params.rev > 0) {
1639 		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1640 			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1641 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1642 			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1643 			     F_PBL_BOUND_ERR_CH1);
1644 	} else {
1645 		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1646 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1647 	}
1648 
1649 	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1650 		     adapter_info(adapter)->gpio_intr);
1651 	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1652 		     adapter_info(adapter)->gpio_intr);
1653 	if (is_pcie(adapter)) {
1654 		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1655 	} else {
1656 		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1657 	}
1658 	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1659 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);          /* flush */
1660 }
1661 
1662 /**
1663  *	t3_intr_disable - disable a card's interrupts
1664  *	@adapter: the adapter whose interrupts should be disabled
1665  *
1666  *	Disable interrupts.  We only disable the top-level interrupt
1667  *	concentrator and the SGE data interrupts.
1668  */
1669 void t3_intr_disable(adapter_t *adapter)
1670 {
1671 	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1672 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);  /* flush */
1673 	adapter->slow_intr_mask = 0;
1674 }
1675 
1676 /**
1677  *	t3_intr_clear - clear all interrupts
1678  *	@adapter: the adapter whose interrupts should be cleared
1679  *
1680  *	Clears all interrupts.
1681  */
1682 void t3_intr_clear(adapter_t *adapter)
1683 {
1684 	static const unsigned int cause_reg_addr[] = {
1685 		A_SG_INT_CAUSE,
1686 		A_SG_RSPQ_FL_STATUS,
1687 		A_PCIX_INT_CAUSE,
1688 		A_MC7_INT_CAUSE,
1689 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1690 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1691 		A_CIM_HOST_INT_CAUSE,
1692 		A_TP_INT_CAUSE,
1693 		A_MC5_DB_INT_CAUSE,
1694 		A_ULPRX_INT_CAUSE,
1695 		A_ULPTX_INT_CAUSE,
1696 		A_CPL_INTR_CAUSE,
1697 		A_PM1_TX_INT_CAUSE,
1698 		A_PM1_RX_INT_CAUSE,
1699 		A_MPS_INT_CAUSE,
1700 		A_T3DBG_INT_CAUSE,
1701 	};
1702 	unsigned int i;
1703 
1704 	/* Clear PHY and MAC interrupts for each port. */
1705 	for_each_port(adapter, i)
1706 		t3_port_intr_clear(adapter, i);
1707 
1708 	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1709 		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1710 
1711 	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1712 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0);          /* flush */
1713 }
1714 
1715 /**
1716  *	t3_port_intr_enable - enable port-specific interrupts
1717  *	@adapter: associated adapter
1718  *	@idx: index of port whose interrupts should be enabled
1719  *
1720  *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
1721  *	adapter port.
1722  */
1723 void t3_port_intr_enable(adapter_t *adapter, int idx)
1724 {
1725 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1726 	adapter->port[idx].phy.ops->intr_enable(&adapter->port[idx].phy);
1727 }
1728 
1729 /**
1730  *	t3_port_intr_disable - disable port-specific interrupts
1731  *	@adapter: associated adapter
1732  *	@idx: index of port whose interrupts should be disabled
1733  *
1734  *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
1735  *	adapter port.
1736  */
1737 void t3_port_intr_disable(adapter_t *adapter, int idx)
1738 {
1739 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1740 	adapter->port[idx].phy.ops->intr_disable(&adapter->port[idx].phy);
1741 }
1742 
1743 /**
1744  *	t3_port_intr_clear - clear port-specific interrupts
1745  *	@adapter: associated adapter
1746  *	@idx: index of port whose interrupts to clear
1747  *
1748  *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
1749  *	adapter port.
1750  */
1751 void t3_port_intr_clear(adapter_t *adapter, int idx)
1752 {
1753 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1754 	adapter->port[idx].phy.ops->intr_clear(&adapter->port[idx].phy);
1755 }
1756 
1757 
1758 /**
1759  * 	t3_sge_write_context - write an SGE context
1760  * 	@adapter: the adapter
1761  * 	@id: the context id
1762  * 	@type: the context type
1763  *
1764  * 	Program an SGE context with the values already loaded in the
1765  * 	CONTEXT_DATA? registers.
1766  */
1767 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
1768 				unsigned int type)
1769 {
1770 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1771 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1772 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1773 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1774 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1775 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1776 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1777 			       0, 5, 1);
1778 }
1779 
1780 /**
1781  *	t3_sge_init_ecntxt - initialize an SGE egress context
1782  *	@adapter: the adapter to configure
1783  *	@id: the context id
1784  *	@gts_enable: whether to enable GTS for the context
1785  *	@type: the egress context type
1786  *	@respq: associated response queue
1787  *	@base_addr: base address of queue
1788  *	@size: number of queue entries
1789  *	@token: uP token
1790  *	@gen: initial generation value for the context
1791  *	@cidx: consumer pointer
1792  *
1793  *	Initialize an SGE egress context and make it ready for use.  If the
1794  *	platform allows concurrent context operations, the caller is
1795  *	responsible for appropriate locking.
1796  */
1797 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
1798 		       enum sge_context_type type, int respq, u64 base_addr,
1799 		       unsigned int size, unsigned int token, int gen,
1800 		       unsigned int cidx)
1801 {
1802 	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1803 
1804 	if (base_addr & 0xfff)     /* must be 4K aligned */
1805 		return -EINVAL;
1806 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1807 		return -EBUSY;
1808 
1809 	base_addr >>= 12;
1810 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1811 		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1812 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1813 		     V_EC_BASE_LO((u32)base_addr & 0xffff));
1814 	base_addr >>= 16;
1815 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
1816 	base_addr >>= 32;
1817 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1818 		     V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
1819 		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1820 		     F_EC_VALID);
1821 	return t3_sge_write_context(adapter, id, F_EGRESS);
1822 }
1823 
1824 /**
1825  *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1826  *	@adapter: the adapter to configure
1827  *	@id: the context id
1828  *	@gts_enable: whether to enable GTS for the context
1829  *	@base_addr: base address of queue
1830  *	@size: number of queue entries
1831  *	@bsize: size of each buffer for this queue
1832  *	@cong_thres: threshold to signal congestion to upstream producers
1833  *	@gen: initial generation value for the context
1834  *	@cidx: consumer pointer
1835  *
1836  *	Initialize an SGE free list context and make it ready for use.  The
1837  *	caller is responsible for ensuring only one context operation occurs
1838  *	at a time.
1839  */
1840 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
1841 			u64 base_addr, unsigned int size, unsigned int bsize,
1842 			unsigned int cong_thres, int gen, unsigned int cidx)
1843 {
1844 	if (base_addr & 0xfff)     /* must be 4K aligned */
1845 		return -EINVAL;
1846 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1847 		return -EBUSY;
1848 
1849 	base_addr >>= 12;
1850 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
1851 	base_addr >>= 32;
1852 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1853 		     V_FL_BASE_HI((u32)base_addr) |
1854 		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1855 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1856 		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1857 		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1858 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1859 		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1860 		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1861 	return t3_sge_write_context(adapter, id, F_FREELIST);
1862 }
1863 
1864 /**
1865  *	t3_sge_init_rspcntxt - initialize an SGE response queue context
1866  *	@adapter: the adapter to configure
1867  *	@id: the context id
1868  *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1869  *	@base_addr: base address of queue
1870  *	@size: number of queue entries
1871  *	@fl_thres: threshold for selecting the normal or jumbo free list
1872  *	@gen: initial generation value for the context
1873  *	@cidx: consumer pointer
1874  *
1875  *	Initialize an SGE response queue context and make it ready for use.
1876  *	The caller is responsible for ensuring only one context operation
1877  *	occurs at a time.
1878  */
1879 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
1880 			 u64 base_addr, unsigned int size,
1881 			 unsigned int fl_thres, int gen, unsigned int cidx)
1882 {
1883 	unsigned int intr = 0;
1884 
1885 	if (base_addr & 0xfff)     /* must be 4K aligned */
1886 		return -EINVAL;
1887 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1888 		return -EBUSY;
1889 
1890 	base_addr >>= 12;
1891 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1892 		     V_CQ_INDEX(cidx));
1893 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
1894 	base_addr >>= 32;
1895 	if (irq_vec_idx >= 0)
1896 		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1897 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1898 		     V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
1899 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1900 	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1901 }
1902 
1903 /**
1904  *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
1905  *	@adapter: the adapter to configure
1906  *	@id: the context id
1907  *	@base_addr: base address of queue
1908  *	@size: number of queue entries
1909  *	@rspq: response queue for async notifications
1910  *	@ovfl_mode: CQ overflow mode
1911  *	@credits: completion queue credits
1912  *	@credit_thres: the credit threshold
1913  *
1914  *	Initialize an SGE completion queue context and make it ready for use.
1915  *	The caller is responsible for ensuring only one context operation
1916  *	occurs at a time.
1917  */
1918 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
1919 			unsigned int size, int rspq, int ovfl_mode,
1920 			unsigned int credits, unsigned int credit_thres)
1921 {
1922 	if (base_addr & 0xfff)     /* must be 4K aligned */
1923 		return -EINVAL;
1924 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1925 		return -EBUSY;
1926 
1927 	base_addr >>= 12;
1928 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1929 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
1930 	base_addr >>= 32;
1931 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1932 		     V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
1933 		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1934 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1935 		     V_CQ_CREDIT_THRES(credit_thres));
1936 	return t3_sge_write_context(adapter, id, F_CQ);
1937 }
1938 
1939 /**
1940  *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
1941  *	@adapter: the adapter
1942  *	@id: the egress context id
1943  *	@enable: enable (1) or disable (0) the context
1944  *
1945  *	Enable or disable an SGE egress context.  The caller is responsible for
1946  *	ensuring only one context operation occurs at a time.
1947  */
1948 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
1949 {
1950 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1951 		return -EBUSY;
1952 
1953 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1954 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1955 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1956 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1957 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1958 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1959 		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1960 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1961 			       0, 5, 1);
1962 }
1963 
1964 /**
1965  *	t3_sge_disable_fl - disable an SGE free-buffer list
1966  *	@adapter: the adapter
1967  *	@id: the free list context id
1968  *
1969  *	Disable an SGE free-buffer list.  The caller is responsible for
1970  *	ensuring only one context operation occurs at a time.
1971  */
1972 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
1973 {
1974 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1975 		return -EBUSY;
1976 
1977 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1978 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1979 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1980 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1981 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1982 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1983 		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1984 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1985 			       0, 5, 1);
1986 }
1987 
1988 /**
1989  *	t3_sge_disable_rspcntxt - disable an SGE response queue
1990  *	@adapter: the adapter
1991  *	@id: the response queue context id
1992  *
1993  *	Disable an SGE response queue.  The caller is responsible for
1994  *	ensuring only one context operation occurs at a time.
1995  */
1996 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
1997 {
1998 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1999 		return -EBUSY;
2000 
2001 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2002 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2003 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2004 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2005 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2006 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2007 		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2008 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2009 			       0, 5, 1);
2010 }
2011 
2012 /**
2013  *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2014  *	@adapter: the adapter
2015  *	@id: the completion queue context id
2016  *
2017  *	Disable an SGE completion queue.  The caller is responsible for
2018  *	ensuring only one context operation occurs at a time.
2019  */
2020 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2021 {
2022 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2023 		return -EBUSY;
2024 
2025 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2026 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2027 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2028 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2029 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2030 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2031 		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2032 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2033 			       0, 5, 1);
2034 }
2035 
2036 /**
2037  *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2038  *	@adapter: the adapter
2039  *	@id: the context id
2040  *	@op: the operation to perform
2041  *
2042  *	Perform the selected operation on an SGE completion queue context.
2043  *	The caller is responsible for ensuring only one context operation
2044  *	occurs at a time.
2045  */
2046 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2047 		      unsigned int credits)
2048 {
2049 	u32 val;
2050 
2051 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2052 		return -EBUSY;
2053 
2054 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2055 	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2056 		     V_CONTEXT(id) | F_CQ);
2057 	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2058 				0, 5, 1, &val))
2059 		return -EIO;
2060 
2061 	if (op >= 2 && op < 7) {
2062 		if (adapter->params.rev > 0)
2063 			return G_CQ_INDEX(val);
2064 
2065 		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2066 			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2067 		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2068 				    F_CONTEXT_CMD_BUSY, 0, 5, 1))
2069 			return -EIO;
2070 		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2071 	}
2072 	return 0;
2073 }
2074 
2075 /**
2076  * 	t3_sge_read_context - read an SGE context
2077  * 	@type: the context type
2078  * 	@adapter: the adapter
2079  * 	@id: the context id
2080  * 	@data: holds the retrieved context
2081  *
2082  * 	Read an SGE egress context.  The caller is responsible for ensuring
2083  * 	only one context operation occurs at a time.
2084  */
2085 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2086 			       unsigned int id, u32 data[4])
2087 {
2088 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2089 		return -EBUSY;
2090 
2091 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2092 		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2093 	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2094 			    5, 1))
2095 		return -EIO;
2096 	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2097 	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2098 	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2099 	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2100 	return 0;
2101 }
2102 
2103 /**
2104  * 	t3_sge_read_ecntxt - read an SGE egress context
2105  * 	@adapter: the adapter
2106  * 	@id: the context id
2107  * 	@data: holds the retrieved context
2108  *
2109  * 	Read an SGE egress context.  The caller is responsible for ensuring
2110  * 	only one context operation occurs at a time.
2111  */
2112 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2113 {
2114 	if (id >= 65536)
2115 		return -EINVAL;
2116 	return t3_sge_read_context(F_EGRESS, adapter, id, data);
2117 }
2118 
2119 /**
2120  * 	t3_sge_read_cq - read an SGE CQ context
2121  * 	@adapter: the adapter
2122  * 	@id: the context id
2123  * 	@data: holds the retrieved context
2124  *
2125  * 	Read an SGE CQ context.  The caller is responsible for ensuring
2126  * 	only one context operation occurs at a time.
2127  */
2128 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2129 {
2130 	if (id >= 65536)
2131 		return -EINVAL;
2132 	return t3_sge_read_context(F_CQ, adapter, id, data);
2133 }
2134 
2135 /**
2136  * 	t3_sge_read_fl - read an SGE free-list context
2137  * 	@adapter: the adapter
2138  * 	@id: the context id
2139  * 	@data: holds the retrieved context
2140  *
2141  * 	Read an SGE free-list context.  The caller is responsible for ensuring
2142  * 	only one context operation occurs at a time.
2143  */
2144 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2145 {
2146 	if (id >= SGE_QSETS * 2)
2147 		return -EINVAL;
2148 	return t3_sge_read_context(F_FREELIST, adapter, id, data);
2149 }
2150 
2151 /**
2152  * 	t3_sge_read_rspq - read an SGE response queue context
2153  * 	@adapter: the adapter
2154  * 	@id: the context id
2155  * 	@data: holds the retrieved context
2156  *
2157  * 	Read an SGE response queue context.  The caller is responsible for
2158  * 	ensuring only one context operation occurs at a time.
2159  */
2160 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2161 {
2162 	if (id >= SGE_QSETS)
2163 		return -EINVAL;
2164 	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2165 }
2166 
2167 /**
2168  *	t3_config_rss - configure Rx packet steering
2169  *	@adapter: the adapter
2170  *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2171  *	@cpus: values for the CPU lookup table (0xff terminated)
2172  *	@rspq: values for the response queue lookup table (0xffff terminated)
2173  *
2174  *	Programs the receive packet steering logic.  @cpus and @rspq provide
2175  *	the values for the CPU and response queue lookup tables.  If they
2176  *	provide fewer values than the size of the tables the supplied values
2177  *	are used repeatedly until the tables are fully populated.
2178  */
2179 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2180 		   const u16 *rspq)
2181 {
2182 	int i, j, cpu_idx = 0, q_idx = 0;
2183 
2184 	if (cpus)
2185 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2186 			u32 val = i << 16;
2187 
2188 			for (j = 0; j < 2; ++j) {
2189 				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2190 				if (cpus[cpu_idx] == 0xff)
2191 					cpu_idx = 0;
2192 			}
2193 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2194 		}
2195 
2196 	if (rspq)
2197 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2198 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2199 				     (i << 16) | rspq[q_idx++]);
2200 			if (rspq[q_idx] == 0xffff)
2201 				q_idx = 0;
2202 		}
2203 
2204 	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2205 }
2206 
2207 /**
2208  *	t3_read_rss - read the contents of the RSS tables
2209  *	@adapter: the adapter
2210  *	@lkup: holds the contents of the RSS lookup table
2211  *	@map: holds the contents of the RSS map table
2212  *
2213  *	Reads the contents of the receive packet steering tables.
2214  */
2215 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2216 {
2217 	int i;
2218 	u32 val;
2219 
2220 	if (lkup)
2221 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2222 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2223 				     0xffff0000 | i);
2224 			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2225 			if (!(val & 0x80000000))
2226 				return -EAGAIN;
2227 			*lkup++ = (u8)val;
2228 			*lkup++ = (u8)(val >> 8);
2229 		}
2230 
2231 	if (map)
2232 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2233 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2234 				     0xffff0000 | i);
2235 			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2236 			if (!(val & 0x80000000))
2237 				return -EAGAIN;
2238 			*map++ = (u16)val;
2239 		}
2240 	return 0;
2241 }
2242 
2243 /**
2244  *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2245  *	@adap: the adapter
2246  *	@enable: 1 to select offload mode, 0 for regular NIC
2247  *
2248  *	Switches TP to NIC/offload mode.
2249  */
2250 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
2251 {
2252 	if (is_offload(adap) || !enable)
2253 		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2254 				 V_NICMODE(!enable));
2255 }
2256 
2257 /**
2258  *	pm_num_pages - calculate the number of pages of the payload memory
2259  *	@mem_size: the size of the payload memory
2260  *	@pg_size: the size of each payload memory page
2261  *
2262  *	Calculate the number of pages, each of the given size, that fit in a
2263  *	memory of the specified size, respecting the HW requirement that the
2264  *	number of pages must be a multiple of 24.
2265  */
2266 static inline unsigned int pm_num_pages(unsigned int mem_size,
2267 					unsigned int pg_size)
2268 {
2269 	unsigned int n = mem_size / pg_size;
2270 
2271 	return n - n % 24;
2272 }
2273 
/*
 * Program a memory region's base-address register and advance the running
 * offset @start past the region.  Wrapped in do { } while (0) so the macro
 * expands to a single statement and is safe inside unbraced if/else bodies
 * (the original expanded to two separate statements).
 */
#define mem_region(adap, start, size, reg) do { \
	t3_write_reg((adap), A_ ## reg, (start)); \
	(start) += (size); \
} while (0)
2277 
2278 /*
2279  *	partition_mem - partition memory and configure TP memory settings
2280  *	@adap: the adapter
2281  *	@p: the TP parameters
2282  *
2283  *	Partitions context and payload memory and configures TP's memory
2284  *	registers.
2285  */
2286 static void partition_mem(adapter_t *adap, const struct tp_params *p)
2287 {
2288 	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2289 	unsigned int timers = 0, timers_shift = 22;
2290 
2291 	if (adap->params.rev > 0) {
2292 		if (tids <= 16 * 1024) {
2293 			timers = 1;
2294 			timers_shift = 16;
2295 		} else if (tids <= 64 * 1024) {
2296 			timers = 2;
2297 			timers_shift = 18;
2298 		} else if (tids <= 256 * 1024) {
2299 			timers = 3;
2300 			timers_shift = 20;
2301 		}
2302 	}
2303 
2304 	t3_write_reg(adap, A_TP_PMM_SIZE,
2305 		     p->chan_rx_size | (p->chan_tx_size >> 16));
2306 
2307 	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2308 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2309 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2310 	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2311 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2312 
2313 	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2314 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2315 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2316 
2317 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
2318 	/* Add a bit of headroom and make multiple of 24 */
2319 	pstructs += 48;
2320 	pstructs -= pstructs % 24;
2321 	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2322 
2323 	m = tids * TCB_SIZE;
2324 	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2325 	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2326 	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2327 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2328 	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2329 	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2330 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2331 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2332 
2333 	m = (m + 4095) & ~0xfff;
2334 	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2335 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2336 
2337 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2338 	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2339 	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2340 	if (tids < m)
2341 		adap->params.mc5.nservers += m - tids;
2342 }
2343 
/* Write @val to the indirect TP register at @addr via the TP PIO window. */
static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2349 
/* Program TP's global, TCP-option, delayed-ACK, and pacing settings. */
static void tp_config(adapter_t *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	/* rev 0 silicon uses a different bit position for ESND. */
	t3_set_reg_field(adap, A_TP_PARA_REG6,
			 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
			 0);
	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
			 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
			 F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);

	/* Pacing: automatic on rev > 0, fixed on rev 0. */
	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	/* Clear Tx modulation queue weights and rate limits. */
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0);
}
2389 
2390 /* Desired TP timer resolution in usec */
2391 #define TP_TMR_RES 200
2392 
2393 /* TCP timer values in ms */
2394 #define TP_DACK_TIMER 50
2395 #define TP_RTO_MIN    250
2396 
2397 /**
2398  *	tp_set_timers - set TP timing parameters
2399  *	@adap: the adapter to set
2400  *	@core_clk: the core clock frequency in Hz
2401  *
2402  *	Set TP's timing parameters, such as the various timer resolutions and
2403  *	the TCP timer values.
2404  */
2405 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
2406 {
2407 	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2408 	unsigned int dack_re = adap->params.tp.dack_re;
2409 	unsigned int tstamp_re = fls(core_clk / 1000);     /* 1ms, at least */
2410 	unsigned int tps = core_clk >> tre;
2411 
2412 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2413 		     V_DELAYEDACKRESOLUTION(dack_re) |
2414 		     V_TIMESTAMPRESOLUTION(tstamp_re));
2415 	t3_write_reg(adap, A_TP_DACK_TIMER,
2416 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2417 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2418 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2419 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2420 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2421 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2422 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2423 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2424 		     V_KEEPALIVEMAX(9));
2425 
2426 #define SECONDS * tps
2427 
2428 	t3_write_reg(adap, A_TP_MSL,
2429 		     adap->params.rev > 0 ? 0 : 2 SECONDS);
2430 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2431 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2432 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2433 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2434 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2435 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2436 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2437 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2438 
2439 #undef SECONDS
2440 }
2441 
2442 #ifdef CONFIG_CHELSIO_T3_CORE
2443 /**
2444  *	t3_tp_set_coalescing_size - set receive coalescing size
2445  *	@adap: the adapter
2446  *	@size: the receive coalescing size
2447  *	@psh: whether a set PSH bit should deliver coalesced data
2448  *
2449  *	Set the receive coalescing size and PSH bit handling.
2450  */
2451 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
2452 {
2453 	u32 val;
2454 
2455 	if (size > MAX_RX_COALESCING_LEN)
2456 		return -EINVAL;
2457 
2458 	val = t3_read_reg(adap, A_TP_PARA_REG3);
2459 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2460 
2461 	if (size) {
2462 		val |= F_RXCOALESCEENABLE;
2463 		if (psh)
2464 			val |= F_RXCOALESCEPSHEN;
2465 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2466 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2467 	}
2468 	t3_write_reg(adap, A_TP_PARA_REG3, val);
2469 	return 0;
2470 }
2471 
2472 /**
2473  *	t3_tp_set_max_rxsize - set the max receive size
2474  *	@adap: the adapter
2475  *	@size: the max receive size
2476  *
2477  *	Set TP's max receive size.  This is the limit that applies when
2478  *	receive coalescing is disabled.
2479  */
2480 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
2481 {
2482 	t3_write_reg(adap, A_TP_PARA_REG7,
2483 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2484 }
2485 
2486 static void __devinit init_mtus(unsigned short mtus[])
2487 {
2488 	/*
2489 	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
2490 	 * it can accomodate max size TCP/IP headers when SACK and timestamps
2491 	 * are enabled and still have at least 8 bytes of payload.
2492 	 */
2493 	mtus[0] = 88;
2494 	mtus[1] = 88; /* workaround for silicon starting at 1 */
2495 	mtus[2] = 256;
2496 	mtus[3] = 512;
2497 	mtus[4] = 576;
2498 	/* mtus[4] = 808; */
2499 	mtus[5] = 1024;
2500 	mtus[6] = 1280;
2501 	mtus[7] = 1492;
2502 	mtus[8] = 1500;
2503 	mtus[9] = 2002;
2504 	mtus[10] = 2048;
2505 	mtus[11] = 4096;
2506 	mtus[12] = 4352;
2507 	mtus[13] = 8192;
2508 	mtus[14] = 9000;
2509 	mtus[15] = 9600;
2510 }
2511 
2512 /*
2513  * Initial congestion control parameters.
2514  */
2515 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2516 {
2517 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2518 	a[9] = 2;
2519 	a[10] = 3;
2520 	a[11] = 4;
2521 	a[12] = 5;
2522 	a[13] = 6;
2523 	a[14] = 7;
2524 	a[15] = 8;
2525 	a[16] = 9;
2526 	a[17] = 10;
2527 	a[18] = 14;
2528 	a[19] = 17;
2529 	a[20] = 21;
2530 	a[21] = 25;
2531 	a[22] = 30;
2532 	a[23] = 35;
2533 	a[24] = 45;
2534 	a[25] = 60;
2535 	a[26] = 80;
2536 	a[27] = 100;
2537 	a[28] = 200;
2538 	a[29] = 300;
2539 	a[30] = 400;
2540 	a[31] = 500;
2541 
2542 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2543 	b[9] = b[10] = 1;
2544 	b[11] = b[12] = 2;
2545 	b[13] = b[14] = b[15] = b[16] = 3;
2546 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2547 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2548 	b[28] = b[29] = 6;
2549 	b[30] = b[31] = 7;
2550 }
2551 
2552 /* The minimum additive increment value for the congestion control table */
2553 #define CC_MIN_INCR 2U
2554 
2555 /**
2556  *	t3_load_mtus - write the MTU and congestion control HW tables
2557  *	@adap: the adapter
2558  *	@mtus: the unrestricted values for the MTU table
2559  *	@alphs: the values for the congestion control alpha parameter
2560  *	@beta: the values for the congestion control beta parameter
2561  *	@mtu_cap: the maximum permitted effective MTU
2562  *
2563  *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2564  *	Update the high-speed congestion control table with the supplied alpha,
2565  * 	beta, and MTUs.
2566  */
2567 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
2568 		  unsigned short alpha[NCCTRL_WIN],
2569 		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2570 {
2571 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2572 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2573 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2574 		28672, 40960, 57344, 81920, 114688, 163840, 229376 };
2575 
2576 	unsigned int i, w;
2577 
2578 	for (i = 0; i < NMTUS; ++i) {
2579 		unsigned int mtu = min(mtus[i], mtu_cap);
2580 		unsigned int log2 = fls(mtu);
2581 
2582 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
2583 			log2--;
2584 		t3_write_reg(adap, A_TP_MTU_TABLE,
2585 			     (i << 24) | (log2 << 16) | mtu);
2586 
2587 		for (w = 0; w < NCCTRL_WIN; ++w) {
2588 			unsigned int inc;
2589 
2590 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2591 				  CC_MIN_INCR);
2592 
2593 			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2594 				     (w << 16) | (beta[w] << 13) | inc);
2595 		}
2596 	}
2597 }
2598 
2599 /**
2600  *	t3_read_hw_mtus - returns the values in the HW MTU table
2601  *	@adap: the adapter
2602  *	@mtus: where to store the HW MTU values
2603  *
2604  *	Reads the HW MTU table.
2605  */
2606 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
2607 {
2608 	int i;
2609 
2610 	for (i = 0; i < NMTUS; ++i) {
2611 		unsigned int val;
2612 
2613 		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2614 		val = t3_read_reg(adap, A_TP_MTU_TABLE);
2615 		mtus[i] = val & 0x3fff;
2616 	}
2617 }
2618 
2619 /**
2620  *	t3_get_cong_cntl_tab - reads the congestion control table
2621  *	@adap: the adapter
2622  *	@incr: where to store the alpha values
2623  *
2624  *	Reads the additive increments programmed into the HW congestion
2625  *	control table.
2626  */
2627 void t3_get_cong_cntl_tab(adapter_t *adap,
2628 			  unsigned short incr[NMTUS][NCCTRL_WIN])
2629 {
2630 	unsigned int mtu, w;
2631 
2632 	for (mtu = 0; mtu < NMTUS; ++mtu)
2633 		for (w = 0; w < NCCTRL_WIN; ++w) {
2634 			t3_write_reg(adap, A_TP_CCTRL_TABLE,
2635 				     0xffff0000 | (mtu << 5) | w);
2636 			incr[mtu][w] = (unsigned short)t3_read_reg(adap,
2637 				        A_TP_CCTRL_TABLE) & 0x1fff;
2638 		}
2639 }
2640 
2641 /**
2642  *	t3_tp_get_mib_stats - read TP's MIB counters
2643  *	@adap: the adapter
2644  *	@tps: holds the returned counter values
2645  *
2646  *	Returns the values of TP's MIB counters.
2647  */
2648 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
2649 {
2650 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
2651 			 sizeof(*tps) / sizeof(u32), 0);
2652 }
2653 
2654 /**
2655  *	t3_read_pace_tbl - read the pace table
2656  *	@adap: the adapter
2657  *	@pace_vals: holds the returned values
2658  *
2659  *	Returns the values of TP's pace table in nanoseconds.
2660  */
2661 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
2662 {
2663 	unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
2664 
2665 	for (i = 0; i < NTX_SCHED; i++) {
2666 		t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
2667 		pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
2668 	}
2669 }
2670 
2671 /**
2672  *	t3_set_pace_tbl - set the pace table
2673  *	@adap: the adapter
2674  *	@pace_vals: the pace values in nanoseconds
2675  *	@start: index of the first entry in the HW pace table to set
2676  *	@n: how many entries to set
2677  *
2678  *	Sets (a subset of the) HW pace table.
2679  */
2680 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
2681 		     unsigned int start, unsigned int n)
2682 {
2683 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
2684 
2685 	for ( ; n; n--, start++, pace_vals++)
2686 		t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
2687 			     ((*pace_vals + tick_ns / 2) / tick_ns));
2688 }
2689 
/*
 * Program an ULP RX memory region's lower/upper limit registers and
 * advance the running offset @start past the region.  Wrapped in
 * do { } while (0) so each macro expands to a single statement and is
 * safe inside unbraced if/else bodies (the originals expanded to
 * multiple separate statements).
 */
#define ulp_region(adap, name, start, len) do { \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	(start) += (len); \
} while (0)

/* Same as ulp_region() but for ULP TX; does not advance @start. */
#define ulptx_region(adap, name, start, len) do { \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
} while (0)
2700 
/* Carve the per-channel Rx payload memory into the ULP regions.
 * ulp_region() advances @m past each region; ulptx_region() does not,
 * so each ULP TX region shares its address range with the ULP RX region
 * programmed immediately after it (TPT with STAG, PBL with PBL). */
static void ulp_config(adapter_t *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
2714 #endif
2715 
2716 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
2717 			    int filter_index, int invert, int enable)
2718 {
2719 	u32 addr, key[4], mask[4];
2720 
2721 	key[0] = tp->sport | (tp->sip << 16);
2722 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
2723 	key[2] = tp->dip;
2724 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2725 
2726 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2727 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2728 	mask[2] = tp->dip_mask;
2729 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2730 
2731 	if (invert)
2732 		key[3] |= (1 << 29);
2733 	if (enable)
2734 		key[3] |= (1 << 28);
2735 
2736 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2737 	tp_wr_indirect(adapter, addr++, key[0]);
2738 	tp_wr_indirect(adapter, addr++, mask[0]);
2739 	tp_wr_indirect(adapter, addr++, key[1]);
2740 	tp_wr_indirect(adapter, addr++, mask[1]);
2741 	tp_wr_indirect(adapter, addr++, key[2]);
2742 	tp_wr_indirect(adapter, addr++, mask[2]);
2743 	tp_wr_indirect(adapter, addr++, key[3]);
2744 	tp_wr_indirect(adapter, addr,   mask[3]);
2745 	(void) t3_read_reg(adapter, A_TP_PIO_DATA);
2746 }
2747 
2748 /**
2749  *	t3_config_sched - configure a HW traffic scheduler
2750  *	@adap: the adapter
2751  *	@kbps: target rate in Kbps
2752  *	@sched: the scheduler index
2753  *
2754  *	Configure a Tx HW scheduler for the target rate.
2755  */
2756 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
2757 {
2758 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2759 	unsigned int clk = adap->params.vpd.cclk * 1000;
2760 	unsigned int selected_cpt = 0, selected_bpt = 0;
2761 
2762 	if (kbps > 0) {
2763 		kbps *= 125;     /* -> bytes */
2764 		for (cpt = 1; cpt <= 255; cpt++) {
2765 			tps = clk / cpt;
2766 			bpt = (kbps + tps / 2) / tps;
2767 			if (bpt > 0 && bpt <= 255) {
2768 				v = bpt * tps;
2769 				delta = v >= kbps ? v - kbps : kbps - v;
2770 				if (delta <= mindelta) {
2771 					mindelta = delta;
2772 					selected_cpt = cpt;
2773 					selected_bpt = bpt;
2774 				}
2775 			} else if (selected_cpt)
2776 				break;
2777 		}
2778 		if (!selected_cpt)
2779 			return -EINVAL;
2780 	}
2781 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2782 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2783 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2784 	if (sched & 1)
2785 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2786 	else
2787 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2788 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2789 	return 0;
2790 }
2791 
2792 /**
2793  *	t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
2794  *	@adap: the adapter
2795  *	@sched: the scheduler index
2796  *	@ipg: the interpacket delay in tenths of nanoseconds
2797  *
2798  *	Set the interpacket delay for a HW packet rate scheduler.
2799  */
2800 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
2801 {
2802 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
2803 
2804 	/* convert ipg to nearest number of core clocks */
2805 	ipg *= core_ticks_per_usec(adap);
2806 	ipg = (ipg + 5000) / 10000;
2807 	if (ipg > 0xffff)
2808 		return -EINVAL;
2809 
2810 	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
2811 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2812 	if (sched & 1)
2813 		v = (v & 0xffff) | (ipg << 16);
2814 	else
2815 		v = (v & 0xffff0000) | ipg;
2816 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2817 	t3_read_reg(adap, A_TP_TM_PIO_DATA);
2818 	return 0;
2819 }
2820 
2821 /**
2822  *	t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
2823  *	@adap: the adapter
2824  *	@sched: the scheduler index
2825  *	@kbps: the byte rate in Kbps
2826  *	@ipg: the interpacket delay in tenths of nanoseconds
2827  *
2828  *	Return the current configuration of a HW Tx scheduler.
2829  */
2830 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
2831 		     unsigned int *ipg)
2832 {
2833 	unsigned int v, addr, bpt, cpt;
2834 
2835 	if (kbps) {
2836 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
2837 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
2838 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2839 		if (sched & 1)
2840 			v >>= 16;
2841 		bpt = (v >> 8) & 0xff;
2842 		cpt = v & 0xff;
2843 		if (!cpt)
2844 			*kbps = 0;        /* scheduler disabled */
2845 		else {
2846 			v = (adap->params.vpd.cclk * 1000) / cpt;
2847 			*kbps = (v * bpt) / 125;
2848 		}
2849 	}
2850 	if (ipg) {
2851 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
2852 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
2853 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2854 		if (sched & 1)
2855 			v >>= 16;
2856 		v &= 0xffff;
2857 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
2858 	}
2859 }
2860 
/*
 * One-time TP initialization: program the TP configuration, enable VLAN
 * acceleration, and, on offload-capable adapters, set the TP timers and
 * kick off FLSTINIT, waiting for it to complete.  If everything succeeded
 * the TP is taken out of reset.  Returns non-zero on timeout.
 */
static int tp_init(adapter_t *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		/* core clock passed in KHz */
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		/* start FL list initialization and poll for the bit to clear */
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	/* only release TP from reset if initialization did not time out */
	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
2881 
2882 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
2883 {
2884 	if (port_mask & ~((1 << adap->params.nports) - 1))
2885 		return -EINVAL;
2886 	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2887 			 port_mask << S_PORT0ACTIVE);
2888 	return 0;
2889 }
2890 
2891 /*
2892  * Perform the bits of HW initialization that are dependent on the number
2893  * of available ports.
2894  */
2895 static void init_hw_for_avail_ports(adapter_t *adap, int nports)
2896 {
2897 	int i;
2898 
2899 	if (nports == 1) {
2900 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2901 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2902 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2903 			     F_PORT0ACTIVE | F_ENFORCEPKT);
2904 		t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2905 	} else {
2906 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2907 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2908 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2909 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2910 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2911 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2912 			     F_ENFORCEPKT);
2913 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2914 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2915 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2916 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2917 		for (i = 0; i < 16; i++)
2918 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2919 				     (i << 16) | 0x1010);
2920 	}
2921 }
2922 
/*
 * Calibrate the MAC I/O impedance.  On XAUI adapters, trigger automatic
 * calibration up to 5 times; on success (no CALFAULT/CALBUSY) latch the
 * measured impedance back into the register and return 0, otherwise
 * return -1.  On RGMII adapters, program fixed pull-up/pull-down values
 * and trigger an impedance update; this path cannot fail.
 */
static int calibrate_xgm(adapter_t *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			/* start a calibration cycle and give it time */
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			(void) t3_read_reg(adapter, A_XGM_XAUI_IMP); /* flush */
			t3_os_sleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
2949 
/*
 * T3B (rev > 0) variant of the RGMII impedance calibration: pulse
 * CALRESET, then IMPSETUPDATE, then toggle CALUPDATE off and on.  The
 * exact toggle ordering follows the hardware's update protocol; no-op
 * for XAUI adapters.
 */
static void calibrate_xgm_t3b(adapter_t *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
2964 
/*
 * MC7 memory-controller timing parameters, written to A_MC7_PARM by
 * mc7_init().  Field meanings follow their names (DRAM command-to-command
 * delays in memory-clock cycles) — confirm against the MC7 register spec.
 */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* ACTIVE to PRECHARGE delay */
	unsigned char ActToRdWrDly;	/* ACTIVE to READ/WRITE delay */
	unsigned char PreCyc;		/* PRECHARGE cycle time */
	unsigned char RefCyc[5];	/* REFRESH cycle time, indexed by density */
	unsigned char BkCyc;		/* bank cycle time */
	unsigned char WrToRdDly;	/* WRITE to READ delay */
	unsigned char RdToWrDly;	/* READ to WRITE delay */
};
2974 
2975 /*
2976  * Write a value to a register and check that the write completed.  These
2977  * writes normally complete in a cycle or two, so one read should suffice.
2978  * The very first read exists to flush the posted write to the device.
2979  */
2980 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
2981 {
2982 	t3_write_reg(adapter,	addr, val);
2983 	(void) t3_read_reg(adapter, addr);                   /* flush */
2984 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
2985 		return 0;
2986 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2987 	return -EIO;
2988 }
2989 
/*
 * Bring up one MC7 memory controller and its attached memory.
 * @mc7_clock is the memory clock in KHz; @mem_type indexes the mode and
 * timing tables below.  The sequence is: enable the interface, run
 * impedance calibration (skipped in "slow" mode), program timing and mode
 * registers, enable periodic refresh and ECC, then run a BIST pass over
 * the entire part before marking it ready.  Returns 0 on success, -1 on
 * any failure.  The write/flush/sleep ordering is dictated by the
 * hardware and must not be rearranged.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	/* MODE register value per mem_type */
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	/* DRAM timing parameters per mem_type */
	static const struct mc7_timing_params mc7_timings[] = {
		{ 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
		{ 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
		{ 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
		{ 9,  3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
		{ 9,  4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	adapter_t *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	/* snapshot the part's configuration straps */
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */
	t3_os_sleep(1);

	if (!slow) {
		/* single calibration cycle; all status bits must clear */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		t3_os_sleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	/* program DRAM timing from the selected table entry */
	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	(void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* DRAM init: precharge, extended mode registers, then mode */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		/* take the DLL out of reset */
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
				 F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;  /* ns */
	mc7_clock /= 1000000;                          /* KHz->MHz, ns->us */

	/* enable periodic refresh with the computed divider */
	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	(void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */

	/* enable ECC and BIST the whole part (pattern 0) */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
		     F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	(void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */

	/* poll for BIST completion, up to 50 * 250ms */
	attempts = 50;
	do {
		t3_os_sleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

 out_fail:
	return -1;
}
3101 
/*
 * Tune PCIe link-layer parameters: program the ACK latency and replay
 * timer limits from the tables below, indexed by log2 of the negotiated
 * link width and the maximum payload size, with adjustments for the
 * number of fast-training sequences and (for ACK latency) whether L0s is
 * enabled.  Also clears any pending PEX errors and enables F_PCIE_CLIDECEN.
 */
static void config_pcie(adapter_t *adap)
{
	/* ACK latency, [log2(link width)][payload size code] */
	static const u16 ack_lat[4][6] = {
		{ 237, 416, 559, 1071, 2095, 4143 },
		{ 128, 217, 289, 545, 1057, 2081 },
		{ 73, 118, 154, 282, 538, 1050 },
		{ 67, 107, 86, 150, 278, 534 }
	};
	/* replay timer limit, same indexing */
	static const u16 rpl_tmr[4][6] = {
		{ 711, 1248, 1677, 3213, 6285, 12429 },
		{ 384, 651, 867, 1635, 3171, 6243 },
		{ 219, 354, 462, 846, 1614, 3150 },
		{ 201, 321, 258, 450, 834, 1602 }
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	/* max payload size from PCIe Device Control */
	t3_os_pci_read_config_2(adap,
				adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
				&val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	/* Link Control, used below for the L0s-enable check */
	t3_os_pci_read_config_2(adap,
				adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
				&val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* rev 0 parts have no separate Rx count; reuse the Tx value */
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
			G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)                            /* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	/* rev 0 uses a different ACK latency field layout */
	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	/* clear any stale PEX errors */
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
}
3153 
3154 /*
3155  * Initialize and configure T3 HW modules.  This performs the
3156  * initialization steps that need to be done once after a card is reset.
3157  * MAC and PHY initialization is handled separarely whenever a port is enabled.
3158  *
3159  * fw_params are passed to FW and their value is platform dependent.  Only the
3160  * top 8 bits are available for use, the rest must be 0.
3161  */
3162 int t3_init_hw(adapter_t *adapter, u32 fw_params)
3163 {
3164 	int err = -EIO, attempts = 100;
3165 	const struct vpd_params *vpd = &adapter->params.vpd;
3166 
3167 	if (adapter->params.rev > 0)
3168 		calibrate_xgm_t3b(adapter);
3169 	else if (calibrate_xgm(adapter))
3170 		goto out_err;
3171 
3172 	if (vpd->mclk) {
3173 		partition_mem(adapter, &adapter->params.tp);
3174 
3175 		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3176 		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3177 		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3178 		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3179 			        adapter->params.mc5.nfilters,
3180 			       	adapter->params.mc5.nroutes))
3181 			goto out_err;
3182 	}
3183 
3184 	if (tp_init(adapter, &adapter->params.tp))
3185 		goto out_err;
3186 
3187 #ifdef CONFIG_CHELSIO_T3_CORE
3188 	t3_tp_set_coalescing_size(adapter,
3189 				  min(adapter->params.sge.max_pkt_size,
3190 				      MAX_RX_COALESCING_LEN), 1);
3191 	t3_tp_set_max_rxsize(adapter,
3192 			     min(adapter->params.sge.max_pkt_size, 16384U));
3193 	ulp_config(adapter, &adapter->params.tp);
3194 #endif
3195 	if (is_pcie(adapter))
3196 		config_pcie(adapter);
3197 	else
3198 		t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3199 
3200 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3201 	init_hw_for_avail_ports(adapter, adapter->params.nports);
3202 	t3_sge_init(adapter, &adapter->params.sge);
3203 
3204 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3205 	t3_write_reg(adapter, A_CIM_BOOT_CFG,
3206 		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3207 	(void) t3_read_reg(adapter, A_CIM_BOOT_CFG);    /* flush */
3208 
3209 	do {                          /* wait for uP to initialize */
3210 		t3_os_sleep(20);
3211 	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3212 	if (!attempts)
3213 		goto out_err;
3214 
3215 	err = 0;
3216  out_err:
3217 	return err;
3218 }
3219 
3220 /**
3221  *	get_pci_mode - determine a card's PCI mode
3222  *	@adapter: the adapter
3223  *	@p: where to store the PCI settings
3224  *
3225  *	Determines a card's PCI mode and associated parameters, such as speed
3226  *	and width.
3227  */
3228 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
3229 {
3230 	static unsigned short speed_map[] = { 33, 66, 100, 133 };
3231 	u32 pci_mode, pcie_cap;
3232 
3233 	pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3234 	if (pcie_cap) {
3235 		u16 val;
3236 
3237 		p->variant = PCI_VARIANT_PCIE;
3238 		p->pcie_cap_addr = pcie_cap;
3239 		t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
3240 					&val);
3241 		p->width = (val >> 4) & 0x3f;
3242 		return;
3243 	}
3244 
3245 	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3246 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3247 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
3248 	pci_mode = G_PCIXINITPAT(pci_mode);
3249 	if (pci_mode == 0)
3250 		p->variant = PCI_VARIANT_PCI;
3251 	else if (pci_mode < 4)
3252 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3253 	else if (pci_mode < 8)
3254 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3255 	else
3256 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
3257 }
3258 
3259 /**
3260  *	init_link_config - initialize a link's SW state
3261  *	@lc: structure holding the link state
3262  *	@ai: information about the current card
3263  *
3264  *	Initializes the SW state maintained for each link, including the link's
3265  *	capabilities and default speed/duplex/flow-control/autonegotiation
3266  *	settings.
3267  */
3268 static void __devinit init_link_config(struct link_config *lc,
3269 				       unsigned int caps)
3270 {
3271 	lc->supported = caps;
3272 	lc->requested_speed = lc->speed = SPEED_INVALID;
3273 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3274 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3275 	if (lc->supported & SUPPORTED_Autoneg) {
3276 		lc->advertising = lc->supported;
3277 		lc->autoneg = AUTONEG_ENABLE;
3278 		lc->requested_fc |= PAUSE_AUTONEG;
3279 	} else {
3280 		lc->advertising = 0;
3281 		lc->autoneg = AUTONEG_DISABLE;
3282 	}
3283 }
3284 
3285 /**
3286  *	mc7_calc_size - calculate MC7 memory size
3287  *	@cfg: the MC7 configuration
3288  *
3289  *	Calculates the size of an MC7 memory in bytes from the value of its
3290  *	configuration register.
3291  */
3292 static unsigned int __devinit mc7_calc_size(u32 cfg)
3293 {
3294 	unsigned int width = G_WIDTH(cfg);
3295 	unsigned int banks = !!(cfg & F_BKS) + 1;
3296 	unsigned int org = !!(cfg & F_ORG) + 1;
3297 	unsigned int density = G_DEN(cfg);
3298 	unsigned int MBs = ((256 << density) * banks) / (org << width);
3299 
3300 	return MBs << 20;
3301 }
3302 
3303 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
3304 			       unsigned int base_addr, const char *name)
3305 {
3306 	u32 cfg;
3307 
3308 	mc7->adapter = adapter;
3309 	mc7->name = name;
3310 	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3311 	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3312 	mc7->size = mc7_calc_size(cfg);
3313 	mc7->width = G_WIDTH(cfg);
3314 }
3315 
3316 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
3317 {
3318 	mac->adapter = adapter;
3319 	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3320 	mac->nucast = 1;
3321 
3322 	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3323 		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3324 			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3325 		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3326 				 F_ENRGMII, 0);
3327 	}
3328 }
3329 
/*
 * Early HW setup done before ports are brought up: initialize the MDIO
 * interface, set the I2C clock divider, drive the GPIO outputs, and
 * enable the MAC clocks on both XGMACs so their registers become
 * accessible.  The write/flush ordering on A_XGM_PORT_CFG is required to
 * apply the clock-divider reset after the clocks are enabled.
 */
void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,                  /* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	/* board-specific GPIO outputs, plus GPIO0 driven high */
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */
}
3353 
3354 /*
3355  * Reset the adapter.  PCIe cards lose their config space during reset, PCI-X
3356  * ones don't.
3357  */
3358 int t3_reset_adapter(adapter_t *adapter)
3359 {
3360 	int i, save_and_restore_pcie =
3361 	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3362 	uint16_t devid = 0;
3363 
3364 	if (save_and_restore_pcie)
3365 		t3_os_pci_save_state(adapter);
3366 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3367 
3368  	/*
3369 	 * Delay. Give Some time to device to reset fully.
3370 	 * XXX The delay time should be modified.
3371 	 */
3372 	for (i = 0; i < 10; i++) {
3373 		t3_os_sleep(50);
3374 		t3_os_pci_read_config_2(adapter, 0x00, &devid);
3375 		if (devid == 0x1425)
3376 			break;
3377 	}
3378 
3379 	if (devid != 0x1425)
3380 		return -1;
3381 
3382 	if (save_and_restore_pcie)
3383 		t3_os_pci_restore_state(adapter);
3384 	return 0;
3385 }
3386 
3387 /*
3388  * Initialize adapter SW state for the various HW modules, set initial values
3389  * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3390  * interface.
3391  */
3392 int __devinit t3_prep_adapter(adapter_t *adapter,
3393 			      const struct adapter_info *ai, int reset)
3394 {
3395 	int ret;
3396 	unsigned int i, j = 0;
3397 
3398 	get_pci_mode(adapter, &adapter->params.pci);
3399 
3400 	adapter->params.info = ai;
3401 	adapter->params.nports = ai->nports;
3402 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3403 	adapter->params.linkpoll_period = 0;
3404 	adapter->params.stats_update_period = is_10G(adapter) ?
3405 		MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3406 	adapter->params.pci.vpd_cap_addr =
3407 		t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
3408 
3409 	ret = get_vpd_params(adapter, &adapter->params.vpd);
3410 	if (ret < 0)
3411 		return ret;
3412 
3413 	if (reset && t3_reset_adapter(adapter))
3414 		return -1;
3415 
3416 	t3_sge_prep(adapter, &adapter->params.sge);
3417 
3418 	if (adapter->params.vpd.mclk) {
3419 		struct tp_params *p = &adapter->params.tp;
3420 
3421 		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3422 		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3423 		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3424 
3425 		p->nchan = ai->nports;
3426 		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3427 		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3428 		p->cm_size = t3_mc7_size(&adapter->cm);
3429 		p->chan_rx_size = p->pmrx_size / 2;     /* only 1 Rx channel */
3430 		p->chan_tx_size = p->pmtx_size / p->nchan;
3431 		p->rx_pg_size = 64 * 1024;
3432 		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3433 		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3434 		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3435 		p->ntimer_qs = p->cm_size >= (128 << 20) ||
3436 			       adapter->params.rev > 0 ? 12 : 6;
3437 		p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
3438 
3439 		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3440 		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3441 					       DEFAULT_NFILTERS : 0;
3442 		adapter->params.mc5.nroutes = 0;
3443 		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3444 
3445 #ifdef CONFIG_CHELSIO_T3_CORE
3446 		init_mtus(adapter->params.mtus);
3447 		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3448 #endif
3449 	}
3450 
3451 	early_hw_init(adapter, ai);
3452 
3453 	for_each_port(adapter, i) {
3454 		u8 hw_addr[6];
3455 		struct port_info *p = &adapter->port[i];
3456 
3457 		while (!adapter->params.vpd.port_type[j])
3458 			++j;
3459 
3460 		p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3461 		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3462 				       ai->mdio_ops);
3463 		mac_prep(&p->mac, adapter, j);
3464 		++j;
3465 
3466 		/*
3467 		 * The VPD EEPROM stores the base Ethernet address for the
3468 		 * card.  A port's address is derived from the base by adding
3469 		 * the port's index to the base's low octet.
3470 		 */
3471 		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3472 		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3473 
3474 		t3_os_set_hw_addr(adapter, i, hw_addr);
3475 		init_link_config(&p->link_config, p->port_type->caps);
3476 		p->phy.ops->power_down(&p->phy, 1);
3477 		if (!(p->port_type->caps & SUPPORTED_IRQ))
3478 			adapter->params.linkpoll_period = 10;
3479 	}
3480 
3481 	return 0;
3482 }
3483 
/*
 * Drive the GPIO0 output high; per the function name this presumably
 * lights the adapter's "ready" LED (GPIO0 is configured as an output in
 * early_hw_init()) — confirm against the board wiring.
 */
void t3_led_ready(adapter_t *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
3489 
3490 void t3_port_failover(adapter_t *adapter, int port)
3491 {
3492 	u32 val;
3493 
3494 	val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
3495 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3496 			 val);
3497 }
3498 
3499 void t3_failover_done(adapter_t *adapter, int port)
3500 {
3501 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3502 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
3503 }
3504 
3505 void t3_failover_clear(adapter_t *adapter)
3506 {
3507 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
3508 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
3509 }
3510