/**************************************************************************
SPDX-License-Identifier: BSD-2-Clause

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
#include <cxgb_include.h>

#undef msleep
#define msleep t3_os_sleep

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
			int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
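
/*
 * Illustrative use only (not part of the driver): poll a busy bit until it
 * clears, capturing the final register value.  The register and field names
 * below are hypothetical.
 *
 *	u32 val;
 *	if (t3_wait_op_done_val(adapter, A_SOME_CTRL, F_SOME_BUSY, 0,
 *				100, 10, &val) == 0)
 *		;	// completed within 100 polls; val holds the register
 *	else
 *		;	// -EAGAIN: still busy after 100 polls, 10us apart
 */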

/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register. Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
		   unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	(void) t3_read_reg(adapter, addr); /* flush */
}
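
/*
 * Illustrative use only: updating a multi-bit field with the driver's usual
 * V_FOO()/M_FOO() shift/mask macro pattern.  Treat the specific macro names
 * as an assumption; V_CLKDIV() appears in mi1_init() below, and a matching
 * M_CLKDIV field mask is assumed to exist in the register definitions.
 *
 *	// Replace the CLKDIV field of A_MI1_CFG with a new divisor:
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_CLKDIV(M_CLKDIV),
 *			 V_CLKDIV(new_div));
 *
 * The mask argument selects the bits to clear; val supplies their new value.
 */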

/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @start_idx: index of first indirect register to read
 * @nregs: how many indirect registers to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static int shift[] = { 0, 0, 16, 24 };
	static int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	adapter_t *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
				     start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset + A_MC7_BD_DATA0);
				val64 |= (u64)val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64)val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}

/*
 * Low-level I2C read and write routines. These simply read and write a
 * single byte with the option of indicating a "continue" if another operation
 * is to be chained. Generally most code will use higher-level routines to
 * read and write to I2C Slave Devices.
 */
#define I2C_ATTEMPTS 100

/*
 * Read an 8-bit value from the I2C bus. If the "chained" parameter is
 * non-zero then a STOP bit will not be written after the read command. On
 * error (the read timed out, etc.), a negative errno will be returned (e.g.
 * -EAGAIN, etc.). On success, the 8-bit value read from the I2C bus is
 * stored into the buffer *valp and the value of the I2C ACK bit is returned
 * as a 0/1 value.
 */
int t3_i2c_read8(adapter_t *adapter, int chained, u8 *valp)
{
	int ret;
	u32 opval;

	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_I2C_OP,
		     F_I2C_READ | (chained ? F_I2C_CONT : 0));
	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
				  I2C_ATTEMPTS, 10, &opval);
	if (ret >= 0) {
		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
		*valp = G_I2C_DATA(t3_read_reg(adapter, A_I2C_DATA));
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

/*
 * Write an 8-bit value to the I2C bus. If the "chained" parameter is
 * non-zero, then a STOP bit will not be written after the write command. On
 * error (the write timed out, etc.), a negative errno will be returned (e.g.
 * -EAGAIN, etc.). On success, the value of the I2C ACK bit is returned as a
 * 0/1 value.
 */
int t3_i2c_write8(adapter_t *adapter, int chained, u8 val)
{
	int ret;
	u32 opval;

	MDIO_LOCK(adapter);
	t3_write_reg(adapter, A_I2C_DATA, V_I2C_DATA(val));
	t3_write_reg(adapter, A_I2C_OP,
		     F_I2C_WRITE | (chained ? F_I2C_CONT : 0));
	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
				  I2C_ATTEMPTS, 10, &opval);
	if (ret >= 0)
		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
	MDIO_UNLOCK(adapter);
	return ret;
}
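
/*
 * Illustrative use only: a sketch of the kind of higher-level routine the
 * comment above alludes to, reading a 16-bit quantity as two chained byte
 * reads.  The first read passes chained != 0 so no STOP is generated
 * between the bytes; the second completes the transaction.
 *
 *	u8 hi, lo;
 *	int ret = t3_i2c_read8(adapter, 1, &hi);	// chained: no STOP
 *	if (ret >= 0)
 *		ret = t3_i2c_read8(adapter, 0, &lo);	// final byte, STOP
 *	if (ret >= 0)
 *		word = (hi << 8) | lo;	// ret is now the ACK bit (0/1)
 */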

/*
 * Initialize MI1.
 */
static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
		int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
	return ret;
}

int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
		 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	MDIO_UNLOCK(adapter);
	return ret;
}

static struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};

/*
 * MI1 read/write operations for clause 45 PHYs.
 */
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

static struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
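
/*
 * Illustrative use only: clear the power-down bit and set the reset bit of
 * a PHY's control register in a single read-modify-write.  This is the same
 * call t3_phy_reset() below makes to kick off a reset.
 *
 *	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
 */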

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 * t3_phy_advertise_fiber - set fiber PHY advertisement register
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a fiber PHY's advertisement register to advertise the
 * requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex. This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}
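
/*
 * Illustrative use only: force a copper PHY to 100 Mb/s full duplex, which
 * also turns auto-negotiation off.  With SPEED_1000 the function would
 * re-enable autoneg instead, since GigE requires it.
 *
 *	err = t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 */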

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);

	if (err)
		return err;
	return (status & 1) ? cphy_cause_link_change : 0;
}

static struct adapter_info t3_adap_info[] = {
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio PE9000" },
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio T302" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	  F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 1, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	  F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	  F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T320" },
	{ 4, 0, 0,
	  F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
	  F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
	  { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
	  &mi1_mdio_ops, "Chelsio T304" },
	{ 0 },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};

/*
 * Return the adapter_info structure with a given index. Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(pinfo_t *pinfo, int phy_addr,
			const struct mdio_ops *ops);
};

static struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ t3_mv88e1xxx_phy_prep },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ t3_tn1010_phy_prep },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
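
/*
 * For reference, VPD_ENTRY(sn, SERNUM_LEN) expands to the three fields of a
 * VPD-R keyword entry: a 2-byte keyword, a 1-byte length, and the data:
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[SERNUM_LEN];
 */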

/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);		/* part number */
	VPD_ENTRY(ec, ECNUM_LEN);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);	/* serial number */
	VPD_ENTRY(na, 12);		/* MAC address base */
	VPD_ENTRY(cclk, 6);		/* core clock */
	VPD_ENTRY(mclk, 6);		/* mem clock */
	VPD_ENTRY(uclk, 6);		/* uP clk */
	VPD_ENTRY(mdc, 6);		/* MDIO clk */
	VPD_ENTRY(mt, 2);		/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);		/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);		/* XAUI1 config */
	VPD_ENTRY(port0, 2);		/* PHY0 complex */
	VPD_ENTRY(port1, 2);		/* PHY1 complex */
	VPD_ENTRY(port2, 2);		/* PHY2 complex */
	VPD_ENTRY(port3, 2);		/* PHY3 complex */
	VPD_ENTRY(rv, 1);		/* csum */
	u32 pad;			/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL 40
#define EEPROM_STAT_ADDR 0x4000
#define VPD_BASE 0xc00

/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability. A zero is written to the flag bit when the
 * address is written to the control register. The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
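
/*
 * Illustrative use only: fetch one 32-bit word of the VPD area.  Addresses
 * must be 4-byte aligned and below EEPROMSIZE (EEPROM_STAT_ADDR excepted).
 *
 *	u32 word;
 *	int err = t3_seeprom_read(adapter, VPD_BASE, &word);
 *	if (!err)
 *		;	// word holds EEPROM bytes VPD_BASE..VPD_BASE+3,
 *			// already converted to CPU byte order
 */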

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(adapter_t *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 * get_desc_len - get the length of a vpd descriptor.
 * @adapter: the adapter
 * @offset: first byte offset of the vpd descriptor
 *
 * Retrieves the length of the small/large resource
 * data type starting at offset.
 */
static int get_desc_len(adapter_t *adapter, u32 offset)
{
	u32 read_offset, tmp, shift, len = 0;
	u8 tag, buf[8];
	int ret;

	read_offset = offset & 0xfffffffc;
	shift = offset & 0x03;

	ret = t3_seeprom_read(adapter, read_offset, &tmp);
	if (ret < 0)
		return ret;

	*((u32 *)buf) = cpu_to_le32(tmp);

	tag = buf[shift];
	if (tag & 0x80) {
		ret = t3_seeprom_read(adapter, read_offset + 4, &tmp);
		if (ret < 0)
			return ret;

		*((u32 *)(&buf[4])) = cpu_to_le32(tmp);
		len = (buf[shift + 1] & 0xff) +
		      ((buf[shift + 2] << 8) & 0xff00) + 3;
	} else
		len = (tag & 0x07) + 1;

	return len;
}
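
/*
 * Worked example of the tag arithmetic above (byte values illustrative):
 * a large-resource tag such as 0x90 (VPD-R) has bit 7 set, so the length
 * comes from the next two bytes (little-endian) plus the 3 header bytes;
 * e.g. the bytes 90 40 00 describe 0x0040 data bytes, for a descriptor
 * length of 0x40 + 3 = 67.  A small-resource tag such as 0x78 keeps its
 * length in bits 2:0, here (0x78 & 7) + 1 = 1.
 */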

/**
 * is_end_tag - check if a vpd tag is the end tag.
 * @adapter: the adapter
 * @offset: first byte offset of the tag
 *
 * Checks if the tag located at offset is the end tag.
 */
static int is_end_tag(adapter_t *adapter, u32 offset)
{
	u32 read_offset, shift, tmp;
	int ret;
	u8 buf[4];

	read_offset = offset & 0xfffffffc;
	shift = offset & 0x03;

	ret = t3_seeprom_read(adapter, read_offset, &tmp);
	if (ret)
		return ret;
	*((u32 *)buf) = cpu_to_le32(tmp);

	if (buf[shift] == 0x78)
		return 1;
	else
		return 0;
}

/**
 * t3_get_vpd_len - compute the length of a vpd structure
 * @adapter: the adapter
 * @vpd: contains the offset of the first byte of vpd
 *
 * Computes the length of the vpd structure starting at vpd->offset.
 */
int t3_get_vpd_len(adapter_t *adapter, struct generic_vpd *vpd)
{
	u32 len = 0, offset;
	int inc, ret;

	offset = vpd->offset;

	while (offset < (vpd->offset + MAX_VPD_BYTES)) {
		ret = is_end_tag(adapter, offset);
		if (ret < 0)
			return ret;
		else if (ret == 1)
			break;

		inc = get_desc_len(adapter, offset);
		if (inc < 0)
			return inc;
		len += inc;
		offset += inc;
	}
	return (len + 1);
}

/**
 * t3_read_vpd - read the stream of bytes containing a vpd structure
 * @adapter: the adapter
 * @vpd: contains a buffer that holds the stream of bytes
 *
 * Reads the vpd structure starting at vpd->offset into vpd->data,
 * the length of the byte stream to read is vpd->len.
 */
int t3_read_vpd(adapter_t *adapter, struct generic_vpd *vpd)
{
	u32 i;
	int ret;

	for (i = 0; i < vpd->len; i += 4) {
		ret = t3_seeprom_read(adapter, vpd->offset + i,
				      (u32 *)&(vpd->data[i]));
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
	memcpy(p->ec, vpd.ec_data, ECNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
		p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
		p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
		p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* BIOS boot header */
typedef struct boot_header_s {
	u8 signature[2];	/* signature */
	u8 length;		/* image length (includes header) */
	u8 offset[4];		/* initialization vector */
	u8 reserved[19];	/* reserved */
	u8 exheader[2];		/* offset to expansion header */
} boot_header_t;

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,		/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,		/* flash address holding FW version */
	FW_VERS_ADDR_PRE8 = 0x77ffc,	/* flash address holding FW version pre8 */
	FW_MIN_SIZE = 8,		/* at least version and csum */
	FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
	FW_MAX_SIZE_PRE8 = FW_VERS_ADDR_PRE8 - FW_FLASH_BOOT_ADDR,

	BOOT_FLASH_BOOT_ADDR = 0x0,	/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,	/* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,		/* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(boot_header_t),	/* at least basic header */
	BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC	/* 1 byte * length increment */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
		  u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 * If @byte_oriented is set the write data is stored as a 32-bit
 * big-endian array, otherwise in the processor's native endianness.
 */
static int t3_write_flash(adapter_t *adapter, unsigned int addr,
			  unsigned int n, const u8 *data,
			  int byte_oriented)
{
	int ret;
	u32 buf[64];
	unsigned int c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		val = *(const u32 *)data;
		data += c;
		if (byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}

/**
 * t3_get_tp_version - read the tp sram version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol sram version from sram.
 */
int t3_get_tp_version(adapter_t *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 * t3_check_tpsram_version - check the protocol SRAM version
 * @adapter: the adapter
 *
 * Reads the protocol SRAM version and compares it against the version
 * the driver was compiled for. Returns 0 on a match, -EINVAL otherwise.
 */
int t3_check_tpsram_version(adapter_t *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;
	else {
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 * t3_check_tpsram - check if the provided protocol SRAM image
 * is compatible with this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: image size
 *
 * Checks if an adapter's tp sram is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
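
/*
 * Why the loop above compares against 0xffffffff: the image ends with the
 * one's complement of the sum of all preceding 32-bit words.  If those
 * words sum to S (mod 2^32), the stored checksum word is ~S, and
 * S + ~S == 0xffffffff for any S.  E.g. S = 0x12345678 gives a checksum
 * word of 0xedcba987, and 0x12345678 + 0xedcba987 == 0xffffffff.
 */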
1198
1199 enum fw_version_type {
1200 FW_VERSION_N3,
1201 FW_VERSION_T3
1202 };
1203
1204 /**
1205 * t3_get_fw_version - read the firmware version
1206 * @adapter: the adapter
1207 * @vers: where to place the version
1208 *
1209 * Reads the FW version from flash. Note that we had to move the version
1210 * due to FW size. If we don't find a valid FW version in the new location
1211 * we fall back and read the old location.
1212 */
t3_get_fw_version(adapter_t * adapter,u32 * vers)1213 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
1214 {
1215 int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1216 if (!ret && *vers != 0xffffffff)
1217 return 0;
1218 else
1219 return t3_read_flash(adapter, FW_VERS_ADDR_PRE8, 1, vers, 0);
1220 }
1221
1222 /**
1223 * t3_check_fw_version - check if the FW is compatible with this driver
1224 * @adapter: the adapter
1225 *
1226 * Checks if an adapter's FW is compatible with the driver. Returns 0
1227 * if the versions are compatible, a negative error otherwise.
1228 */
t3_check_fw_version(adapter_t * adapter)1229 int t3_check_fw_version(adapter_t *adapter)
1230 {
1231 int ret;
1232 u32 vers;
1233 unsigned int type, major, minor;
1234
1235 ret = t3_get_fw_version(adapter, &vers);
1236 if (ret)
1237 return ret;
1238
1239 type = G_FW_VERSION_TYPE(vers);
1240 major = G_FW_VERSION_MAJOR(vers);
1241 minor = G_FW_VERSION_MINOR(vers);
1242
1243 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1244 minor == FW_VERSION_MINOR)
1245 return 0;
1246
1247 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1248 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1249 "driver compiled for version %u.%u\n", major, minor,
1250 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1251 else {
1252 CH_WARN(adapter, "found newer FW version(%u.%u), "
1253 "driver compiled for version %u.%u\n", major, minor,
1254 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1255 return 0;
1256 }
1257 return -EINVAL;
1258 }
1259
1260 /**
1261 * t3_flash_erase_sectors - erase a range of flash sectors
1262 * @adapter: the adapter
1263 * @start: the first sector to erase
1264 * @end: the last sector to erase
1265 *
1266 * Erases the sectors in the given range.
1267 */
t3_flash_erase_sectors(adapter_t * adapter,int start,int end)1268 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
1269 {
1270 while (start <= end) {
1271 int ret;
1272
1273 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1274 (ret = sf1_write(adapter, 4, 0,
1275 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1276 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1277 return ret;
1278 start++;
1279 }
1280 return 0;
1281 }
1282
1283 /*
1284 * t3_load_fw - download firmware
1285 * @adapter: the adapter
1286 * @fw_data: the firmware image to write
1287 * @size: image size
1288 *
1289 * Write the supplied firmware image to the card's serial flash.
1290 * The FW image has the following sections: @size - 8 bytes of code and
1291 * data, followed by 4 bytes of FW version, followed by the 32-bit
1292 * 1's complement checksum of the whole image.
1293 */
t3_load_fw(adapter_t * adapter,const u8 * fw_data,unsigned int size)1294 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
1295 {
1296 u32 version, csum, fw_version_addr;
1297 unsigned int i;
1298 const u32 *p = (const u32 *)fw_data;
1299 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1300
1301 if ((size & 3) || size < FW_MIN_SIZE)
1302 return -EINVAL;
1303 if (size - 8 > FW_MAX_SIZE)
1304 return -EFBIG;
1305
1306 version = ntohl(*(const u32 *)(fw_data + size - 8));
1307 if (G_FW_VERSION_MAJOR(version) < 8) {
1308
1309 fw_version_addr = FW_VERS_ADDR_PRE8;
1310
1311 if (size - 8 > FW_MAX_SIZE_PRE8)
1312 return -EFBIG;
1313 } else
1314 fw_version_addr = FW_VERS_ADDR;
1315
1316 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1317 csum += ntohl(p[i]);
1318 if (csum != 0xffffffff) {
1319 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1320 csum);
1321 return -EINVAL;
1322 }
1323
1324 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1325 if (ret)
1326 goto out;
1327
1328 size -= 8; /* trim off version and checksum */
1329 for (addr = FW_FLASH_BOOT_ADDR; size; ) {
1330 unsigned int chunk_size = min(size, 256U);
1331
1332 ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
1333 if (ret)
1334 goto out;
1335
1336 addr += chunk_size;
1337 fw_data += chunk_size;
1338 size -= chunk_size;
1339 }
1340
1341 ret = t3_write_flash(adapter, fw_version_addr, 4, fw_data, 1);
1342 out:
1343 if (ret)
1344 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1345 return ret;
1346 }
1347
1348 /*
1349 * t3_load_boot - download boot flash
1350 * @adapter: the adapter
1351 * @boot_data: the boot image to write
1352 * @size: image size
1353 *
1354 * Write the supplied boot image to the card's serial flash.
1355 * The boot image has the following sections: a 28-byte header and the
1356 * boot image.
1357 */
t3_load_boot(adapter_t * adapter,u8 * boot_data,unsigned int size)1358 int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
1359 {
1360 boot_header_t *header = (boot_header_t *)boot_data;
1361 int ret;
1362 unsigned int addr;
1363 unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
1364 unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;
1365
1366 /*
1367 * Perform some primitive sanity testing to avoid accidentally
1368 * writing garbage over the boot sectors. We ought to check for
1369 * more but it's not worth it for now ...
1370 */
1371 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1372 CH_ERR(adapter, "boot image too small/large\n");
1373 return -EFBIG;
1374 }
1375 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1376 CH_ERR(adapter, "boot image missing signature\n");
1377 return -EINVAL;
1378 }
1379 if (header->length * BOOT_SIZE_INC != size) {
1380 CH_ERR(adapter, "boot image header length != image length\n");
1381 return -EINVAL;
1382 }
1383
1384 ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
1385 if (ret)
1386 goto out;
1387
1388 for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
1389 unsigned int chunk_size = min(size, 256U);
1390
1391 ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
1392 if (ret)
1393 goto out;
1394
1395 addr += chunk_size;
1396 boot_data += chunk_size;
1397 size -= chunk_size;
1398 }
1399
1400 out:
1401 if (ret)
1402 CH_ERR(adapter, "boot image download failed, error %d\n", ret);
1403 return ret;
1404 }
1405
1406 #define CIM_CTL_BASE 0x2000
1407
1408 /**
1409 * t3_cim_ctl_blk_read - read a block from CIM control region
1410 * @adap: the adapter
1411 * @addr: the start address within the CIM control region
1412 * @n: number of words to read
1413 * @valp: where to store the result
1414 *
1415 * Reads a block of 4-byte words from the CIM control region.
1416 */
t3_cim_ctl_blk_read(adapter_t * adap,unsigned int addr,unsigned int n,unsigned int * valp)1417 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
1418 unsigned int *valp)
1419 {
1420 int ret = 0;
1421
1422 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1423 return -EBUSY;
1424
1425 for ( ; !ret && n--; addr += 4) {
1426 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1427 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1428 0, 5, 2);
1429 if (!ret)
1430 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1431 }
1432 return ret;
1433 }
1434
t3_gate_rx_traffic(struct cmac * mac,u32 * rx_cfg,u32 * rx_hash_high,u32 * rx_hash_low)1435 static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1436 u32 *rx_hash_high, u32 *rx_hash_low)
1437 {
1438 /* stop Rx unicast traffic */
1439 t3_mac_disable_exact_filters(mac);
1440
1441 /* stop broadcast, multicast, promiscuous mode traffic */
1442 *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG + mac->offset);
1443 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG + mac->offset,
1444 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1445 F_DISBCAST);
1446
1447 *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH +
1448 mac->offset);
1449 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset, 0);
1450
1451 *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW +
1452 mac->offset);
1453 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset, 0);
1454
1455 /* Leave time to drain max RX fifo */
1456 msleep(1);
1457 }
1458
t3_open_rx_traffic(struct cmac * mac,u32 rx_cfg,u32 rx_hash_high,u32 rx_hash_low)1459 static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1460 u32 rx_hash_high, u32 rx_hash_low)
1461 {
1462 t3_mac_enable_exact_filters(mac);
1463 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG + mac->offset,
1464 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1465 rx_cfg);
1466 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset,
1467 rx_hash_high);
1468 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset,
1469 rx_hash_low);
1470 }
1471
t3_detect_link_fault(adapter_t * adapter,int port_id)1472 static int t3_detect_link_fault(adapter_t *adapter, int port_id)
1473 {
1474 struct port_info *pi = adap2pinfo(adapter, port_id);
1475 struct cmac *mac = &pi->mac;
1476 uint32_t rx_cfg, rx_hash_high, rx_hash_low;
1477 int link_fault;
1478
1479 /* stop rx */
1480 t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1481 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1482
1483 /* clear status and make sure intr is enabled */
1484 (void) t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1485 t3_xgm_intr_enable(adapter, port_id);
1486
1487 /* restart rx */
1488 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
1489 t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1490
1491 link_fault = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1492 return (link_fault & F_LINKFAULTCHANGE ? 1 : 0);
1493 }
1494
t3_clear_faults(adapter_t * adapter,int port_id)1495 static void t3_clear_faults(adapter_t *adapter, int port_id)
1496 {
1497 struct port_info *pi = adap2pinfo(adapter, port_id);
1498 struct cmac *mac = &pi->mac;
1499
1500 if (adapter->params.nports <= 2) {
1501 t3_xgm_intr_disable(adapter, pi->port_id);
1502 t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1503 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, F_XGM_INT);
1504 t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset,
1505 F_XGM_INT, F_XGM_INT);
1506 t3_xgm_intr_enable(adapter, pi->port_id);
1507 }
1508 }
1509
1510 /**
1511 * t3_link_changed - handle interface link changes
1512 * @adapter: the adapter
1513 * @port_id: the port index that changed link state
1514 *
1515 * Called when a port's link settings change to propagate the new values
1516 * to the associated PHY and MAC. After performing the common tasks it
1517 * invokes an OS-specific handler.
1518 */
t3_link_changed(adapter_t * adapter,int port_id)1519 void t3_link_changed(adapter_t *adapter, int port_id)
1520 {
1521 int link_ok, speed, duplex, fc, link_fault, link_state;
1522 struct port_info *pi = adap2pinfo(adapter, port_id);
1523 struct cphy *phy = &pi->phy;
1524 struct cmac *mac = &pi->mac;
1525 struct link_config *lc = &pi->link_config;
1526
1527 link_ok = lc->link_ok;
1528 speed = lc->speed;
1529 duplex = lc->duplex;
1530 fc = lc->fc;
1531 link_fault = 0;
1532
1533 phy->ops->get_link_status(phy, &link_state, &speed, &duplex, &fc);
1534 link_ok = (link_state == PHY_LINK_UP);
1535 if (link_state != PHY_LINK_PARTIAL)
1536 phy->rst = 0;
1537 else if (++phy->rst == 3) {
1538 phy->ops->reset(phy, 0);
1539 phy->rst = 0;
1540 }
1541
1542 if (link_ok == 0)
1543 pi->link_fault = LF_NO;
1544
1545 if (lc->requested_fc & PAUSE_AUTONEG)
1546 fc &= lc->requested_fc;
1547 else
1548 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1549
1550 /* Update mac speed before checking for link fault. */
1551 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE &&
1552 (speed != lc->speed || duplex != lc->duplex || fc != lc->fc))
1553 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1554
1555 /*
1556 * Check for link faults if any of these is true:
1557 * a) A link fault is suspected, and PHY says link ok
1558 * b) PHY link transitioned from down -> up
1559 */
1560 if (adapter->params.nports <= 2 &&
1561 ((pi->link_fault && link_ok) || (!lc->link_ok && link_ok))) {
1562
1563 link_fault = t3_detect_link_fault(adapter, port_id);
1564 if (link_fault) {
1565 if (pi->link_fault != LF_YES) {
1566 mac->stats.link_faults++;
1567 pi->link_fault = LF_YES;
1568 }
1569
1570 if (uses_xaui(adapter)) {
1571 if (adapter->params.rev >= T3_REV_C)
1572 t3c_pcs_force_los(mac);
1573 else
1574 t3b_pcs_reset(mac);
1575 }
1576
1577 /* Don't report link up */
1578 link_ok = 0;
1579 } else {
1580 /* clear faults here if this was a false alarm. */
1581 if (pi->link_fault == LF_MAYBE &&
1582 link_ok && lc->link_ok)
1583 t3_clear_faults(adapter, port_id);
1584
1585 pi->link_fault = LF_NO;
1586 }
1587 }
1588
1589 if (link_ok == lc->link_ok && speed == lc->speed &&
1590 duplex == lc->duplex && fc == lc->fc)
1591 return; /* nothing changed */
1592
1593 lc->link_ok = (unsigned char)link_ok;
1594 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1595 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1596 lc->fc = fc;
1597
1598 if (link_ok) {
1599
1600 /* down -> up, or up -> up with changed settings */
1601
1602 if (adapter->params.rev > 0 && uses_xaui(adapter)) {
1603
1604 if (adapter->params.rev >= T3_REV_C)
1605 t3c_pcs_force_los(mac);
1606 else
1607 t3b_pcs_reset(mac);
1608
1609 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1610 F_TXACTENABLE | F_RXEN);
1611 }
1612
1613 /* disable TX FIFO drain */
1614 t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset,
1615 F_ENDROPPKT, 0);
1616
1617 t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1618 t3_set_reg_field(adapter, A_XGM_STAT_CTRL + mac->offset,
1619 F_CLRSTATS, 1);
1620 t3_clear_faults(adapter, port_id);
1621
1622 } else {
1623
1624 /* up -> down */
1625
1626 if (adapter->params.rev > 0 && uses_xaui(adapter)) {
1627 t3_write_reg(adapter,
1628 A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1629 }
1630
1631 t3_xgm_intr_disable(adapter, pi->port_id);
1632 if (adapter->params.nports <= 2) {
1633 t3_set_reg_field(adapter,
1634 A_XGM_INT_ENABLE + mac->offset,
1635 F_XGM_INT, 0);
1636
1637 t3_mac_disable(mac, MAC_DIRECTION_RX);
1638
1639 /*
1640 * Make sure Tx FIFO continues to drain, even as rxen is
1641 * left high to help detect and indicate remote faults.
1642 */
1643 t3_set_reg_field(adapter,
1644 A_XGM_TXFIFO_CFG + mac->offset, 0, F_ENDROPPKT);
1645 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1646 t3_write_reg(adapter,
1647 A_XGM_TX_CTRL + mac->offset, F_TXEN);
1648 t3_write_reg(adapter,
1649 A_XGM_RX_CTRL + mac->offset, F_RXEN);
1650 }
1651 }
1652
1653 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc,
1654 mac->was_reset);
1655 mac->was_reset = 0;
1656 }
1657
1658 /**
1659 * t3_link_start - apply link configuration to MAC/PHY
1660 * @phy: the PHY to setup
1661 * @mac: the MAC to setup
1662 * @lc: the requested link configuration
1663 *
1664 * Set up a port's MAC and PHY according to a desired link configuration.
1665 * - If the PHY can auto-negotiate first decide what to advertise, then
1666 * enable/disable auto-negotiation as desired, and reset.
1667 * - If the PHY does not auto-negotiate just reset it.
1668 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1669 * otherwise do it later based on the outcome of auto-negotiation.
1670 */
t3_link_start(struct cphy * phy,struct cmac * mac,struct link_config * lc)1671 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1672 {
1673 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1674
1675 lc->link_ok = 0;
1676 if (lc->supported & SUPPORTED_Autoneg) {
1677 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1678 if (fc) {
1679 lc->advertising |= ADVERTISED_Asym_Pause;
1680 if (fc & PAUSE_RX)
1681 lc->advertising |= ADVERTISED_Pause;
1682 }
1683
1684 phy->ops->advertise(phy, lc->advertising);
1685
1686 if (lc->autoneg == AUTONEG_DISABLE) {
1687 lc->speed = lc->requested_speed;
1688 lc->duplex = lc->requested_duplex;
1689 lc->fc = (unsigned char)fc;
1690 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1691 fc);
1692 /* Also disables autoneg */
1693 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1694 /* PR 5666. Power phy up when doing an ifup */
1695 if (!is_10G(phy->adapter))
1696 phy->ops->power_down(phy, 0);
1697 } else
1698 phy->ops->autoneg_enable(phy);
1699 } else {
1700 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1701 lc->fc = (unsigned char)fc;
1702 phy->ops->reset(phy, 0);
1703 }
1704 return 0;
1705 }
1706
1707 /**
1708 * t3_set_vlan_accel - control HW VLAN extraction
1709 * @adapter: the adapter
1710 * @ports: bitmap of adapter ports to operate on
1711 * @on: enable (1) or disable (0) HW VLAN extraction
1712 *
1713 * Enables or disables HW extraction of VLAN tags for the given port.
1714 */
t3_set_vlan_accel(adapter_t * adapter,unsigned int ports,int on)1715 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1716 {
1717 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1718 ports << S_VLANEXTRACTIONENABLE,
1719 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1720 }
1721
1722 struct intr_info {
1723 unsigned int mask; /* bits to check in interrupt status */
1724 const char *msg; /* message to print or NULL */
1725 short stat_idx; /* stat counter to increment or -1 */
1726 unsigned short fatal; /* whether the condition reported is fatal */
1727 };
1728
1729 /**
1730 * t3_handle_intr_status - table driven interrupt handler
1731 * @adapter: the adapter that generated the interrupt
1732 * @reg: the interrupt status register to process
1733 * @mask: a mask to apply to the interrupt status
1734 * @acts: table of interrupt actions
1735 * @stats: statistics counters tracking interrupt occurrences
1736 *
1737 * A table driven interrupt handler that applies a set of masks to an
1738 * interrupt status word and performs the corresponding actions if the
1739 * interrupts described by the mask have occurred. The actions include
1740 * optionally printing a warning or alert message, and optionally
1741 * incrementing a stat counter. The table is terminated by an entry
1742 * specifying mask 0. Returns the number of fatal interrupt conditions.
1743 */
1744 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1745 unsigned int mask,
1746 const struct intr_info *acts,
1747 unsigned long *stats)
1748 {
1749 int fatal = 0;
1750 unsigned int status = t3_read_reg(adapter, reg) & mask;
1751
1752 for ( ; acts->mask; ++acts) {
1753 if (!(status & acts->mask)) continue;
1754 if (acts->fatal) {
1755 fatal++;
1756 CH_ALERT(adapter, "%s (0x%x)\n",
1757 acts->msg, status & acts->mask);
1758 status &= ~acts->mask;
1759 } else if (acts->msg)
1760 CH_WARN(adapter, "%s (0x%x)\n",
1761 acts->msg, status & acts->mask);
1762 if (acts->stat_idx >= 0)
1763 stats[acts->stat_idx]++;
1764 }
1765 if (status) /* clear processed interrupts */
1766 t3_write_reg(adapter, reg, status);
1767 return fatal;
1768 }
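
/*
 * Illustrative sketch (not part of the driver): how the module interrupt
 * handlers below are built on t3_handle_intr_status().  The table here is
 * hypothetical; each entry pairs cause-register bits with a message (or
 * NULL), a stat counter index (or -1), and a fatal flag, and an entry
 * with mask 0 terminates the table.
 */
#if 0
static void example_intr_handler(adapter_t *adapter)
{
	static struct intr_info example_intr_info[] = {
		{ 0x1, "example recoverable condition", -1, 0 },
		{ 0x2, "example fatal condition", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_PL_INT_CAUSE0, 0xffffffff,
				  example_intr_info, NULL))
		t3_fatal_err(adapter);
}
#endif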
1769
1770 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1771 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1772 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1773 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1774 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1775 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1776 F_HIRCQPARITYERROR)
1777 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1778 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1779 F_NFASRCHFAIL)
1780 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1781 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1782 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1783 F_TXFIFO_UNDERRUN)
1784 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1785 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1786 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1787 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1788 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1789 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1790 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1791 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1792 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1793 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1794 F_TXPARERR | V_BISTERR(M_BISTERR))
1795 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1796 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1797 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1798 #define ULPTX_INTR_MASK 0xfc
1799 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1800 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1801 F_ZERO_SWITCH_ERROR)
1802 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1803 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1804 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1805 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1806 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1807 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1808 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1809 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1810 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1811 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1812 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1813 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1814 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1815 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1816 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1817 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1818 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1819 V_MCAPARERRENB(M_MCAPARERRENB))
1820 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1821 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1822 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1823 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1824 F_MPS0 | F_CPL_SWITCH)
1825 /*
1826 * Interrupt handler for the PCIX1 module.
1827 */
1828 static void pci_intr_handler(adapter_t *adapter)
1829 {
1830 static struct intr_info pcix1_intr_info[] = {
1831 { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1832 { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1833 { F_RCVTARABT, "PCI received target abort", -1, 1 },
1834 { F_RCVMSTABT, "PCI received master abort", -1, 1 },
1835 { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1836 { F_DETPARERR, "PCI detected parity error", -1, 1 },
1837 { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1838 { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1839 { F_RCVSPLCMPERR, "PCI received split completion error", -1,
1840 1 },
1841 { F_DETCORECCERR, "PCI correctable ECC error",
1842 STAT_PCI_CORR_ECC, 0 },
1843 { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1844 { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1845 { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1846 1 },
1847 { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1848 1 },
1849 { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1850 1 },
1851 { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1852 "error", -1, 1 },
1853 { 0 }
1854 };
1855
1856 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1857 pcix1_intr_info, adapter->irq_stats))
1858 t3_fatal_err(adapter);
1859 }
1860
1861 /*
1862 * Interrupt handler for the PCIE module.
1863 */
1864 static void pcie_intr_handler(adapter_t *adapter)
1865 {
1866 static struct intr_info pcie_intr_info[] = {
1867 { F_PEXERR, "PCI PEX error", -1, 1 },
1868 { F_UNXSPLCPLERRR,
1869 "PCI unexpected split completion DMA read error", -1, 1 },
1870 { F_UNXSPLCPLERRC,
1871 "PCI unexpected split completion DMA command error", -1, 1 },
1872 { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1873 { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1874 { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1875 { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1876 { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1877 "PCI MSI-X table/PBA parity error", -1, 1 },
1878 { F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1879 { F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1880 { F_RXPARERR, "PCI Rx parity error", -1, 1 },
1881 { F_TXPARERR, "PCI Tx parity error", -1, 1 },
1882 { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1883 { 0 }
1884 };
1885
1886 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1887 CH_ALERT(adapter, "PEX error code 0x%x\n",
1888 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1889
1890 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1891 pcie_intr_info, adapter->irq_stats))
1892 t3_fatal_err(adapter);
1893 }
1894
1895 /*
1896 * TP interrupt handler.
1897 */
1898 static void tp_intr_handler(adapter_t *adapter)
1899 {
1900 static struct intr_info tp_intr_info[] = {
1901 { 0xffffff, "TP parity error", -1, 1 },
1902 { 0x1000000, "TP out of Rx pages", -1, 1 },
1903 { 0x2000000, "TP out of Tx pages", -1, 1 },
1904 { 0 }
1905 };
1906 static struct intr_info tp_intr_info_t3c[] = {
1907 { 0x1fffffff, "TP parity error", -1, 1 },
1908 { F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1909 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1910 { 0 }
1911 };
1912
1913 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1914 adapter->params.rev < T3_REV_C ?
1915 tp_intr_info : tp_intr_info_t3c, NULL))
1916 t3_fatal_err(adapter);
1917 }
1918
1919 /*
1920 * CIM interrupt handler.
1921 */
1922 static void cim_intr_handler(adapter_t *adapter)
1923 {
1924 static struct intr_info cim_intr_info[] = {
1925 { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1926 { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1927 { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1928 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1929 { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1930 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1931 { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1932 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1933 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1934 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1935 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1936 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1937 { F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1938 { F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1939 { F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1940 { F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1941 { F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1942 { F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1943 { F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1944 { F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1945 { F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1946 { F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1947 { F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1948 { F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1949 { 0 }
1950 };
1951
1952 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1953 cim_intr_info, NULL))
1954 t3_fatal_err(adapter);
1955 }
1956
1957 /*
1958 * ULP RX interrupt handler.
1959 */
1960 static void ulprx_intr_handler(adapter_t *adapter)
1961 {
1962 static struct intr_info ulprx_intr_info[] = {
1963 { F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1964 { F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1965 { F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1966 { F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1967 { F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1968 { F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1969 { F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1970 { F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1971 { 0 }
1972 };
1973
1974 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1975 ulprx_intr_info, NULL))
1976 t3_fatal_err(adapter);
1977 }
1978
1979 /*
1980 * ULP TX interrupt handler.
1981 */
1982 static void ulptx_intr_handler(adapter_t *adapter)
1983 {
1984 static struct intr_info ulptx_intr_info[] = {
1985 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1986 STAT_ULP_CH0_PBL_OOB, 0 },
1987 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1988 STAT_ULP_CH1_PBL_OOB, 0 },
1989 { 0xfc, "ULP TX parity error", -1, 1 },
1990 { 0 }
1991 };
1992
1993 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1994 ulptx_intr_info, adapter->irq_stats))
1995 t3_fatal_err(adapter);
1996 }
1997
1998 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1999 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
2000 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
2001 F_ICSPI1_TX_FRAMING_ERROR)
2002 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
2003 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
2004 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
2005 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
2006
2007 /*
2008 * PM TX interrupt handler.
2009 */
2010 static void pmtx_intr_handler(adapter_t *adapter)
2011 {
2012 static struct intr_info pmtx_intr_info[] = {
2013 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2014 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
2015 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
2016 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
2017 "PMTX ispi parity error", -1, 1 },
2018 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
2019 "PMTX ospi parity error", -1, 1 },
2020 { 0 }
2021 };
2022
2023 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
2024 pmtx_intr_info, NULL))
2025 t3_fatal_err(adapter);
2026 }
2027
2028 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
2029 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
2030 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
2031 F_IESPI1_TX_FRAMING_ERROR)
2032 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
2033 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
2034 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
2035 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
2036
2037 /*
2038 * PM RX interrupt handler.
2039 */
2040 static void pmrx_intr_handler(adapter_t *adapter)
2041 {
2042 static struct intr_info pmrx_intr_info[] = {
2043 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2044 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
2045 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
2046 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
2047 "PMRX ispi parity error", -1, 1 },
2048 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
2049 "PMRX ospi parity error", -1, 1 },
2050 { 0 }
2051 };
2052
2053 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
2054 pmrx_intr_info, NULL))
2055 t3_fatal_err(adapter);
2056 }
2057
2058 /*
2059 * CPL switch interrupt handler.
2060 */
2061 static void cplsw_intr_handler(adapter_t *adapter)
2062 {
2063 static struct intr_info cplsw_intr_info[] = {
2064 { F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
2065 { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
2066 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
2067 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
2068 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
2069 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
2070 { 0 }
2071 };
2072
2073 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
2074 cplsw_intr_info, NULL))
2075 t3_fatal_err(adapter);
2076 }
2077
2078 /*
2079 * MPS interrupt handler.
2080 */
2081 static void mps_intr_handler(adapter_t *adapter)
2082 {
2083 static struct intr_info mps_intr_info[] = {
2084 { 0x1ff, "MPS parity error", -1, 1 },
2085 { 0 }
2086 };
2087
2088 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
2089 mps_intr_info, NULL))
2090 t3_fatal_err(adapter);
2091 }
2092
2093 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
2094
2095 /*
2096 * MC7 interrupt handler.
2097 */
2098 static void mc7_intr_handler(struct mc7 *mc7)
2099 {
2100 adapter_t *adapter = mc7->adapter;
2101 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
2102
2103 if (cause & F_CE) {
2104 mc7->stats.corr_err++;
2105 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
2106 "data 0x%x 0x%x 0x%x\n", mc7->name,
2107 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
2108 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
2109 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
2110 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
2111 }
2112
2113 if (cause & F_UE) {
2114 mc7->stats.uncorr_err++;
2115 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
2116 "data 0x%x 0x%x 0x%x\n", mc7->name,
2117 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
2118 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
2119 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
2120 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
2121 }
2122
2123 if (G_PE(cause)) {
2124 mc7->stats.parity_err++;
2125 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
2126 mc7->name, G_PE(cause));
2127 }
2128
2129 if (cause & F_AE) {
2130 u32 addr = 0;
2131
2132 if (adapter->params.rev > 0)
2133 addr = t3_read_reg(adapter,
2134 mc7->offset + A_MC7_ERR_ADDR);
2135 mc7->stats.addr_err++;
2136 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
2137 mc7->name, addr);
2138 }
2139
2140 if (cause & MC7_INTR_FATAL)
2141 t3_fatal_err(adapter);
2142
2143 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
2144 }
2145
2146 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
2147 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
2148 /*
2149 * XGMAC interrupt handler.
2150 */
2151 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
2152 {
2153 u32 cause;
2154 struct port_info *pi;
2155 struct cmac *mac;
2156
2157 idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
2158 pi = adap2pinfo(adap, idx);
2159 mac = &pi->mac;
2160
2161 /*
2162 * We mask out interrupt causes for which we're not taking interrupts.
2163 * This allows us to use polling logic to monitor some of the other
2164 * conditions when taking interrupts would impose too much load on the
2165 * system.
2166 */
2167 cause = (t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset)
2168 & ~(F_RXFIFO_OVERFLOW));
2169
2170 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
2171 mac->stats.tx_fifo_parity_err++;
2172 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
2173 }
2174 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
2175 mac->stats.rx_fifo_parity_err++;
2176 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
2177 }
2178 if (cause & F_TXFIFO_UNDERRUN)
2179 mac->stats.tx_fifo_urun++;
2180 if (cause & F_RXFIFO_OVERFLOW)
2181 mac->stats.rx_fifo_ovfl++;
2182 if (cause & V_SERDES_LOS(M_SERDES_LOS))
2183 mac->stats.serdes_signal_loss++;
2184 if (cause & F_XAUIPCSCTCERR)
2185 mac->stats.xaui_pcs_ctc_err++;
2186 if (cause & F_XAUIPCSALIGNCHANGE)
2187 mac->stats.xaui_pcs_align_change++;
2188 if (cause & F_XGM_INT &
2189 t3_read_reg(adap, A_XGM_INT_ENABLE + mac->offset)) {
2190 t3_set_reg_field(adap, A_XGM_INT_ENABLE + mac->offset,
2191 F_XGM_INT, 0);
2192
2193 /* link fault suspected */
2194 pi->link_fault = LF_MAYBE;
2195 t3_os_link_intr(pi);
2196 }
2197
2198 if (cause & XGM_INTR_FATAL)
2199 t3_fatal_err(adap);
2200
2201 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
2202 return cause != 0;
2203 }
2204
2205 /*
2206 * Interrupt handler for PHY events.
2207 */
2208 static int phy_intr_handler(adapter_t *adapter)
2209 {
2210 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
2211
2212 for_each_port(adapter, i) {
2213 struct port_info *p = adap2pinfo(adapter, i);
2214
2215 if (!(p->phy.caps & SUPPORTED_IRQ))
2216 continue;
2217
2218 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
2219 int phy_cause = p->phy.ops->intr_handler(&p->phy);
2220
2221 if (phy_cause & cphy_cause_link_change)
2222 t3_os_link_intr(p);
2223 if (phy_cause & cphy_cause_fifo_error)
2224 p->phy.fifo_errors++;
2225 if (phy_cause & cphy_cause_module_change)
2226 t3_os_phymod_changed(adapter, i);
2227 if (phy_cause & cphy_cause_alarm)
2228 CH_WARN(adapter, "Operation affected due to "
2229 "adverse environment. Check the spec "
2230 				"sheet for corrective action.\n");
2231 }
2232 }
2233
2234 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
2235 return 0;
2236 }
2237
2238 /**
2239 * t3_slow_intr_handler - control path interrupt handler
2240 * @adapter: the adapter
2241 *
2242 * T3 interrupt handler for non-data interrupt events, e.g., errors.
2243 * The designation 'slow' is because it involves register reads, while
2244 * data interrupts typically don't involve any MMIOs.
2245 */
2246 int t3_slow_intr_handler(adapter_t *adapter)
2247 {
2248 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
2249
2250 cause &= adapter->slow_intr_mask;
2251 if (!cause)
2252 return 0;
2253 if (cause & F_PCIM0) {
2254 if (is_pcie(adapter))
2255 pcie_intr_handler(adapter);
2256 else
2257 pci_intr_handler(adapter);
2258 }
2259 if (cause & F_SGE3)
2260 t3_sge_err_intr_handler(adapter);
2261 if (cause & F_MC7_PMRX)
2262 mc7_intr_handler(&adapter->pmrx);
2263 if (cause & F_MC7_PMTX)
2264 mc7_intr_handler(&adapter->pmtx);
2265 if (cause & F_MC7_CM)
2266 mc7_intr_handler(&adapter->cm);
2267 if (cause & F_CIM)
2268 cim_intr_handler(adapter);
2269 if (cause & F_TP1)
2270 tp_intr_handler(adapter);
2271 if (cause & F_ULP2_RX)
2272 ulprx_intr_handler(adapter);
2273 if (cause & F_ULP2_TX)
2274 ulptx_intr_handler(adapter);
2275 if (cause & F_PM1_RX)
2276 pmrx_intr_handler(adapter);
2277 if (cause & F_PM1_TX)
2278 pmtx_intr_handler(adapter);
2279 if (cause & F_CPL_SWITCH)
2280 cplsw_intr_handler(adapter);
2281 if (cause & F_MPS0)
2282 mps_intr_handler(adapter);
2283 if (cause & F_MC5A)
2284 t3_mc5_intr_handler(&adapter->mc5);
2285 if (cause & F_XGMAC0_0)
2286 mac_intr_handler(adapter, 0);
2287 if (cause & F_XGMAC0_1)
2288 mac_intr_handler(adapter, 1);
2289 if (cause & F_T3DBG)
2290 phy_intr_handler(adapter);
2291
2292 /* Clear the interrupts just processed. */
2293 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
2294 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2295 return 1;
2296 }
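
/*
 * Illustrative usage sketch (not part of the driver): a top-level ISR
 * would typically call t3_slow_intr_handler() for control-path causes
 * and then process data-path (SGE) interrupts; the return value says
 * whether any slow-path cause was pending.
 */
#if 0
static void example_isr(adapter_t *adapter)
{
	int slow = t3_slow_intr_handler(adapter);	/* 1 if a cause was handled */

	/* ... data-path (SGE) interrupt processing would follow ... */
	(void)slow;
}
#endif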
2297
2298 static unsigned int calc_gpio_intr(adapter_t *adap)
2299 {
2300 unsigned int i, gpi_intr = 0;
2301
2302 for_each_port(adap, i)
2303 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
2304 adapter_info(adap)->gpio_intr[i])
2305 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
2306 return gpi_intr;
2307 }
2308
2309 /**
2310 * t3_intr_enable - enable interrupts
2311 * @adapter: the adapter whose interrupts should be enabled
2312 *
2313 * Enable interrupts by setting the interrupt enable registers of the
2314 * various HW modules and then enabling the top-level interrupt
2315 * concentrator.
2316 */
2317 void t3_intr_enable(adapter_t *adapter)
2318 {
2319 static struct addr_val_pair intr_en_avp[] = {
2320 { A_MC7_INT_ENABLE, MC7_INTR_MASK },
2321 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2322 MC7_INTR_MASK },
2323 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2324 MC7_INTR_MASK },
2325 { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
2326 { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
2327 { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
2328 { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
2329 { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
2330 { A_MPS_INT_ENABLE, MPS_INTR_MASK },
2331 };
2332
2333 adapter->slow_intr_mask = PL_INTR_MASK;
2334
2335 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
2336 t3_write_reg(adapter, A_TP_INT_ENABLE,
2337 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
2338 t3_write_reg(adapter, A_SG_INT_ENABLE, SGE_INTR_MASK);
2339
2340 if (adapter->params.rev > 0) {
2341 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
2342 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
2343 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
2344 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
2345 F_PBL_BOUND_ERR_CH1);
2346 } else {
2347 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
2348 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
2349 }
2350
2351 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
2352
2353 if (is_pcie(adapter))
2354 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2355 else
2356 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2357 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2358 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2359 }
2360
2361 /**
2362 * t3_intr_disable - disable a card's interrupts
2363 * @adapter: the adapter whose interrupts should be disabled
2364 *
2365 * Disable interrupts. We only disable the top-level interrupt
2366 * concentrator and the SGE data interrupts.
2367 */
2368 void t3_intr_disable(adapter_t *adapter)
2369 {
2370 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2371 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2372 adapter->slow_intr_mask = 0;
2373 }
2374
2375 /**
2376 * t3_intr_clear - clear all interrupts
2377 * @adapter: the adapter whose interrupts should be cleared
2378 *
2379 * Clears all interrupts.
2380 */
2381 void t3_intr_clear(adapter_t *adapter)
2382 {
2383 static const unsigned int cause_reg_addr[] = {
2384 A_SG_INT_CAUSE,
2385 A_SG_RSPQ_FL_STATUS,
2386 A_PCIX_INT_CAUSE,
2387 A_MC7_INT_CAUSE,
2388 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2389 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2390 A_CIM_HOST_INT_CAUSE,
2391 A_TP_INT_CAUSE,
2392 A_MC5_DB_INT_CAUSE,
2393 A_ULPRX_INT_CAUSE,
2394 A_ULPTX_INT_CAUSE,
2395 A_CPL_INTR_CAUSE,
2396 A_PM1_TX_INT_CAUSE,
2397 A_PM1_RX_INT_CAUSE,
2398 A_MPS_INT_CAUSE,
2399 A_T3DBG_INT_CAUSE,
2400 };
2401 unsigned int i;
2402
2403 /* Clear PHY and MAC interrupts for each port. */
2404 for_each_port(adapter, i)
2405 t3_port_intr_clear(adapter, i);
2406
2407 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2408 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2409
2410 if (is_pcie(adapter))
2411 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2412 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2413 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2414 }
2415
2416 void t3_xgm_intr_enable(adapter_t *adapter, int idx)
2417 {
2418 struct port_info *pi = adap2pinfo(adapter, idx);
2419
2420 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2421 XGM_EXTRA_INTR_MASK);
2422 }
2423
2424 void t3_xgm_intr_disable(adapter_t *adapter, int idx)
2425 {
2426 struct port_info *pi = adap2pinfo(adapter, idx);
2427
2428 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2429 0x7ff);
2430 }
2431
2432 /**
2433 * t3_port_intr_enable - enable port-specific interrupts
2434 * @adapter: associated adapter
2435 * @idx: index of port whose interrupts should be enabled
2436 *
2437 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2438 * adapter port.
2439 */
2440 void t3_port_intr_enable(adapter_t *adapter, int idx)
2441 {
2442 struct port_info *pi = adap2pinfo(adapter, idx);
2443
2444 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2445 pi->phy.ops->intr_enable(&pi->phy);
2446 }
2447
2448 /**
2449 * t3_port_intr_disable - disable port-specific interrupts
2450 * @adapter: associated adapter
2451 * @idx: index of port whose interrupts should be disabled
2452 *
2453 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2454 * adapter port.
2455 */
2456 void t3_port_intr_disable(adapter_t *adapter, int idx)
2457 {
2458 struct port_info *pi = adap2pinfo(adapter, idx);
2459
2460 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2461 pi->phy.ops->intr_disable(&pi->phy);
2462 }
2463
2464 /**
2465 * t3_port_intr_clear - clear port-specific interrupts
2466 * @adapter: associated adapter
2467 * @idx: index of port whose interrupts to clear
2468 *
2469 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2470 * adapter port.
2471 */
2472 void t3_port_intr_clear(adapter_t *adapter, int idx)
2473 {
2474 struct port_info *pi = adap2pinfo(adapter, idx);
2475
2476 t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2477 pi->phy.ops->intr_clear(&pi->phy);
2478 }
2479
2480 #define SG_CONTEXT_CMD_ATTEMPTS 100
2481
2482 /**
2483 * t3_sge_write_context - write an SGE context
2484 * @adapter: the adapter
2485 * @id: the context id
2486 * @type: the context type
2487 *
2488 * Program an SGE context with the values already loaded in the
2489 *	SG_CONTEXT_DATA0..3 registers.
2490 */
2491 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
2492 unsigned int type)
2493 {
2494 if (type == F_RESPONSEQ) {
2495 /*
2496 * Can't write the Response Queue Context bits for
2497 * Interrupt Armed or the Reserve bits after the chip
2498 * has been initialized out of reset. Writing to these
2499 * bits can confuse the hardware.
2500 */
2501 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2502 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2503 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2504 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2505 } else {
2506 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2507 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2508 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2509 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2510 }
2511 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2512 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2513 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2514 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2515 }
2516
2517 /**
2518 * clear_sge_ctxt - completely clear an SGE context
2519 * @adapter: the adapter
2520 * @id: the context id
2521 * @type: the context type
2522 *
2523 * Completely clear an SGE context. Used predominantly at post-reset
2524 * initialization. Note in particular that we don't skip writing to any
2525 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2526 * does ...
2527 */
2528 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
2529 {
2530 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2531 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2532 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2533 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2534 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2535 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2536 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2537 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2538 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2539 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2540 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2541 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2542 }
2543
2544 /**
2545 * t3_sge_init_ecntxt - initialize an SGE egress context
2546 * @adapter: the adapter to configure
2547 * @id: the context id
2548 * @gts_enable: whether to enable GTS for the context
2549 * @type: the egress context type
2550 * @respq: associated response queue
2551 * @base_addr: base address of queue
2552 * @size: number of queue entries
2553 * @token: uP token
2554 * @gen: initial generation value for the context
2555 * @cidx: consumer pointer
2556 *
2557 * Initialize an SGE egress context and make it ready for use. If the
2558 * platform allows concurrent context operations, the caller is
2559 * responsible for appropriate locking.
2560 */
2561 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2562 enum sge_context_type type, int respq, u64 base_addr,
2563 unsigned int size, unsigned int token, int gen,
2564 unsigned int cidx)
2565 {
2566 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2567
2568 if (base_addr & 0xfff) /* must be 4K aligned */
2569 return -EINVAL;
2570 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2571 return -EBUSY;
2572
2573 base_addr >>= 12;
2574 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2575 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2576 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2577 V_EC_BASE_LO((u32)base_addr & 0xffff));
2578 base_addr >>= 16;
2579 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2580 base_addr >>= 32;
2581 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2582 V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2583 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2584 F_EC_VALID);
2585 return t3_sge_write_context(adapter, id, F_EGRESS);
2586 }
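
/*
 * Illustrative usage sketch (not part of the driver): initializing a
 * hypothetical Ethernet egress context.  The id, response queue, ring
 * size and token are made up; SGE_CNTXT_ETH is assumed to be one of the
 * enum sge_context_type values, and the ring bus address must be
 * 4K-aligned per the check above.
 */
#if 0
static int example_init_eth_txq(adapter_t *adapter, u64 ring_busaddr)
{
	return t3_sge_init_ecntxt(adapter, 0 /* id */, 1 /* gts_enable */,
				  SGE_CNTXT_ETH, 0 /* respq */, ring_busaddr,
				  1024 /* size */, 0 /* token */,
				  1 /* gen */, 0 /* cidx */);
}
#endif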
2587
2588 /**
2589 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2590 * @adapter: the adapter to configure
2591 * @id: the context id
2592 * @gts_enable: whether to enable GTS for the context
2593 * @base_addr: base address of queue
2594 * @size: number of queue entries
2595 * @bsize: size of each buffer for this queue
2596 * @cong_thres: threshold to signal congestion to upstream producers
2597 * @gen: initial generation value for the context
2598 * @cidx: consumer pointer
2599 *
2600 * Initialize an SGE free list context and make it ready for use. The
2601 * caller is responsible for ensuring only one context operation occurs
2602 * at a time.
2603 */
2604 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2605 u64 base_addr, unsigned int size, unsigned int bsize,
2606 unsigned int cong_thres, int gen, unsigned int cidx)
2607 {
2608 if (base_addr & 0xfff) /* must be 4K aligned */
2609 return -EINVAL;
2610 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2611 return -EBUSY;
2612
2613 base_addr >>= 12;
2614 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2615 base_addr >>= 32;
2616 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2617 V_FL_BASE_HI((u32)base_addr) |
2618 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2619 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2620 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2621 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2622 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2623 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2624 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2625 return t3_sge_write_context(adapter, id, F_FREELIST);
2626 }
2627
2628 /**
2629 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2630 * @adapter: the adapter to configure
2631 * @id: the context id
2632 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2633 * @base_addr: base address of queue
2634 * @size: number of queue entries
2635 * @fl_thres: threshold for selecting the normal or jumbo free list
2636 * @gen: initial generation value for the context
2637 * @cidx: consumer pointer
2638 *
2639 * Initialize an SGE response queue context and make it ready for use.
2640 * The caller is responsible for ensuring only one context operation
2641 * occurs at a time.
2642 */
2643 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2644 u64 base_addr, unsigned int size,
2645 unsigned int fl_thres, int gen, unsigned int cidx)
2646 {
2647 unsigned int ctrl, intr = 0;
2648
2649 if (base_addr & 0xfff) /* must be 4K aligned */
2650 return -EINVAL;
2651 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2652 return -EBUSY;
2653
2654 base_addr >>= 12;
2655 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2656 V_CQ_INDEX(cidx));
2657 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2658 base_addr >>= 32;
2659 ctrl = t3_read_reg(adapter, A_SG_CONTROL);
2660 if ((irq_vec_idx > 0) ||
2661 ((irq_vec_idx == 0) && !(ctrl & F_ONEINTMULTQ)))
2662 intr = F_RQ_INTR_EN;
2663 if (irq_vec_idx >= 0)
2664 intr |= V_RQ_MSI_VEC(irq_vec_idx);
2665 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2666 V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2667 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2668 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2669 }
2670
2671 /**
2672 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2673 * @adapter: the adapter to configure
2674 * @id: the context id
2675 * @base_addr: base address of queue
2676 * @size: number of queue entries
2677 * @rspq: response queue for async notifications
2678 * @ovfl_mode: CQ overflow mode
2679 * @credits: completion queue credits
2680 * @credit_thres: the credit threshold
2681 *
2682 * Initialize an SGE completion queue context and make it ready for use.
2683 * The caller is responsible for ensuring only one context operation
2684 * occurs at a time.
2685 */
2686 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2687 unsigned int size, int rspq, int ovfl_mode,
2688 unsigned int credits, unsigned int credit_thres)
2689 {
2690 if (base_addr & 0xfff) /* must be 4K aligned */
2691 return -EINVAL;
2692 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2693 return -EBUSY;
2694
2695 base_addr >>= 12;
2696 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2697 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2698 base_addr >>= 32;
2699 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2700 V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2701 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2702 V_CQ_ERR(ovfl_mode));
2703 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2704 V_CQ_CREDIT_THRES(credit_thres));
2705 return t3_sge_write_context(adapter, id, F_CQ);
2706 }
2707
2708 /**
2709 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2710 * @adapter: the adapter
2711 * @id: the egress context id
2712 * @enable: enable (1) or disable (0) the context
2713 *
2714 * Enable or disable an SGE egress context. The caller is responsible for
2715 * ensuring only one context operation occurs at a time.
2716 */
2717 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2718 {
2719 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2720 return -EBUSY;
2721
2722 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2723 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2724 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2725 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2726 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2727 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2728 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2729 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2730 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2731 }
2732
2733 /**
2734 * t3_sge_disable_fl - disable an SGE free-buffer list
2735 * @adapter: the adapter
2736 * @id: the free list context id
2737 *
2738 * Disable an SGE free-buffer list. The caller is responsible for
2739 * ensuring only one context operation occurs at a time.
2740 */
2741 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2742 {
2743 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2744 return -EBUSY;
2745
2746 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2747 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2748 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2749 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2750 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2751 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2752 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2753 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2754 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2755 }
2756
2757 /**
2758 * t3_sge_disable_rspcntxt - disable an SGE response queue
2759 * @adapter: the adapter
2760 * @id: the response queue context id
2761 *
2762 * Disable an SGE response queue. The caller is responsible for
2763 * ensuring only one context operation occurs at a time.
2764 */
2765 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2766 {
2767 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2768 return -EBUSY;
2769
2770 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2771 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2772 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2773 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2774 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2775 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2776 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2777 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2778 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2779 }
2780
2781 /**
2782 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2783 * @adapter: the adapter
2784 * @id: the completion queue context id
2785 *
2786 * Disable an SGE completion queue. The caller is responsible for
2787 * ensuring only one context operation occurs at a time.
2788 */
2789 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2790 {
2791 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2792 return -EBUSY;
2793
2794 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2795 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2796 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2797 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2798 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2799 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2800 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2801 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2802 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2803 }
2804
2805 /**
2806 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2807 * @adapter: the adapter
2808 * @id: the context id
2809 * @op: the operation to perform
2810 * @credits: credits to return to the CQ
2811 *
2812 * Perform the selected operation on an SGE completion queue context.
2813 * The caller is responsible for ensuring only one context operation
2814 * occurs at a time.
2815 *
2816 *	For operations with opcodes 2..6 the function returns the current
2817 *	HW position in the completion queue.
2818 */
2819 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2820 unsigned int credits)
2821 {
2822 u32 val;
2823
2824 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2825 return -EBUSY;
2826
2827 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2828 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2829 V_CONTEXT(id) | F_CQ);
2830 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2831 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2832 return -EIO;
2833
2834 if (op >= 2 && op < 7) {
2835 if (adapter->params.rev > 0)
2836 return G_CQ_INDEX(val);
2837
2838 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2839 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2840 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2841 F_CONTEXT_CMD_BUSY, 0,
2842 SG_CONTEXT_CMD_ATTEMPTS, 1))
2843 return -EIO;
2844 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2845 }
2846 return 0;
2847 }
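
/*
 * Illustrative usage sketch (not part of the driver): returning credits
 * to a completion queue.  The opcode value 2 is hypothetical and chosen
 * only because opcodes 2..6 also report the current HW index, per the
 * code above; the real opcode encodings live in the SGE headers.
 */
#if 0
static int example_return_cq_credits(adapter_t *adapter, unsigned int cq_id,
				     unsigned int credits)
{
	int hw_cidx = t3_sge_cqcntxt_op(adapter, cq_id, 2 /* op */, credits);

	return hw_cidx;		/* < 0 on error, else HW CQ index */
}
#endif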
2848
2849 /**
2850 * t3_sge_read_context - read an SGE context
2851 * @type: the context type
2852 * @adapter: the adapter
2853 * @id: the context id
2854 * @data: holds the retrieved context
2855 *
2856 *	Read an SGE context of the given type. The caller is responsible
2857 *	for ensuring only one context operation occurs at a time.
2858 */
2859 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2860 unsigned int id, u32 data[4])
2861 {
2862 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2863 return -EBUSY;
2864
2865 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2866 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2867 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2868 SG_CONTEXT_CMD_ATTEMPTS, 1))
2869 return -EIO;
2870 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2871 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2872 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2873 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2874 return 0;
2875 }
2876
2877 /**
2878 * t3_sge_read_ecntxt - read an SGE egress context
2879 * @adapter: the adapter
2880 * @id: the context id
2881 * @data: holds the retrieved context
2882 *
2883 * Read an SGE egress context. The caller is responsible for ensuring
2884 * only one context operation occurs at a time.
2885 */
2886 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2887 {
2888 if (id >= 65536)
2889 return -EINVAL;
2890 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2891 }
2892
2893 /**
2894 * t3_sge_read_cq - read an SGE CQ context
2895 * @adapter: the adapter
2896 * @id: the context id
2897 * @data: holds the retrieved context
2898 *
2899 * Read an SGE CQ context. The caller is responsible for ensuring
2900 * only one context operation occurs at a time.
2901 */
2902 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2903 {
2904 if (id >= 65536)
2905 return -EINVAL;
2906 return t3_sge_read_context(F_CQ, adapter, id, data);
2907 }
2908
2909 /**
2910 * t3_sge_read_fl - read an SGE free-list context
2911 * @adapter: the adapter
2912 * @id: the context id
2913 * @data: holds the retrieved context
2914 *
2915 * Read an SGE free-list context. The caller is responsible for ensuring
2916 * only one context operation occurs at a time.
2917 */
2918 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2919 {
2920 if (id >= SGE_QSETS * 2)
2921 return -EINVAL;
2922 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2923 }
2924
2925 /**
2926 * t3_sge_read_rspq - read an SGE response queue context
2927 * @adapter: the adapter
2928 * @id: the context id
2929 * @data: holds the retrieved context
2930 *
2931 * Read an SGE response queue context. The caller is responsible for
2932 * ensuring only one context operation occurs at a time.
2933 */
2934 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2935 {
2936 if (id >= SGE_QSETS)
2937 return -EINVAL;
2938 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2939 }
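
/*
 * Illustrative usage sketch (not part of the driver): reading back a
 * response queue context through the wrapper above and dumping the four
 * raw context words.
 */
#if 0
static void example_dump_rspq_ctxt(adapter_t *adapter)
{
	u32 data[4];

	if (t3_sge_read_rspq(adapter, 0, data) == 0)
		CH_WARN(adapter, "rspq 0 context: %08x %08x %08x %08x\n",
			data[0], data[1], data[2], data[3]);
}
#endif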
2940
2941 /**
2942 * t3_config_rss - configure Rx packet steering
2943 * @adapter: the adapter
2944 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2945 * @cpus: values for the CPU lookup table (0xff terminated)
2946 * @rspq: values for the response queue lookup table (0xffff terminated)
2947 *
2948 * Programs the receive packet steering logic. @cpus and @rspq provide
2949 * the values for the CPU and response queue lookup tables. If they
2950 * provide fewer values than the size of the tables the supplied values
2951 * are used repeatedly until the tables are fully populated.
2952 */
2953 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2954 const u16 *rspq)
2955 {
2956 int i, j, cpu_idx = 0, q_idx = 0;
2957
2958 if (cpus)
2959 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2960 u32 val = i << 16;
2961
2962 for (j = 0; j < 2; ++j) {
2963 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2964 if (cpus[cpu_idx] == 0xff)
2965 cpu_idx = 0;
2966 }
2967 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2968 }
2969
2970 if (rspq)
2971 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2972 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2973 (i << 16) | rspq[q_idx++]);
2974 if (rspq[q_idx] == 0xffff)
2975 q_idx = 0;
2976 }
2977
2978 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2979 }
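
/*
 * Illustrative usage sketch (not part of the driver): programming the RSS
 * tables from short, terminator-ended arrays.  The two-entry patterns are
 * hypothetical and are simply repeated until the RSS_TABLE_SIZE-entry
 * tables are full, as described above; rss_config is passed through to
 * TP_RSS_CONFIG unchanged.
 */
#if 0
static void example_config_rss(adapter_t *adapter, unsigned int rss_config)
{
	static const u8 cpus[] = { 0, 1, 0xff };	/* 0xff terminates */
	static const u16 rspq[] = { 0, 1, 0xffff };	/* 0xffff terminates */

	t3_config_rss(adapter, rss_config, cpus, rspq);
}
#endif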
2980
2981 /**
2982 * t3_read_rss - read the contents of the RSS tables
2983 * @adapter: the adapter
2984 * @lkup: holds the contents of the RSS lookup table
2985 * @map: holds the contents of the RSS map table
2986 *
2987 * Reads the contents of the receive packet steering tables.
2988 */
2989 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2990 {
2991 int i;
2992 u32 val;
2993
2994 if (lkup)
2995 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2996 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2997 0xffff0000 | i);
2998 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2999 if (!(val & 0x80000000))
3000 return -EAGAIN;
3001 *lkup++ = (u8)val;
3002 *lkup++ = (u8)(val >> 8);
3003 }
3004
3005 if (map)
3006 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
3007 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
3008 0xffff0000 | i);
3009 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
3010 if (!(val & 0x80000000))
3011 return -EAGAIN;
3012 *map++ = (u16)val;
3013 }
3014 return 0;
3015 }
3016
3017 /**
3018 * t3_tp_set_offload_mode - put TP in NIC/offload mode
3019 * @adap: the adapter
3020 * @enable: 1 to select offload mode, 0 for regular NIC
3021 *
3022 * Switches TP to NIC/offload mode.
3023 */
3024 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
3025 {
3026 if (is_offload(adap) || !enable)
3027 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
3028 V_NICMODE(!enable));
3029 }
3030
3031 /**
3032 * tp_wr_bits_indirect - set/clear bits in an indirect TP register
3033 * @adap: the adapter
3034 * @addr: the indirect TP register address
3035 * @mask: specifies the field within the register to modify
3036 * @val: new value for the field
3037 *
3038 * Sets a field of an indirect TP register to the given value.
3039 */
3040 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
3041 unsigned int mask, unsigned int val)
3042 {
3043 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3044 val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3045 t3_write_reg(adap, A_TP_PIO_DATA, val);
3046 }
3047
3048 /**
3049 * t3_enable_filters - enable the HW filters
3050 * @adap: the adapter
3051 *
3052 * Enables the HW filters for NIC traffic.
3053 */
3054 void t3_enable_filters(adapter_t *adap)
3055 {
3056 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
3057 t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
3058 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
3059 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
3060 }
3061
3062 /**
3063 * t3_disable_filters - disable the HW filters
3064 * @adap: the adapter
3065 *
3066 * Disables the HW filters for NIC traffic.
3067 */
3068 void t3_disable_filters(adapter_t *adap)
3069 {
3070 /* note that we don't want to revert to NIC-only mode */
3071 t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_FILTEREN, 0);
3072 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG,
3073 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 0);
3074 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, F_LOOKUPEVERYPKT, 0);
3075 }
3076
3077 /**
3078 * pm_num_pages - calculate the number of pages of the payload memory
3079 * @mem_size: the size of the payload memory
3080 * @pg_size: the size of each payload memory page
3081 *
3082 * Calculate the number of pages, each of the given size, that fit in a
3083 * memory of the specified size, respecting the HW requirement that the
3084 * number of pages must be a multiple of 24.
3085 */
3086 static inline unsigned int pm_num_pages(unsigned int mem_size,
3087 unsigned int pg_size)
3088 {
3089 unsigned int n = mem_size / pg_size;
3090
3091 return n - n % 24;
3092 }
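
/*
 * Worked example (illustrative): for a hypothetical 64 MB payload memory
 * with 16 KB pages, n = 67108864 / 16384 = 4096 and 4096 % 24 = 16, so
 * pm_num_pages() returns 4096 - 16 = 4080, a multiple of 24 as required.
 */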
3093
3094 #define mem_region(adap, start, size, reg) \
3095 t3_write_reg((adap), A_ ## reg, (start)); \
3096 start += size
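
/*
 * Note (illustrative): mem_region() expands to two statements, a register
 * write followed by "start += size".  It is only safe in straight-line
 * code such as partition_mem() below; under an unbraced conditional the
 * "start += size" half would escape the condition.
 */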
3097
3098 /**
3099 * partition_mem - partition memory and configure TP memory settings
3100 * @adap: the adapter
3101 * @p: the TP parameters
3102 *
3103 * Partitions context and payload memory and configures TP's memory
3104 * registers.
3105 */
3106 static void partition_mem(adapter_t *adap, const struct tp_params *p)
3107 {
3108 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
3109 unsigned int timers = 0, timers_shift = 22;
3110
3111 if (adap->params.rev > 0) {
3112 if (tids <= 16 * 1024) {
3113 timers = 1;
3114 timers_shift = 16;
3115 } else if (tids <= 64 * 1024) {
3116 timers = 2;
3117 timers_shift = 18;
3118 } else if (tids <= 256 * 1024) {
3119 timers = 3;
3120 timers_shift = 20;
3121 }
3122 }
3123
3124 t3_write_reg(adap, A_TP_PMM_SIZE,
3125 p->chan_rx_size | (p->chan_tx_size >> 16));
3126
3127 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
3128 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
3129 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
3130 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
3131 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
3132
3133 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
3134 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
3135 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
3136
3137 pstructs = p->rx_num_pgs + p->tx_num_pgs;
3138 /* Add a bit of headroom and make multiple of 24 */
3139 pstructs += 48;
3140 pstructs -= pstructs % 24;
3141 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
3142
3143 m = tids * TCB_SIZE;
3144 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
3145 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
3146 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
3147 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
3148 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
3149 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
3150 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
3151 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
3152
3153 m = (m + 4095) & ~0xfff;
3154 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
3155 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
3156
3157 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
3158 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
3159 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
3160 if (tids < m)
3161 adap->params.mc5.nservers += m - tids;
3162 }
3163
3164 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
3165 {
3166 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3167 t3_write_reg(adap, A_TP_PIO_DATA, val);
3168 }
3169
3170 static inline u32 tp_rd_indirect(adapter_t *adap, unsigned int addr)
3171 {
3172 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3173 return t3_read_reg(adap, A_TP_PIO_DATA);
3174 }
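
/*
 * Illustrative sketch (not part of the driver): A_TP_PIO_ADDR/A_TP_PIO_DATA
 * form a classic address/data window into TP's indirect register space.
 * A read-modify-write through the window, mirroring tp_wr_bits_indirect()
 * above, might look like:
 */
#if 0
static void example_tp_rmw(adapter_t *adap, unsigned int addr,
			   unsigned int mask, unsigned int bits)
{
	u32 v = tp_rd_indirect(adap, addr);

	tp_wr_indirect(adap, addr, (v & ~mask) | (bits & mask));
}
#endif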
3175
3176 static void tp_config(adapter_t *adap, const struct tp_params *p)
3177 {
3178 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
3179 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
3180 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
3181 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
3182 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
3183 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
3184 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
3185 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
3186 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
3187 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
3188 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
3189 F_IPV6ENABLE | F_NICMODE);
3190 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
3191 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
3192 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
3193 adap->params.rev > 0 ? F_ENABLEESND :
3194 F_T3A_ENABLEESND);
3195 t3_set_reg_field(adap, A_TP_PC_CONFIG,
3196 F_ENABLEEPCMDAFULL,
3197 			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
3198 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
3199 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
3200 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
3201 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
3202 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
3203 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
3204
3205 if (adap->params.rev > 0) {
3206 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
3207 t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
3208 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
3209 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
3210 tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
3211 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
3212 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
3213 } else
3214 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
3215
3216 if (adap->params.rev == T3_REV_C)
3217 t3_set_reg_field(adap, A_TP_PC_CONFIG,
3218 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
3219 V_TABLELATENCYDELTA(4));
3220
3221 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
3222 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
3223 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
3224 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
3225
3226 if (adap->params.nports > 2) {
3227 t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
3228 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
3229 F_ENABLERXPORTFROMADDR);
3230 tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
3231 V_RXMAPMODE(M_RXMAPMODE), 0);
3232 tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
3233 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
3234 F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
3235 F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
3236 tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
3237 tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
3238 tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
3239 }
3240 }
3241
3242 /* TCP timer values in ms */
3243 #define TP_DACK_TIMER 50
3244 #define TP_RTO_MIN 250
3245
3246 /**
3247 * tp_set_timers - set TP timing parameters
3248 * @adap: the adapter to set
3249 * @core_clk: the core clock frequency in Hz
3250 *
3251 * Set TP's timing parameters, such as the various timer resolutions and
3252 * the TCP timer values.
3253 */
3254 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
3255 {
3256 unsigned int tre = adap->params.tp.tre;
3257 unsigned int dack_re = adap->params.tp.dack_re;
3258 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
3259 unsigned int tps = core_clk >> tre;
3260
3261 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
3262 V_DELAYEDACKRESOLUTION(dack_re) |
3263 V_TIMESTAMPRESOLUTION(tstamp_re));
3264 t3_write_reg(adap, A_TP_DACK_TIMER,
3265 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
3266 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
3267 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
3268 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
3269 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
3270 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
3271 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
3272 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
3273 V_KEEPALIVEMAX(9));
3274
3275 #define SECONDS * tps
3276
3277 t3_write_reg(adap, A_TP_MSL,
3278 adap->params.rev > 0 ? 0 : 2 SECONDS);
3279 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
3280 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
3281 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
3282 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
3283 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
3284 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
3285 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
3286 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
3287
3288 #undef SECONDS
3289 }
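
/*
 * Worked example (illustrative): tps = core_clk >> tre is the TP timer tick
 * rate in ticks/second, so "n SECONDS" above simply expands to n * tps
 * ticks.  The hypothetical helper below computes the 250 ms minimum RTO
 * written to A_TP_RXT_MIN, which is tps / (1000 / TP_RTO_MIN) = tps / 4.
 */
#if 0
static unsigned int example_rto_min_ticks(unsigned int core_clk,
					  unsigned int tre)
{
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	return tps / (1000 / TP_RTO_MIN);	/* ticks in 250 ms */
}
#endif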
3290
3291 /**
3292 * t3_tp_set_coalescing_size - set receive coalescing size
3293 * @adap: the adapter
3294 * @size: the receive coalescing size
3295 * @psh: whether a set PSH bit should deliver coalesced data
3296 *
3297 * Set the receive coalescing size and PSH bit handling.
3298 */
3299 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
3300 {
3301 u32 val;
3302
3303 if (size > MAX_RX_COALESCING_LEN)
3304 return -EINVAL;
3305
3306 val = t3_read_reg(adap, A_TP_PARA_REG3);
3307 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
3308
3309 if (size) {
3310 val |= F_RXCOALESCEENABLE;
3311 if (psh)
3312 val |= F_RXCOALESCEPSHEN;
3313 size = min(MAX_RX_COALESCING_LEN, size);
3314 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
3315 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
3316 }
3317 t3_write_reg(adap, A_TP_PARA_REG3, val);
3318 return 0;
3319 }
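
/*
 * Usage sketch (illustrative): t3_init_hw() below enables coalescing with
 * the largest size the SGE accepts; passing a size of 0, as in the
 * hypothetical helper here, clears both the enable and PSH bits and turns
 * coalescing off entirely.
 */
#if 0
static void example_disable_rx_coalescing(adapter_t *adap)
{
	(void) t3_tp_set_coalescing_size(adap, 0, 0);
}
#endif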
3320
3321 /**
3322 * t3_tp_set_max_rxsize - set the max receive size
3323 * @adap: the adapter
3324 * @size: the max receive size
3325 *
3326 * Set TP's max receive size. This is the limit that applies when
3327 * receive coalescing is disabled.
3328 */
3329 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
3330 {
3331 t3_write_reg(adap, A_TP_PARA_REG7,
3332 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
3333 }
3334
3335 static void __devinit init_mtus(unsigned short mtus[])
3336 {
3337 /*
3338 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
3339 * it can accommodate max size TCP/IP headers when SACK and timestamps
3340 * are enabled and still have at least 8 bytes of payload.
3341 */
3342 mtus[0] = 88;
3343 mtus[1] = 88;
3344 mtus[2] = 256;
3345 mtus[3] = 512;
3346 mtus[4] = 576;
3347 mtus[5] = 1024;
3348 mtus[6] = 1280;
3349 mtus[7] = 1492;
3350 mtus[8] = 1500;
3351 mtus[9] = 2002;
3352 mtus[10] = 2048;
3353 mtus[11] = 4096;
3354 mtus[12] = 4352;
3355 mtus[13] = 8192;
3356 mtus[14] = 9000;
3357 mtus[15] = 9600;
3358 }
3359
3360 /**
3361 * init_cong_ctrl - initialize congestion control parameters
3362 * @a: the alpha values for congestion control
3363 * @b: the beta values for congestion control
3364 *
3365 * Initialize the congestion control parameters.
3366 */
3367 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3368 {
3369 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3370 a[9] = 2;
3371 a[10] = 3;
3372 a[11] = 4;
3373 a[12] = 5;
3374 a[13] = 6;
3375 a[14] = 7;
3376 a[15] = 8;
3377 a[16] = 9;
3378 a[17] = 10;
3379 a[18] = 14;
3380 a[19] = 17;
3381 a[20] = 21;
3382 a[21] = 25;
3383 a[22] = 30;
3384 a[23] = 35;
3385 a[24] = 45;
3386 a[25] = 60;
3387 a[26] = 80;
3388 a[27] = 100;
3389 a[28] = 200;
3390 a[29] = 300;
3391 a[30] = 400;
3392 a[31] = 500;
3393
3394 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3395 b[9] = b[10] = 1;
3396 b[11] = b[12] = 2;
3397 b[13] = b[14] = b[15] = b[16] = 3;
3398 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3399 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3400 b[28] = b[29] = 6;
3401 b[30] = b[31] = 7;
3402 }
3403
3404 /* The minimum additive increment value for the congestion control table */
3405 #define CC_MIN_INCR 2U
3406
3407 /**
3408 * t3_load_mtus - write the MTU and congestion control HW tables
3409 * @adap: the adapter
3410 * @mtus: the unrestricted values for the MTU table
3411 * @alpha: the values for the congestion control alpha parameter
3412 * @beta: the values for the congestion control beta parameter
3413 * @mtu_cap: the maximum permitted effective MTU
3414 *
3415  *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
3416 * Update the high-speed congestion control table with the supplied alpha,
3417 * beta, and MTUs.
3418 */
3419 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
3420 unsigned short alpha[NCCTRL_WIN],
3421 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
3422 {
3423 static const unsigned int avg_pkts[NCCTRL_WIN] = {
3424 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3425 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3426 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
3427
3428 unsigned int i, w;
3429
3430 for (i = 0; i < NMTUS; ++i) {
3431 unsigned int mtu = min(mtus[i], mtu_cap);
3432 unsigned int log2 = fls(mtu);
3433
3434 if (!(mtu & ((1 << log2) >> 2))) /* round */
3435 log2--;
3436 t3_write_reg(adap, A_TP_MTU_TABLE,
3437 (i << 24) | (log2 << 16) | mtu);
3438
3439 for (w = 0; w < NCCTRL_WIN; ++w) {
3440 unsigned int inc;
3441
3442 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3443 CC_MIN_INCR);
3444
3445 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3446 (w << 16) | (beta[w] << 13) | inc);
3447 }
3448 }
3449 }
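
/*
 * Worked example (illustrative): for mtu = 1500 and the smallest window
 * (avg_pkts[0] = 2), alpha = 1 gives inc = max((1500 - 40) * 1 / 2,
 * CC_MIN_INCR) = 730 bytes per update, while the largest window
 * (avg_pkts[31] = 229376) hits the CC_MIN_INCR floor of 2.  The
 * hypothetical helper below is the per-entry computation done above.
 */
#if 0
static unsigned int example_cc_incr(unsigned int mtu, unsigned int alpha,
				    unsigned int avg_pkts)
{
	/* 40 bytes of TCP/IP header do not count toward the increment */
	return max(((mtu - 40) * alpha) / avg_pkts, CC_MIN_INCR);
}
#endif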
3450
3451 /**
3452 * t3_read_hw_mtus - returns the values in the HW MTU table
3453 * @adap: the adapter
3454 * @mtus: where to store the HW MTU values
3455 *
3456 * Reads the HW MTU table.
3457 */
3458 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3459 {
3460 int i;
3461
3462 for (i = 0; i < NMTUS; ++i) {
3463 unsigned int val;
3464
3465 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3466 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3467 mtus[i] = val & 0x3fff;
3468 }
3469 }
3470
3471 /**
3472 * t3_get_cong_cntl_tab - reads the congestion control table
3473 * @adap: the adapter
3474  * @incr: where to store the additive increment values
3475 *
3476 * Reads the additive increments programmed into the HW congestion
3477 * control table.
3478 */
3479 void t3_get_cong_cntl_tab(adapter_t *adap,
3480 unsigned short incr[NMTUS][NCCTRL_WIN])
3481 {
3482 unsigned int mtu, w;
3483
3484 for (mtu = 0; mtu < NMTUS; ++mtu)
3485 for (w = 0; w < NCCTRL_WIN; ++w) {
3486 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3487 0xffff0000 | (mtu << 5) | w);
3488 incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3489 A_TP_CCTRL_TABLE) & 0x1fff;
3490 }
3491 }
3492
3493 /**
3494 * t3_tp_get_mib_stats - read TP's MIB counters
3495 * @adap: the adapter
3496 * @tps: holds the returned counter values
3497 *
3498 * Returns the values of TP's MIB counters.
3499 */
3500 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
3501 {
3502 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3503 sizeof(*tps) / sizeof(u32), 0);
3504 }
3505
3506 /**
3507 * t3_read_pace_tbl - read the pace table
3508 * @adap: the adapter
3509 * @pace_vals: holds the returned values
3510 *
3511 * Returns the values of TP's pace table in nanoseconds.
3512 */
3513 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
3514 {
3515 unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3516
3517 for (i = 0; i < NTX_SCHED; i++) {
3518 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3519 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3520 }
3521 }
3522
3523 /**
3524 * t3_set_pace_tbl - set the pace table
3525 * @adap: the adapter
3526 * @pace_vals: the pace values in nanoseconds
3527 * @start: index of the first entry in the HW pace table to set
3528 * @n: how many entries to set
3529 *
3530 * Sets (a subset of the) HW pace table.
3531 */
3532 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3533 unsigned int start, unsigned int n)
3534 {
3535 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3536
3537 for ( ; n; n--, start++, pace_vals++)
3538 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3539 ((*pace_vals + tick_ns / 2) / tick_ns));
3540 }
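
/*
 * Round-trip sketch (illustrative): both pace-table helpers above use the
 * delayed-ACK tick as their unit, so converting nanoseconds to a table
 * entry and back, as the hypothetical helper below does, loses at most
 * half a tick.
 */
#if 0
static unsigned int example_pace_round_trip(adapter_t *adap, unsigned int ns)
{
	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);

	/* nearest-tick rounding used by t3_set_pace_tbl() */
	return ((ns + tick_ns / 2) / tick_ns) * tick_ns;
}
#endif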
3541
3542 #define ulp_region(adap, name, start, len) \
3543 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3544 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3545 (start) + (len) - 1); \
3546 start += len
3547
3548 #define ulptx_region(adap, name, start, len) \
3549 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3550 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3551 (start) + (len) - 1)
3552
3553 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3554 {
3555 unsigned int m = p->chan_rx_size;
3556
3557 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3558 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3559 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3560 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3561 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3562 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3563 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3564 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3565 }
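
/*
 * Expansion sketch (illustrative): ulp_region() advances m as a side
 * effect, so the calls above carve PMRX into back-to-back [llimit, ulimit]
 * windows.  The first region above expands to roughly the following.
 */
#if 0
t3_write_reg(adap, A_ULPRX_ISCSI_LLIMIT, m);
t3_write_reg(adap, A_ULPRX_ISCSI_ULIMIT, m + p->chan_rx_size / 8 - 1);
m += p->chan_rx_size / 8;	/* the next region starts right after */
#endif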
3566
3567
3568 /**
3569  *	t3_set_proto_sram - set the contents of the protocol SRAM
3570  *	@adap: the adapter
3571 * @data: the protocol image
3572 *
3573 * Write the contents of the protocol SRAM.
3574 */
3575 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3576 {
3577 int i;
3578 const u32 *buf = (const u32 *)data;
3579
3580 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3581 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3582 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3583 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3584 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3585 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3586
3587 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3588 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3589 return -EIO;
3590 }
3591 return 0;
3592 }
3593
3594 /**
3595 * t3_config_trace_filter - configure one of the tracing filters
3596 * @adapter: the adapter
3597 * @tp: the desired trace filter parameters
3598 * @filter_index: which filter to configure
3599 * @invert: if set non-matching packets are traced instead of matching ones
3600 * @enable: whether to enable or disable the filter
3601 *
3602 * Configures one of the tracing filters available in HW.
3603 */
3604 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3605 int filter_index, int invert, int enable)
3606 {
3607 u32 addr, key[4], mask[4];
3608
3609 key[0] = tp->sport | (tp->sip << 16);
3610 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3611 key[2] = tp->dip;
3612 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3613
3614 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3615 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3616 mask[2] = tp->dip_mask;
3617 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3618
3619 if (invert)
3620 key[3] |= (1 << 29);
3621 if (enable)
3622 key[3] |= (1 << 28);
3623
3624 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3625 tp_wr_indirect(adapter, addr++, key[0]);
3626 tp_wr_indirect(adapter, addr++, mask[0]);
3627 tp_wr_indirect(adapter, addr++, key[1]);
3628 tp_wr_indirect(adapter, addr++, mask[1]);
3629 tp_wr_indirect(adapter, addr++, key[2]);
3630 tp_wr_indirect(adapter, addr++, mask[2]);
3631 tp_wr_indirect(adapter, addr++, key[3]);
3632 tp_wr_indirect(adapter, addr, mask[3]);
3633 (void) t3_read_reg(adapter, A_TP_PIO_DATA);
3634 }
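
/*
 * Usage sketch (illustrative, with hypothetical values): trace all TCP
 * packets to one destination IP on the Rx filter.  Zero fields with zero
 * masks act as wildcards.
 */
#if 0
static void example_trace_tcp_to_host(adapter_t *adap, u32 dip)
{
	struct trace_params tp = { 0 };

	tp.dip = dip;
	tp.dip_mask = 0xffffffff;
	tp.proto = 6;			/* TCP */
	tp.proto_mask = 0xff;
	t3_config_trace_filter(adap, &tp, 1 /* Rx filter */, 0, 1);
}
#endif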
3635
3636 /**
3637 * t3_query_trace_filter - query a tracing filter
3638 * @adapter: the adapter
3639  * @tp: where to store the filter parameters
3640  * @filter_index: which filter to query
3641  * @inverted: where to store whether the filter is inverted
3642  * @enabled: where to store whether the filter is enabled
3643 *
3644 * Returns the current settings of the specified HW tracing filter.
3645 */
3646 void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp,
3647 int filter_index, int *inverted, int *enabled)
3648 {
3649 u32 addr, key[4], mask[4];
3650
3651 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3652 key[0] = tp_rd_indirect(adapter, addr++);
3653 mask[0] = tp_rd_indirect(adapter, addr++);
3654 key[1] = tp_rd_indirect(adapter, addr++);
3655 mask[1] = tp_rd_indirect(adapter, addr++);
3656 key[2] = tp_rd_indirect(adapter, addr++);
3657 mask[2] = tp_rd_indirect(adapter, addr++);
3658 key[3] = tp_rd_indirect(adapter, addr++);
3659 mask[3] = tp_rd_indirect(adapter, addr);
3660
3661 tp->sport = key[0] & 0xffff;
3662 tp->sip = (key[0] >> 16) | ((key[1] & 0xffff) << 16);
3663 tp->dport = key[1] >> 16;
3664 tp->dip = key[2];
3665 tp->proto = key[3] & 0xff;
3666 tp->vlan = key[3] >> 8;
3667 tp->intf = key[3] >> 20;
3668
3669 tp->sport_mask = mask[0] & 0xffff;
3670 tp->sip_mask = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16);
3671 tp->dport_mask = mask[1] >> 16;
3672 tp->dip_mask = mask[2];
3673 tp->proto_mask = mask[3] & 0xff;
3674 tp->vlan_mask = mask[3] >> 8;
3675 tp->intf_mask = mask[3] >> 20;
3676
3677 *inverted = key[3] & (1 << 29);
3678 *enabled = key[3] & (1 << 28);
3679 }
3680
3681 /**
3682 * t3_config_sched - configure a HW traffic scheduler
3683 * @adap: the adapter
3684 * @kbps: target rate in Kbps
3685 * @sched: the scheduler index
3686 *
3687 * Configure a Tx HW scheduler for the target rate.
3688 */
3689 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3690 {
3691 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3692 unsigned int clk = adap->params.vpd.cclk * 1000;
3693 unsigned int selected_cpt = 0, selected_bpt = 0;
3694
3695 if (kbps > 0) {
3696 kbps *= 125; /* -> bytes */
3697 for (cpt = 1; cpt <= 255; cpt++) {
3698 tps = clk / cpt;
3699 bpt = (kbps + tps / 2) / tps;
3700 if (bpt > 0 && bpt <= 255) {
3701 v = bpt * tps;
3702 delta = v >= kbps ? v - kbps : kbps - v;
3703 if (delta < mindelta) {
3704 mindelta = delta;
3705 selected_cpt = cpt;
3706 selected_bpt = bpt;
3707 }
3708 } else if (selected_cpt)
3709 break;
3710 }
3711 if (!selected_cpt)
3712 return -EINVAL;
3713 }
3714 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3715 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3716 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3717 if (sched & 1)
3718 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3719 else
3720 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3721 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3722 return 0;
3723 }
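
/*
 * Worked example (illustrative): with a 200 MHz core clock, a 1 Gbps
 * target becomes 1,000,000 * 125 = 125,000,000 bytes/s.  The search above
 * scans clocks-per-tick (cpt) and bytes-per-tick (bpt) in [1, 255] for the
 * smallest |bpt * (clk / cpt) - target|; cpt = 8 gives 25,000,000 ticks/s
 * and bpt = 5 hits the target exactly, as the hypothetical helper checks.
 */
#if 0
static unsigned int example_sched_rate_bytes(unsigned int clk,
					     unsigned int cpt,
					     unsigned int bpt)
{
	return clk / cpt * bpt;	/* 200 MHz, cpt 8, bpt 5 -> 125,000,000 */
}
#endif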
3724
3725 /**
3726 * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3727 * @adap: the adapter
3728 * @sched: the scheduler index
3729 * @ipg: the interpacket delay in tenths of nanoseconds
3730 *
3731 * Set the interpacket delay for a HW packet rate scheduler.
3732 */
3733 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3734 {
3735 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3736
3737 /* convert ipg to nearest number of core clocks */
3738 ipg *= core_ticks_per_usec(adap);
3739 ipg = (ipg + 5000) / 10000;
3740 if (ipg > 0xffff)
3741 return -EINVAL;
3742
3743 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3744 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3745 if (sched & 1)
3746 v = (v & 0xffff) | (ipg << 16);
3747 else
3748 v = (v & 0xffff0000) | ipg;
3749 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3750 t3_read_reg(adap, A_TP_TM_PIO_DATA);
3751 return 0;
3752 }
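
/*
 * Worked example (illustrative): @ipg is in tenths of nanoseconds.  With
 * core_ticks_per_usec() == 200 (a 200 MHz clock), an ipg of 1000 (100 ns)
 * scales to 200,000 and rounds to (200,000 + 5,000) / 10,000 = 20 core
 * clocks, i.e. exactly 100 ns of spacing.
 */
#if 0
static unsigned int example_ipg_ticks(adapter_t *adap, unsigned int ipg)
{
	/* tenths of ns -> nearest number of core clocks */
	return (ipg * core_ticks_per_usec(adap) + 5000) / 10000;
}
#endif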
3753
3754 /**
3755 * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3756 * @adap: the adapter
3757 * @sched: the scheduler index
3758  * @kbps: where to store the rate in Kbps
3759  * @ipg: where to store the interpacket delay in tenths of nanoseconds
3760 *
3761 * Return the current configuration of a HW Tx scheduler.
3762 */
3763 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3764 unsigned int *ipg)
3765 {
3766 unsigned int v, addr, bpt, cpt;
3767
3768 if (kbps) {
3769 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3770 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3771 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3772 if (sched & 1)
3773 v >>= 16;
3774 bpt = (v >> 8) & 0xff;
3775 cpt = v & 0xff;
3776 if (!cpt)
3777 *kbps = 0; /* scheduler disabled */
3778 else {
3779 v = (adap->params.vpd.cclk * 1000) / cpt;
3780 *kbps = (v * bpt) / 125;
3781 }
3782 }
3783 if (ipg) {
3784 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3785 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3786 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3787 if (sched & 1)
3788 v >>= 16;
3789 v &= 0xffff;
3790 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3791 }
3792 }
3793
3794 /**
3795 * tp_init - configure TP
3796 * @adap: the adapter
3797 * @p: TP configuration parameters
3798 *
3799 * Initializes the TP HW module.
3800 */
3801 static int tp_init(adapter_t *adap, const struct tp_params *p)
3802 {
3803 int busy = 0;
3804
3805 tp_config(adap, p);
3806 t3_set_vlan_accel(adap, 3, 0);
3807
3808 if (is_offload(adap)) {
3809 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3810 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3811 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3812 0, 1000, 5);
3813 if (busy)
3814 CH_ERR(adap, "TP initialization timed out\n");
3815 }
3816
3817 if (!busy)
3818 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3819 return busy;
3820 }
3821
3822 /**
3823 * t3_mps_set_active_ports - configure port failover
3824 * @adap: the adapter
3825 * @port_mask: bitmap of active ports
3826 *
3827 * Sets the active ports according to the supplied bitmap.
3828 */
3829 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3830 {
3831 if (port_mask & ~((1 << adap->params.nports) - 1))
3832 return -EINVAL;
3833 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3834 port_mask << S_PORT0ACTIVE);
3835 return 0;
3836 }
3837
3838 /**
3839 * chan_init_hw - channel-dependent HW initialization
3840 * @adap: the adapter
3841 * @chan_map: bitmap of Tx channels being used
3842 *
3843 * Perform the bits of HW initialization that are dependent on the Tx
3844 * channels being used.
3845 */
3846 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3847 {
3848 int i;
3849
3850 if (chan_map != 3) { /* one channel */
3851 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3852 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3853 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3854 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3855 F_TPTXPORT1EN | F_PORT1ACTIVE));
3856 t3_write_reg(adap, A_PM1_TX_CFG,
3857 chan_map == 1 ? 0xffffffff : 0);
3858 if (chan_map == 2)
3859 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3860 V_TX_MOD_QUEUE_REQ_MAP(0xff));
3861 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3862 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3863 } else { /* two channels */
3864 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3865 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3866 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3867 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3868 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3869 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3870 F_ENFORCEPKT);
3871 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3872 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3873 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3874 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3875 for (i = 0; i < 16; i++)
3876 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3877 (i << 16) | 0x1010);
3878 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3879 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
3880 }
3881 }
3882
3883 static int calibrate_xgm(adapter_t *adapter)
3884 {
3885 if (uses_xaui(adapter)) {
3886 unsigned int v, i;
3887
3888 for (i = 0; i < 5; ++i) {
3889 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3890 (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3891 msleep(1);
3892 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3893 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3894 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3895 V_XAUIIMP(G_CALIMP(v) >> 2));
3896 return 0;
3897 }
3898 }
3899 CH_ERR(adapter, "MAC calibration failed\n");
3900 return -1;
3901 } else {
3902 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3903 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3904 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3905 F_XGM_IMPSETUPDATE);
3906 }
3907 return 0;
3908 }
3909
3910 static void calibrate_xgm_t3b(adapter_t *adapter)
3911 {
3912 if (!uses_xaui(adapter)) {
3913 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3914 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3915 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3916 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3917 F_XGM_IMPSETUPDATE);
3918 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3919 0);
3920 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3921 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3922 }
3923 }
3924
3925 struct mc7_timing_params {
3926 unsigned char ActToPreDly;
3927 unsigned char ActToRdWrDly;
3928 unsigned char PreCyc;
3929 unsigned char RefCyc[5];
3930 unsigned char BkCyc;
3931 unsigned char WrToRdDly;
3932 unsigned char RdToWrDly;
3933 };
3934
3935 /*
3936 * Write a value to a register and check that the write completed. These
3937 * writes normally complete in a cycle or two, so one read should suffice.
3938 * The very first read exists to flush the posted write to the device.
3939 */
3940 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3941 {
3942 t3_write_reg(adapter, addr, val);
3943 (void) t3_read_reg(adapter, addr); /* flush */
3944 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3945 return 0;
3946 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3947 return -EIO;
3948 }
3949
3950 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3951 {
3952 static const unsigned int mc7_mode[] = {
3953 0x632, 0x642, 0x652, 0x432, 0x442
3954 };
3955 static const struct mc7_timing_params mc7_timings[] = {
3956 { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3957 { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3958 { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3959 { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3960 { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3961 };
3962
3963 u32 val;
3964 unsigned int width, density, slow, attempts;
3965 adapter_t *adapter = mc7->adapter;
3966 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3967
3968 if (!mc7->size)
3969 return 0;
3970
3971 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3972 slow = val & F_SLOW;
3973 width = G_WIDTH(val);
3974 density = G_DEN(val);
3975
3976 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3977 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3978 msleep(1);
3979
3980 if (!slow) {
3981 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3982 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3983 msleep(1);
3984 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3985 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3986 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3987 mc7->name);
3988 goto out_fail;
3989 }
3990 }
3991
3992 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3993 V_ACTTOPREDLY(p->ActToPreDly) |
3994 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3995 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3996 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3997
3998 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3999 val | F_CLKEN | F_TERM150);
4000 (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
4001
4002 if (!slow)
4003 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
4004 F_DLLENB);
4005 udelay(1);
4006
4007 val = slow ? 3 : 6;
4008 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
4009 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
4010 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
4011 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
4012 goto out_fail;
4013
4014 if (!slow) {
4015 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
4016 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
4017 F_DLLRST, 0);
4018 udelay(5);
4019 }
4020
4021 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
4022 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
4023 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
4024 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
4025 mc7_mode[mem_type]) ||
4026 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
4027 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
4028 goto out_fail;
4029
4030 /* clock value is in KHz */
4031 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
4032 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
4033
4034 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
4035 F_PERREFEN | V_PREREFDIV(mc7_clock));
4036 (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
4037
4038 t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
4039 F_ECCGENEN | F_ECCCHKEN);
4040 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
4041 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
4042 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
4043 (mc7->size << width) - 1);
4044 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
4045 (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
4046
4047 attempts = 50;
4048 do {
4049 msleep(250);
4050 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
4051 } while ((val & F_BUSY) && --attempts);
4052 if (val & F_BUSY) {
4053 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
4054 goto out_fail;
4055 }
4056
4057 /* Enable normal memory accesses. */
4058 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
4059 return 0;
4060
4061 out_fail:
4062 return -1;
4063 }
4064
4065 static void config_pcie(adapter_t *adap)
4066 {
4067 static const u16 ack_lat[4][6] = {
4068 { 237, 416, 559, 1071, 2095, 4143 },
4069 { 128, 217, 289, 545, 1057, 2081 },
4070 { 73, 118, 154, 282, 538, 1050 },
4071 { 67, 107, 86, 150, 278, 534 }
4072 };
4073 static const u16 rpl_tmr[4][6] = {
4074 { 711, 1248, 1677, 3213, 6285, 12429 },
4075 { 384, 651, 867, 1635, 3171, 6243 },
4076 { 219, 354, 462, 846, 1614, 3150 },
4077 { 201, 321, 258, 450, 834, 1602 }
4078 };
4079
4080 u16 val, devid;
4081 unsigned int log2_width, pldsize;
4082 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
4083
4084 t3_os_pci_read_config_2(adap,
4085 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4086 &val);
4087 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
4088
4089 /*
4090 	 * For compatibility with PCIe bridges, Gen2 adapters require the
4091 	 * minimum Max_Read_Request_Size.
4092 */
4093 t3_os_pci_read_config_2(adap, 0x2, &devid);
4094 if (devid == 0x37) {
4095 t3_os_pci_write_config_2(adap,
4096 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4097 val & ~PCI_EXP_DEVCTL_READRQ & ~PCI_EXP_DEVCTL_PAYLOAD);
4098 pldsize = 0;
4099 }
4100
4101 t3_os_pci_read_config_2(adap,
4102 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
4103 &val);
4104
4105 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
4106 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
4107 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
4108 log2_width = fls(adap->params.pci.width) - 1;
4109 acklat = ack_lat[log2_width][pldsize];
4110 if (val & 1) /* check LOsEnable */
4111 acklat += fst_trn_tx * 4;
4112 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
4113
4114 if (adap->params.rev == 0)
4115 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
4116 V_T3A_ACKLAT(M_T3A_ACKLAT),
4117 V_T3A_ACKLAT(acklat));
4118 else
4119 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
4120 V_ACKLAT(acklat));
4121
4122 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
4123 V_REPLAYLMT(rpllmt));
4124
4125 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
4126 t3_set_reg_field(adap, A_PCIE_CFG, 0,
4127 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
4128 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
4129 }
4130
4131 /**
4132 * t3_init_hw - initialize and configure T3 HW modules
4133 * @adapter: the adapter
4134 * @fw_params: initial parameters to pass to firmware (optional)
4135 *
4136 * Initialize and configure T3 HW modules. This performs the
4137 * initialization steps that need to be done once after a card is reset.
4138  *	MAC and PHY initialization is handled separately whenever a port is
4139 * enabled.
4140 *
4141 * @fw_params are passed to FW and their value is platform dependent.
4142 * Only the top 8 bits are available for use, the rest must be 0.
4143 */
4144 int t3_init_hw(adapter_t *adapter, u32 fw_params)
4145 {
4146 int err = -EIO, attempts, i;
4147 const struct vpd_params *vpd = &adapter->params.vpd;
4148
4149 if (adapter->params.rev > 0)
4150 calibrate_xgm_t3b(adapter);
4151 else if (calibrate_xgm(adapter))
4152 goto out_err;
4153
4154 if (adapter->params.nports > 2)
4155 t3_mac_init(&adap2pinfo(adapter, 0)->mac);
4156
4157 if (vpd->mclk) {
4158 partition_mem(adapter, &adapter->params.tp);
4159
4160 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
4161 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
4162 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
4163 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
4164 adapter->params.mc5.nfilters,
4165 adapter->params.mc5.nroutes))
4166 goto out_err;
4167
4168 for (i = 0; i < 32; i++)
4169 if (clear_sge_ctxt(adapter, i, F_CQ))
4170 goto out_err;
4171 }
4172
4173 if (tp_init(adapter, &adapter->params.tp))
4174 goto out_err;
4175
4176 t3_tp_set_coalescing_size(adapter,
4177 min(adapter->params.sge.max_pkt_size,
4178 MAX_RX_COALESCING_LEN), 1);
4179 t3_tp_set_max_rxsize(adapter,
4180 min(adapter->params.sge.max_pkt_size, 16384U));
4181 ulp_config(adapter, &adapter->params.tp);
4182 if (is_pcie(adapter))
4183 config_pcie(adapter);
4184 else
4185 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
4186 F_DMASTOPEN | F_CLIDECEN);
4187
4188 if (adapter->params.rev == T3_REV_C)
4189 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
4190 F_CFG_CQE_SOP_MASK);
4191
4192 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
4193 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
4194 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
4195 chan_init_hw(adapter, adapter->params.chan_map);
4196 t3_sge_init(adapter, &adapter->params.sge);
4197 t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
4198
4199 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
4200
4201 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
4202 t3_write_reg(adapter, A_CIM_BOOT_CFG,
4203 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
4204 (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
4205
4206 attempts = 100;
4207 do { /* wait for uP to initialize */
4208 msleep(20);
4209 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
4210 if (!attempts) {
4211 CH_ERR(adapter, "uP initialization timed out\n");
4212 goto out_err;
4213 }
4214
4215 err = 0;
4216 out_err:
4217 return err;
4218 }
4219
4220 /**
4221 * get_pci_mode - determine a card's PCI mode
4222 * @adapter: the adapter
4223 * @p: where to store the PCI settings
4224 *
4225 * Determines a card's PCI mode and associated parameters, such as speed
4226 * and width.
4227 */
4228 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
4229 {
4230 static unsigned short speed_map[] = { 33, 66, 100, 133 };
4231 u32 pci_mode, pcie_cap;
4232
4233 pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4234 if (pcie_cap) {
4235 u16 val;
4236
4237 p->variant = PCI_VARIANT_PCIE;
4238 p->pcie_cap_addr = pcie_cap;
4239 t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
4240 &val);
4241 p->width = (val >> 4) & 0x3f;
4242 return;
4243 }
4244
4245 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
4246 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
4247 p->width = (pci_mode & F_64BIT) ? 64 : 32;
4248 pci_mode = G_PCIXINITPAT(pci_mode);
4249 if (pci_mode == 0)
4250 p->variant = PCI_VARIANT_PCI;
4251 else if (pci_mode < 4)
4252 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
4253 else if (pci_mode < 8)
4254 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
4255 else
4256 p->variant = PCI_VARIANT_PCIX_266_MODE2;
4257 }
4258
4259 /**
4260 * init_link_config - initialize a link's SW state
4261 * @lc: structure holding the link state
4262 * @caps: link capabilities
4263 *
4264 * Initializes the SW state maintained for each link, including the link's
4265 * capabilities and default speed/duplex/flow-control/autonegotiation
4266 * settings.
4267 */
4268 static void __devinit init_link_config(struct link_config *lc,
4269 unsigned int caps)
4270 {
4271 lc->supported = caps;
4272 lc->requested_speed = lc->speed = SPEED_INVALID;
4273 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
4274 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4275 if (lc->supported & SUPPORTED_Autoneg) {
4276 lc->advertising = lc->supported;
4277 lc->autoneg = AUTONEG_ENABLE;
4278 lc->requested_fc |= PAUSE_AUTONEG;
4279 } else {
4280 lc->advertising = 0;
4281 lc->autoneg = AUTONEG_DISABLE;
4282 }
4283 }
4284
4285 /**
4286 * mc7_calc_size - calculate MC7 memory size
4287 * @cfg: the MC7 configuration
4288 *
4289 * Calculates the size of an MC7 memory in bytes from the value of its
4290 * configuration register.
4291 */
4292 static unsigned int __devinit mc7_calc_size(u32 cfg)
4293 {
4294 unsigned int width = G_WIDTH(cfg);
4295 unsigned int banks = !!(cfg & F_BKS) + 1;
4296 unsigned int org = !!(cfg & F_ORG) + 1;
4297 unsigned int density = G_DEN(cfg);
4298 unsigned int MBs = ((256 << density) * banks) / (org << width);
4299
4300 return MBs << 20;
4301 }
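
/*
 * Worked example (illustrative): density 2, two banks, org 1 and width
 * code 1 give ((256 << 2) * 2) / (1 << 1) = 1024 MB, i.e. a 1 GB MC7, as
 * the hypothetical helper below computes.
 */
#if 0
static unsigned int example_mc7_mbs(void)
{
	unsigned int density = 2, banks = 2, org = 1, width = 1;

	return ((256 << density) * banks) / (org << width);	/* 1024 MB */
}
#endif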
4302
4303 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
4304 unsigned int base_addr, const char *name)
4305 {
4306 u32 cfg;
4307
4308 mc7->adapter = adapter;
4309 mc7->name = name;
4310 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
4311 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
4312 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4313 mc7->width = G_WIDTH(cfg);
4314 }
4315
4316 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
4317 {
4318 u16 devid;
4319
4320 mac->adapter = adapter;
4321 mac->multiport = adapter->params.nports > 2;
4322 if (mac->multiport) {
4323 mac->ext_port = (unsigned char)index;
4324 mac->nucast = 8;
4325 } else
4326 mac->nucast = 1;
4327
4328 	/* A Gen2 adapter uses VPD xauicfg[] to tell the driver which MAC is
4329 	 * connected to each port; it is supposed to use xgmac0 for both
4330 	 * ports. */
4331 t3_os_pci_read_config_2(adapter, 0x2, &devid);
4332
4333 if (mac->multiport ||
4334 (!adapter->params.vpd.xauicfg[1] && (devid==0x37)))
4335 index = 0;
4336
4337 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
4338
4339 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
4340 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
4341 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
4342 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
4343 F_ENRGMII, 0);
4344 }
4345 }
4346
4347 /**
4348 * early_hw_init - HW initialization done at card detection time
4349 * @adapter: the adapter
4350 * @ai: contains information about the adapter type and properties
4351 *
4352  *	Performs the part of HW initialization that is done early on when the
4353  *	driver first detects the card. Most of the HW state is initialized
4354  *	lazily later on when a port or an offload function is first used.
4355 */
4356 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
4357 {
4358 u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
4359 3 : 2);
4360 u32 gpio_out = ai->gpio_out;
4361
4362 mi1_init(adapter, ai);
4363 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
4364 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
4365 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
4366 gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
4367 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
4368 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
4369
4370 if (adapter->params.rev == 0 || !uses_xaui(adapter))
4371 val |= F_ENRGMII;
4372
4373 /* Enable MAC clocks so we can access the registers */
4374 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4375 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4376
4377 val |= F_CLKDIVRESET_;
4378 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4379 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4380 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
4381 (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4382 }
4383
4384 /**
4385 * t3_reset_adapter - reset the adapter
4386 * @adapter: the adapter
4387 *
4388 * Reset the adapter.
4389 */
4390 int t3_reset_adapter(adapter_t *adapter)
4391 {
4392 int i, save_and_restore_pcie =
4393 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
4394 uint16_t devid = 0;
4395
4396 if (save_and_restore_pcie)
4397 t3_os_pci_save_state(adapter);
4398 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
4399
4400 /*
4401 	 * Delay to give the device time to reset fully.
4402 	 * XXX The delay time should be tuned.
4403 */
4404 for (i = 0; i < 10; i++) {
4405 msleep(50);
4406 t3_os_pci_read_config_2(adapter, 0x00, &devid);
4407 if (devid == 0x1425)
4408 break;
4409 }
4410
4411 if (devid != 0x1425)
4412 return -1;
4413
4414 if (save_and_restore_pcie)
4415 t3_os_pci_restore_state(adapter);
4416 return 0;
4417 }
4418
4419 static int init_parity(adapter_t *adap)
4420 {
4421 int i, err, addr;
4422
4423 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
4424 return -EBUSY;
4425
4426 for (err = i = 0; !err && i < 16; i++)
4427 err = clear_sge_ctxt(adap, i, F_EGRESS);
4428 for (i = 0xfff0; !err && i <= 0xffff; i++)
4429 err = clear_sge_ctxt(adap, i, F_EGRESS);
4430 for (i = 0; !err && i < SGE_QSETS; i++)
4431 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
4432 if (err)
4433 return err;
4434
4435 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
4436 for (i = 0; i < 4; i++)
4437 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
4438 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
4439 F_IBQDBGWR | V_IBQDBGQID(i) |
4440 V_IBQDBGADDR(addr));
4441 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
4442 F_IBQDBGBUSY, 0, 2, 1);
4443 if (err)
4444 return err;
4445 }
4446 return 0;
4447 }
4448
4449 /**
4450 * t3_prep_adapter - prepare SW and HW for operation
4451 * @adapter: the adapter
4452 * @ai: contains information about the adapter type and properties
4453 *
4454 * Initialize adapter SW state for the various HW modules, set initial
4455 * values for some adapter tunables, take PHYs out of reset, and
4456 * initialize the MDIO interface.
4457 */
4458 int __devinit t3_prep_adapter(adapter_t *adapter,
4459 const struct adapter_info *ai, int reset)
4460 {
4461 int ret;
4462 unsigned int i, j = 0;
4463
4464 get_pci_mode(adapter, &adapter->params.pci);
4465
4466 adapter->params.info = ai;
4467 adapter->params.nports = ai->nports0 + ai->nports1;
4468 adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
4469 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
4470
4471 /*
4472 * We used to only run the "adapter check task" once a second if
4473 * we had PHYs which didn't support interrupts (we would check
4474 * their link status once a second). Now we check other conditions
4475 * in that routine which would [potentially] impose a very high
4476 * interrupt load on the system. As such, we now always scan the
4477 * adapter state once a second ...
4478 */
4479 adapter->params.linkpoll_period = 10;
4480
4481 if (adapter->params.nports > 2)
4482 adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
4483 else
4484 adapter->params.stats_update_period = is_10G(adapter) ?
4485 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
4486 adapter->params.pci.vpd_cap_addr =
4487 t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4488
4489 ret = get_vpd_params(adapter, &adapter->params.vpd);
4490 if (ret < 0)
4491 return ret;
4492
4493 if (reset && t3_reset_adapter(adapter))
4494 return -1;
4495
4496 if (adapter->params.vpd.mclk) {
4497 struct tp_params *p = &adapter->params.tp;
4498
4499 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
4500 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
4501 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
4502
4503 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
4504 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
4505 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
4506 p->cm_size = t3_mc7_size(&adapter->cm);
4507 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
4508 p->chan_tx_size = p->pmtx_size / p->nchan;
4509 p->rx_pg_size = 64 * 1024;
4510 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
4511 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
4512 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
4513 p->ntimer_qs = p->cm_size >= (128 << 20) ||
4514 adapter->params.rev > 0 ? 12 : 6;
4515 p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
4516 1;
4517 p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
4518 }
4519
4520 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
4521 t3_mc7_size(&adapter->pmtx) &&
4522 t3_mc7_size(&adapter->cm);
4523
4524 t3_sge_prep(adapter, &adapter->params.sge);
4525
4526 if (is_offload(adapter)) {
4527 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
4528 /* PR 6487. TOE and filtering are mutually exclusive */
4529 adapter->params.mc5.nfilters = 0;
4530 adapter->params.mc5.nroutes = 0;
4531 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
4532
4533 init_mtus(adapter->params.mtus);
4534 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4535 }
4536
4537 early_hw_init(adapter, ai);
4538 ret = init_parity(adapter);
4539 if (ret)
4540 return ret;
4541
4542 if (adapter->params.nports > 2 &&
4543 (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
4544 return ret;
4545
4546 for_each_port(adapter, i) {
4547 u8 hw_addr[6];
4548 const struct port_type_info *pti;
4549 struct port_info *p = adap2pinfo(adapter, i);
4550
4551 for (;;) {
4552 unsigned port_type = adapter->params.vpd.port_type[j];
4553 if (port_type) {
4554 if (port_type < ARRAY_SIZE(port_types)) {
4555 pti = &port_types[port_type];
4556 break;
4557 } else
4558 return -EINVAL;
4559 }
4560 j++;
4561 if (j >= ARRAY_SIZE(adapter->params.vpd.port_type))
4562 return -EINVAL;
4563 }
4564 ret = pti->phy_prep(p, ai->phy_base_addr + j,
4565 ai->mdio_ops);
4566 if (ret)
4567 return ret;
4568 mac_prep(&p->mac, adapter, j);
4569 ++j;
4570
4571 /*
4572 * The VPD EEPROM stores the base Ethernet address for the
4573 * card. A port's address is derived from the base by adding
4574 * the port's index to the base's low octet.
4575 */
4576 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
4577 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
4578
4579 t3_os_set_hw_addr(adapter, i, hw_addr);
4580 init_link_config(&p->link_config, p->phy.caps);
4581 p->phy.ops->power_down(&p->phy, 1);
4582
4583 /*
4584 * If the PHY doesn't support interrupts for link status
4585 * changes, schedule a scan of the adapter links at least
4586 * once a second.
4587 */
4588 if (!(p->phy.caps & SUPPORTED_IRQ) &&
4589 adapter->params.linkpoll_period > 10)
4590 adapter->params.linkpoll_period = 10;
4591 }
4592
4593 return 0;
4594 }
4595
4596 /**
4597 * t3_reinit_adapter - prepare HW for operation again
4598 * @adapter: the adapter
4599 *
4600 * Put HW in the same state as @t3_prep_adapter without any changes to
4601  *	SW state. This is a cut-down version of @t3_prep_adapter intended
4602 * to be used after events that wipe out HW state but preserve SW state,
4603 * e.g., EEH. The device must be reset before calling this.
4604 */
4605 int t3_reinit_adapter(adapter_t *adap)
4606 {
4607 unsigned int i;
4608 int ret, j = 0;
4609
4610 early_hw_init(adap, adap->params.info);
4611 ret = init_parity(adap);
4612 if (ret)
4613 return ret;
4614
4615 if (adap->params.nports > 2 &&
4616 (ret = t3_vsc7323_init(adap, adap->params.nports)))
4617 return ret;
4618
4619 for_each_port(adap, i) {
4620 const struct port_type_info *pti;
4621 struct port_info *p = adap2pinfo(adap, i);
4622
4623 for (;;) {
4624 unsigned port_type = adap->params.vpd.port_type[j];
4625 if (port_type) {
4626 if (port_type < ARRAY_SIZE(port_types)) {
4627 pti = &port_types[port_type];
4628 break;
4629 } else
4630 return -EINVAL;
4631 }
4632 j++;
4633 if (j >= ARRAY_SIZE(adap->params.vpd.port_type))
4634 return -EINVAL;
4635 }
4636 ret = pti->phy_prep(p, p->phy.addr, NULL);
4637 if (ret)
4638 return ret;
4639 p->phy.ops->power_down(&p->phy, 1);
4640 }
4641 return 0;
4642 }
4643
4644 void t3_led_ready(adapter_t *adapter)
4645 {
4646 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
4647 F_GPIO0_OUT_VAL);
4648 }
4649
4650 void t3_port_failover(adapter_t *adapter, int port)
4651 {
4652 u32 val;
4653
4654 val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4655 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4656 val);
4657 }
4658
4659 void t3_failover_done(adapter_t *adapter, int port)
4660 {
4661 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4662 F_PORT0ACTIVE | F_PORT1ACTIVE);
4663 }
4664
4665 void t3_failover_clear(adapter_t *adapter)
4666 {
4667 t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4668 F_PORT0ACTIVE | F_PORT1ACTIVE);
4669 }
4670
4671 static int t3_cim_hac_read(adapter_t *adapter, u32 addr, u32 *val)
4672 {
4673 u32 v;
4674
4675 t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4676 if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4677 F_HOSTBUSY, 0, 10, 10, &v))
4678 return -EIO;
4679
4680 *val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);
4681
4682 return 0;
4683 }
4684
4685 static int t3_cim_hac_write(adapter_t *adapter, u32 addr, u32 val)
4686 {
4687 u32 v;
4688
4689 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, val);
4690
4691 addr |= F_HOSTWRITE;
4692 t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4693
4694 if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4695 F_HOSTBUSY, 0, 10, 5, &v))
4696 return -EIO;
4697 return 0;
4698 }
4699
4700 int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index,
4701 u32 *size, void *data)
4702 {
4703 u32 v, *buf = data;
4704 int i, cnt, ret;
4705
4706 if (*size < LA_ENTRIES * 4)
4707 return -EINVAL;
4708
4709 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4710 if (ret)
4711 goto out;
4712
4713 *stopped = !(v & 1);
4714
4715 /* Freeze LA */
4716 if (!*stopped) {
4717 ret = t3_cim_hac_write(adapter, LA_CTRL, 0);
4718 if (ret)
4719 goto out;
4720 }
4721
4722 for (i = 0; i < LA_ENTRIES; i++) {
4723 v = (i << 2) | (1 << 1);
4724 ret = t3_cim_hac_write(adapter, LA_CTRL, v);
4725 if (ret)
4726 goto out;
4727
4728 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4729 if (ret)
4730 goto out;
4731
4732 cnt = 20;
4733 while ((v & (1 << 1)) && cnt) {
4734 udelay(5);
4735 --cnt;
4736 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4737 if (ret)
4738 goto out;
4739 }
4740
4741 if (v & (1 << 1))
4742 return -EIO;
4743
4744 ret = t3_cim_hac_read(adapter, LA_DATA, &v);
4745 if (ret)
4746 goto out;
4747
4748 *buf++ = v;
4749 }
4750
4751 ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4752 if (ret)
4753 goto out;
4754
4755 *index = (v >> 16) + 4;
4756 *size = LA_ENTRIES * 4;
4757 out:
4758 /* Unfreeze LA */
4759 t3_cim_hac_write(adapter, LA_CTRL, 1);
4760 return ret;
4761 }
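
/*
 * Usage sketch (illustrative): dumping the uP logic analyzer into a
 * caller-supplied buffer, as an ioctl handler in the OS glue might do.
 */
#if 0
static int example_dump_up_la(adapter_t *adap)
{
	u32 stopped, index, size = LA_ENTRIES * 4;
	u32 buf[LA_ENTRIES];

	/* on success buf holds the entries and index the next position */
	return t3_get_up_la(adap, &stopped, &index, &size, buf);
}
#endif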
4762
4763 int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data)
4764 {
4765 u32 v, *buf = data;
4766 int i, j, ret;
4767
4768 if (*size < IOQ_ENTRIES * sizeof(struct t3_ioq_entry))
4769 return -EINVAL;
4770
4771 for (i = 0; i < 4; i++) {
4772 ret = t3_cim_hac_read(adapter, (4 * i), &v);
4773 if (ret)
4774 goto out;
4775
4776 *buf++ = v;
4777 }
4778
4779 for (i = 0; i < IOQ_ENTRIES; i++) {
4780 u32 base_addr = 0x10 * (i + 1);
4781
4782 for (j = 0; j < 4; j++) {
4783 ret = t3_cim_hac_read(adapter, base_addr + 4 * j, &v);
4784 if (ret)
4785 goto out;
4786
4787 *buf++ = v;
4788 }
4789 }
4790
4791 *size = IOQ_ENTRIES * sizeof(struct t3_ioq_entry);
4792
4793 out:
4794 return ret;
4795 }
4796
4797