/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");


#include <cxgb_include.h>

#undef msleep
#define msleep t3_os_sleep

/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
			int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

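/*
 * Note: the t3_wait_op_done() calls throughout this file are presumed to be
 * the header-supplied convenience wrapper around t3_wait_op_done_val() that
 * passes a NULL @valp for callers that only care about completion.
 */
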
/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
		   unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	(void) t3_read_reg(adapter, addr);	/* flush */
}

/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read @n 64-bit words from MC7 starting at word @start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static int shift[] = { 0, 0, 16, 24 };
	static int step[]  = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	adapter_t *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
				     start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						mc7->offset + A_MC7_BD_DATA0);
				val64 |= (u64)val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64)val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}

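/*
 * MI1 clock divider: assuming the hardware generates MDC at
 * cclk / (2 * (V_CLKDIV + 1)), the divider that yields the VPD-specified
 * MDC frequency is clkdiv = cclk / (2 * mdc) - 1, as computed below.
 */
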
/*
 * Initialize MI1.
 */
static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
		int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
	return ret;
}

int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
		 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	MDIO_UNLOCK(adapter);
	return ret;
}

static struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};

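/*
 * As used in this file, the V_MDI_OP() opcodes appear to map as follows:
 * 0 = address cycle (clause 45 only), 1 = write, 2 = clause 22 read, and
 * 3 = clause 45 read-after-address.  Clause 45 accesses are therefore
 * two-step: an address cycle through A_MI1_DATA, then the data operation.
 */
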
/*
 * MI1 read/write operations for clause 45 PHYs.
 */
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}

static struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a fiber PHY's advertisement register to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);

	if (err)
		return err;
	return (status & 1) ? cphy_cause_link_change : 0;
}

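/*
 * The positional initializers below are presumed to follow the field order
 * of struct adapter_info in the common header: port counts on the two
 * channels, MDIO PHY base address, GPIO output settings, per-port GPIO
 * interrupt pins, port capabilities, MDIO operations, and description.
 */
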
static struct adapter_info t3_adap_info[] = {
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio PE9000" },
	{ 1, 1, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio T302" },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	  F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 1, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	  F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	  F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T320" },
	{ 4, 0, 0,
	  F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
	  F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
	  { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
	  &mi1_mdio_ops, "Chelsio T304" },
	{ 0 },
	{ 1, 0, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio N310" }
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr,
			const struct mdio_ops *ops);
};

static struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ t3_mv88e1xxx_phy_prep },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ t3_tn1010_phy_prep },
};

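/*
 * Each VPD_ENTRY(name, len) in the structure below expands to the standard
 * PCI VPD keyword/length/data triple, e.g. VPD_ENTRY(sn, SERNUM_LEN) becomes
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[SERNUM_LEN];
 */
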
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[16];
	u8  vpdr_tag;
	u8  vpdr_len[2];
	VPD_ENTRY(pn, 16);		/* part number */
	VPD_ENTRY(ec, ECNUM_LEN);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);	/* serial number */
	VPD_ENTRY(na, 12);		/* MAC address base */
	VPD_ENTRY(cclk, 6);		/* core clock */
	VPD_ENTRY(mclk, 6);		/* mem clock */
	VPD_ENTRY(uclk, 6);		/* uP clk */
	VPD_ENTRY(mdc, 6);		/* MDIO clk */
	VPD_ENTRY(mt, 2);		/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);		/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);		/* XAUI1 config */
	VPD_ENTRY(port0, 2);		/* PHY0 complex */
	VPD_ENTRY(port1, 2);		/* PHY1 complex */
	VPD_ENTRY(port2, 2);		/* PHY2 complex */
	VPD_ENTRY(port3, 2);		/* PHY3 complex */
	VPD_ENTRY(rv, 1);		/* csum */
	u32 pad;			/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL		40
#define EEPROM_STAT_ADDR	0x4000
#define VPD_BASE		0xc00

/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.
 */
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(adapter_t *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

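/*
 * hex2int() below trusts its input: in this file it is only applied to
 * characters read out of the VPD sections, which are expected to hold
 * valid hex digits.
 */
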
/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
	memcpy(p->ec, vpd.ec_data, ECNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
		p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
		p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
		p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* BIOS boot header */
typedef struct boot_header_s {
	u8 signature[2];	/* signature */
	u8 length;		/* image length (includes header) */
	u8 offset[4];		/* initialization vector */
	u8 reserved[19];	/* reserved */
	u8 exheader[2];		/* offset to expansion header */
} boot_header_t;

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,		/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,		/* program page */
	SF_WR_DISABLE   = 4,		/* disable writes */
	SF_RD_STATUS    = 5,		/* read status register */
	SF_WR_ENABLE    = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_ERASE_SECTOR = 0xd8,		/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,		/* flash address holding FW version */
	FW_VERS_ADDR_PRE8 = 0x77ffc,	/* flash address holding FW version pre8 */
	FW_MIN_SIZE = 8,		/* at least version and csum */
	FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
	FW_MAX_SIZE_PRE8 = FW_VERS_ADDR_PRE8 - FW_FLASH_BOOT_ADDR,

	BOOT_FLASH_BOOT_ADDR = 0x0,	/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,	/* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,		/* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(boot_header_t),	/* at least basic header */
	BOOT_MAX_SIZE = 0xff * BOOT_SIZE_INC	/* 1 byte * length increment */
};

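/*
 * The A_SF_OP/A_SF_DATA register pair drives the SPI serial flash: each
 * operation shifts 1-4 bytes in or out, and the V_CONT bit presumably keeps
 * the flash selected so that multi-byte commands (opcode, address, data) can
 * be chained across several sf1_read()/sf1_write() calls, as t3_read_flash()
 * and t3_write_flash() below illustrate.
 */
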
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
		  u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 *	If @byte_oriented is set the write data is stored as a 32-bit
 *	big-endian array, otherwise in the processor's native endianness.
 */
static int t3_write_flash(adapter_t *adapter, unsigned int addr,
			  unsigned int n, const u8 *data,
			  int byte_oriented)
{
	int ret;
	u32 buf[64];
	unsigned int c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		val = *(const u32*)data;
		data += c;
		if (byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}

/**
 *	t3_get_tp_version - read the tp sram version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol sram version from sram.
 */
int t3_get_tp_version(adapter_t *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 *	t3_check_tpsram_version - check the tp sram version
 *	@adapter: the adapter
 *
 *	Reads the protocol SRAM version and compares it with the version
 *	the driver was compiled for.
 */
int t3_check_tpsram_version(adapter_t *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;
	else {
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 *	t3_check_tpsram - check if provided protocol SRAM is compatible with
 *	this driver
 *	@adapter: the adapter
 *	@tp_sram: the protocol SRAM image to check
 *	@size: image size
 *
 *	Checks if an adapter's tp sram is compatible with the driver.
 *	Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.  Note that we had to move the version
 *	due to FW size.
 *	If we don't find a valid FW version in the new location
 *	we fall back and read the old location.
 */
int t3_get_fw_version(adapter_t *adapter, u32 *vers)
{
	int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);

	if (!ret && *vers != 0xffffffff)
		return 0;
	else
		return t3_read_flash(adapter, FW_VERS_ADDR_PRE8, 1, vers, 0);
}

/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(adapter_t *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

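/*
 * Flash layout of a FW image, as implied by the checks in t3_load_fw():
 *
 *	FW_FLASH_BOOT_ADDR:	code + data, size - 8 bytes
 *	next 4 bytes:		FW version, also written to fw_version_addr
 *	last 4 bytes:		checksum; the 1's-complement sum of all
 *				32-bit words of the image must be 0xffffffff
 */
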
/*
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
{
	u32 version, csum, fw_version_addr;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size - 8 > FW_MAX_SIZE)
		return -EFBIG;

	version = ntohl(*(const u32 *)(fw_data + size - 8));
	if (G_FW_VERSION_MAJOR(version) < 8) {
		fw_version_addr = FW_VERS_ADDR_PRE8;

		if (size - 8 > FW_MAX_SIZE_PRE8)
			return -EFBIG;
	} else
		fw_version_addr = FW_VERS_ADDR;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;	/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, fw_version_addr, 4, fw_data, 1);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}

/*
 *	t3_load_boot - download boot flash
 *	@adapter: the adapter
 *	@boot_data: the boot image to write
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
{
	boot_header_t *header = (boot_header_t *)boot_data;
	int ret;
	unsigned int addr;
	unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
	unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adapter, "boot image too small/large\n");
		return -EFBIG;
	}
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adapter, "boot image missing signature\n");
		return -EINVAL;
	}
	if (header->length * BOOT_SIZE_INC != size) {
		CH_ERR(adapter, "boot image header length != image length\n");
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
	if (ret)
		goto out;

	for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
		if (ret)
			goto out;

		addr += chunk_size;
		boot_data += chunk_size;
		size -= chunk_size;
	}

out:
	if (ret)
		CH_ERR(adapter, "boot image download failed, error %d\n", ret);
	return ret;
}

#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
			unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}

static int t3_detect_link_fault(adapter_t *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	uint32_t rx_cfg, rx_hash_high, rx_hash_low;
	int link_fault;

	/* stop rx */
	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);

	/* clear status and make sure intr is enabled */
	(void) t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
	t3_xgm_intr_enable(adapter, port_id);

	/* restart rx */
	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
	return (link_fault & F_LINKFAULTCHANGE ? 1 : 0);
}

static void t3_clear_faults(adapter_t *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;

	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset,
			 F_ENDROPPKT, 0);
	t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	t3_set_reg_field(adapter, A_XGM_STAT_CTRL + mac->offset, F_CLRSTATS, 1);

	if (adapter->params.nports <= 2) {
		t3_xgm_intr_disable(adapter, pi->port_id);
		t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, F_XGM_INT);
		t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adapter, pi->port_id);
	}
}

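/*
 * pi->link_fault tracks a three-state fault status (presumably LF_NO,
 * LF_MAYBE, and LF_YES as used below): LF_MAYBE marks a suspected fault to
 * be confirmed by t3_detect_link_fault(), LF_YES suppresses link-up
 * reporting until the fault clears, and LF_NO is the normal state.
 */
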
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(adapter_t *adapter, int port_id)
{
	int link_ok, speed, duplex, fc, link_fault, link_change;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;
	link_fault = 0;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	/*
	 * Check for link faults if any of these is true:
	 * a) A link fault is suspected, and PHY says link ok
	 * b) PHY link transitioned from down -> up
	 */
	if (adapter->params.nports <= 2 &&
	    ((pi->link_fault && link_ok) || (!lc->link_ok && link_ok))) {

		link_fault = t3_detect_link_fault(adapter, port_id);
		if (link_fault) {
			if (pi->link_fault != LF_YES) {
				mac->stats.link_faults++;
				pi->link_fault = LF_YES;
			}

			/* Don't report link up or any other change */
			link_ok = 0;
			speed = lc->speed;
			duplex = lc->duplex;
			fc = lc->fc;
		} else {
			/* clear faults here if this was a false alarm. */
			if (pi->link_fault == LF_MAYBE &&
			    link_ok && lc->link_ok)
				t3_clear_faults(adapter, port_id);

			pi->link_fault = LF_NO;
		}
	}

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;	/* nothing changed */

	link_change = link_ok != lc->link_ok;
	lc->link_ok = (unsigned char)link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok) {

		/* down -> up, or up -> up with changed settings */

		if (link_change && adapter->params.rev > 0 &&
		    uses_xaui(adapter)) {
			t3b_pcs_reset(mac);
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);
		}

		if (speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
			/* Set MAC settings to match PHY. */
			t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
			lc->fc = (unsigned char)fc;
		}

		t3_clear_faults(adapter, port_id);

	} else {

		/* up -> down */

		if (adapter->params.rev > 0 && uses_xaui(adapter)) {
			t3_write_reg(adapter,
				     A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
		}

		t3_xgm_intr_disable(adapter, pi->port_id);
		if (adapter->params.nports <= 2) {
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + mac->offset,
					 F_XGM_INT, 0);
		}

		if (!link_fault) {
			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(phy, mac, lc);
		}

		/*
		 * Make sure Tx FIFO continues to drain, even as rxen is left
		 * high to help detect and indicate remote faults.
		 */
		t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset, 0,
				 F_ENDROPPKT);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset, F_TXEN);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			/* PR 5666.  Power phy up when doing an ifup */
			if (!is_10G(phy->adapter))
				phy->ops->power_down(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)	/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}

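/*
 * Per-module interrupt masks.  Each mask selects the conditions in a
 * module's INT_CAUSE register that the handlers below act on; they are
 * presumably also used when enabling module interrupts elsewhere in the
 * driver.
 */
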
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(adapter_t *adapter)
{
	static struct intr_info pcix1_intr_info[] = {
		{ F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
		{ F_SIGTARABT, "PCI signaled target abort", -1, 1 },
		{ F_RCVTARABT, "PCI received target abort", -1, 1 },
		{ F_RCVMSTABT, "PCI received master abort", -1, 1 },
		{ F_SIGSYSERR, "PCI signaled system error", -1, 1 },
		{ F_DETPARERR, "PCI detected parity error", -1, 1 },
		{ F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
		{ F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
		{ F_RCVSPLCMPERR, "PCI received split completion error", -1,
		  1 },
		{ F_DETCORECCERR, "PCI correctable ECC error",
		  STAT_PCI_CORR_ECC, 0 },
		{ F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
		{ F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
		{ V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		  1 },
		{ V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		  1 },
		{ V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		  1 },
		{ V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		  "error", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(adapter_t *adapter)
{
	static struct intr_info pcie_intr_info[] = {
		{ F_PEXERR, "PCI PEX error", -1, 1 },
		{ F_UNXSPLCPLERRR,
		  "PCI unexpected split completion DMA read error", -1, 1 },
		{ F_UNXSPLCPLERRC,
		  "PCI unexpected split completion DMA command error", -1, 1 },
		{ F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
		{ F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
		{ F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
		{ F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
		{ V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		  "PCI MSI-X table/PBA parity error", -1, 1 },
		{ F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
		{ F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
		{ F_RXPARERR, "PCI Rx parity error", -1, 1 },
		{ F_TXPARERR, "PCI Tx parity error", -1, 1 },
		{ V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
		{ 0 }
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(adapter_t *adapter)
{
	static struct intr_info tp_intr_info[] = {
		{ 0xffffff,  "TP parity error", -1, 1 },
		{ 0x1000000, "TP out of Rx pages", -1, 1 },
		{ 0x2000000, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};
	static struct intr_info tp_intr_info_t3c[] = {
		{ 0x1fffffff, "TP parity error", -1, 1 },
		{ F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(adapter_t *adapter)
{
	static struct intr_info cim_intr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
		{ F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
		{ F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
		{ F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
		{ F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
		{ F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
		{ F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
		{ F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
		{ F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
		{ F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
		{ F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
		{ F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
		{ F_ITAGPARERR, "CIM itag parity error", -1, 1 },
		{ F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(adapter_t *adapter)
{
	static struct intr_info ulprx_intr_info[] = {
		{ F_PARERRDATA, "ULP RX data parity error", -1, 1 },
		{ F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
		{ F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
		{ F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
		{ F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
		{ F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
		{ F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
		{ F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
1777 */ 1778 static void ulptx_intr_handler(adapter_t *adapter) 1779 { 1780 static struct intr_info ulptx_intr_info[] = { 1781 { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds", 1782 STAT_ULP_CH0_PBL_OOB, 0 }, 1783 { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds", 1784 STAT_ULP_CH1_PBL_OOB, 0 }, 1785 { 0xfc, "ULP TX parity error", -1, 1 }, 1786 { 0 } 1787 }; 1788 1789 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff, 1790 ulptx_intr_info, adapter->irq_stats)) 1791 t3_fatal_err(adapter); 1792 } 1793 1794 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \ 1795 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \ 1796 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \ 1797 F_ICSPI1_TX_FRAMING_ERROR) 1798 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \ 1799 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \ 1800 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \ 1801 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR) 1802 1803 /* 1804 * PM TX interrupt handler. 1805 */ 1806 static void pmtx_intr_handler(adapter_t *adapter) 1807 { 1808 static struct intr_info pmtx_intr_info[] = { 1809 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 1810 { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 }, 1811 { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 }, 1812 { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR), 1813 "PMTX ispi parity error", -1, 1 }, 1814 { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR), 1815 "PMTX ospi parity error", -1, 1 }, 1816 { 0 } 1817 }; 1818 1819 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff, 1820 pmtx_intr_info, NULL)) 1821 t3_fatal_err(adapter); 1822 } 1823 1824 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \ 1825 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \ 1826 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \ 1827 F_IESPI1_TX_FRAMING_ERROR) 1828 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \ 1829 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \ 1830 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \ 1831 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR) 1832 1833 /* 1834 * PM RX interrupt handler. 1835 */ 1836 static void pmrx_intr_handler(adapter_t *adapter) 1837 { 1838 static struct intr_info pmrx_intr_info[] = { 1839 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 1840 { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 }, 1841 { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 }, 1842 { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR), 1843 "PMRX ispi parity error", -1, 1 }, 1844 { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR), 1845 "PMRX ospi parity error", -1, 1 }, 1846 { 0 } 1847 }; 1848 1849 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff, 1850 pmrx_intr_info, NULL)) 1851 t3_fatal_err(adapter); 1852 } 1853 1854 /* 1855 * CPL switch interrupt handler. 
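 * Every cause bit handled here is treated as fatal.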
1856 */ 1857 static void cplsw_intr_handler(adapter_t *adapter) 1858 { 1859 static struct intr_info cplsw_intr_info[] = { 1860 { F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 }, 1861 { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, 1862 { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 }, 1863 { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 }, 1864 { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 }, 1865 { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 }, 1866 { 0 } 1867 }; 1868 1869 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff, 1870 cplsw_intr_info, NULL)) 1871 t3_fatal_err(adapter); 1872 } 1873 1874 /* 1875 * MPS interrupt handler. 1876 */ 1877 static void mps_intr_handler(adapter_t *adapter) 1878 { 1879 static struct intr_info mps_intr_info[] = { 1880 { 0x1ff, "MPS parity error", -1, 1 }, 1881 { 0 } 1882 }; 1883 1884 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff, 1885 mps_intr_info, NULL)) 1886 t3_fatal_err(adapter); 1887 } 1888 1889 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE) 1890 1891 /* 1892 * MC7 interrupt handler. 1893 */ 1894 static void mc7_intr_handler(struct mc7 *mc7) 1895 { 1896 adapter_t *adapter = mc7->adapter; 1897 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE); 1898 1899 if (cause & F_CE) { 1900 mc7->stats.corr_err++; 1901 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, " 1902 "data 0x%x 0x%x 0x%x\n", mc7->name, 1903 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR), 1904 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0), 1905 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1), 1906 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2)); 1907 } 1908 1909 if (cause & F_UE) { 1910 mc7->stats.uncorr_err++; 1911 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, " 1912 "data 0x%x 0x%x 0x%x\n", mc7->name, 1913 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR), 1914 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0), 1915 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1), 1916 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2)); 1917 } 1918 1919 if (G_PE(cause)) { 1920 mc7->stats.parity_err++; 1921 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n", 1922 mc7->name, G_PE(cause)); 1923 } 1924 1925 if (cause & F_AE) { 1926 u32 addr = 0; 1927 1928 if (adapter->params.rev > 0) 1929 addr = t3_read_reg(adapter, 1930 mc7->offset + A_MC7_ERR_ADDR); 1931 mc7->stats.addr_err++; 1932 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n", 1933 mc7->name, addr); 1934 } 1935 1936 if (cause & MC7_INTR_FATAL) 1937 t3_fatal_err(adapter); 1938 1939 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause); 1940 } 1941 1942 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \ 1943 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) 1944 /* 1945 * XGMAC interrupt handler. 1946 */ 1947 static int mac_intr_handler(adapter_t *adap, unsigned int idx) 1948 { 1949 u32 cause; 1950 struct port_info *pi; 1951 struct cmac *mac; 1952 1953 idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */ 1954 pi = adap2pinfo(adap, idx); 1955 mac = &pi->mac; 1956 1957 /* 1958 * We mask out interrupt causes for which we're not taking interrupts. 1959 * This allows us to use polling logic to monitor some of the other 1960 * conditions when taking interrupts would impose too much load on the 1961 * system. 
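	 * For example, a sustained Rx overload could otherwise raise an
	 * interrupt per overflowed frame; F_RXFIFO_OVERFLOW is masked out
	 * below and the drop count is left to the driver's periodic MAC
	 * statistics polling instead.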
1962 */ 1963 cause = (t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) 1964 & ~(F_RXFIFO_OVERFLOW)); 1965 1966 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) { 1967 mac->stats.tx_fifo_parity_err++; 1968 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx); 1969 } 1970 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) { 1971 mac->stats.rx_fifo_parity_err++; 1972 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx); 1973 } 1974 if (cause & F_TXFIFO_UNDERRUN) 1975 mac->stats.tx_fifo_urun++; 1976 if (cause & F_RXFIFO_OVERFLOW) 1977 mac->stats.rx_fifo_ovfl++; 1978 if (cause & V_SERDES_LOS(M_SERDES_LOS)) 1979 mac->stats.serdes_signal_loss++; 1980 if (cause & F_XAUIPCSCTCERR) 1981 mac->stats.xaui_pcs_ctc_err++; 1982 if (cause & F_XAUIPCSALIGNCHANGE) 1983 mac->stats.xaui_pcs_align_change++; 1984 if (cause & F_XGM_INT) { 1985 t3_set_reg_field(adap, 1986 A_XGM_INT_ENABLE + mac->offset, 1987 F_XGM_INT, 0); 1988 1989 /* link fault suspected */ 1990 pi->link_fault = LF_MAYBE; 1991 } 1992 1993 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause); 1994 1995 if (cause & XGM_INTR_FATAL) 1996 t3_fatal_err(adap); 1997 1998 return cause != 0; 1999 } 2000 2001 /* 2002 * Interrupt handler for PHY events. 2003 */ 2004 int t3_phy_intr_handler(adapter_t *adapter) 2005 { 2006 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); 2007 2008 for_each_port(adapter, i) { 2009 struct port_info *p = adap2pinfo(adapter, i); 2010 2011 if (!(p->phy.caps & SUPPORTED_IRQ)) 2012 continue; 2013 2014 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) { 2015 int phy_cause = p->phy.ops->intr_handler(&p->phy); 2016 2017 if (phy_cause & cphy_cause_link_change) 2018 t3_link_changed(adapter, i); 2019 if (phy_cause & cphy_cause_fifo_error) 2020 p->phy.fifo_errors++; 2021 if (phy_cause & cphy_cause_module_change) 2022 t3_os_phymod_changed(adapter, i); 2023 } 2024 } 2025 2026 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause); 2027 return 0; 2028 } 2029 2030 /** 2031 * t3_slow_intr_handler - control path interrupt handler 2032 * @adapter: the adapter 2033 * 2034 * T3 interrupt handler for non-data interrupt events, e.g., errors. 2035 * The designation 'slow' is because it involves register reads, while 2036 * data interrupts typically don't involve any MMIOs. 
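 *
 * A minimal dispatch sketch (the ISR wrapper shown is hypothetical,
 * not part of this file):
 *
 *	static void my_slow_isr(void *arg)
 *	{
 *		adapter_t *adap = arg;
 *
 *		(void) t3_slow_intr_handler(adap);
 *	}
 *
 * The return value is 1 if any enabled cause bit was handled and 0
 * otherwise, so callers sharing an IRQ can tell whether the interrupt
 * was theirs.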
2037 */ 2038 int t3_slow_intr_handler(adapter_t *adapter) 2039 { 2040 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0); 2041 2042 cause &= adapter->slow_intr_mask; 2043 if (!cause) 2044 return 0; 2045 if (cause & F_PCIM0) { 2046 if (is_pcie(adapter)) 2047 pcie_intr_handler(adapter); 2048 else 2049 pci_intr_handler(adapter); 2050 } 2051 if (cause & F_SGE3) 2052 t3_sge_err_intr_handler(adapter); 2053 if (cause & F_MC7_PMRX) 2054 mc7_intr_handler(&adapter->pmrx); 2055 if (cause & F_MC7_PMTX) 2056 mc7_intr_handler(&adapter->pmtx); 2057 if (cause & F_MC7_CM) 2058 mc7_intr_handler(&adapter->cm); 2059 if (cause & F_CIM) 2060 cim_intr_handler(adapter); 2061 if (cause & F_TP1) 2062 tp_intr_handler(adapter); 2063 if (cause & F_ULP2_RX) 2064 ulprx_intr_handler(adapter); 2065 if (cause & F_ULP2_TX) 2066 ulptx_intr_handler(adapter); 2067 if (cause & F_PM1_RX) 2068 pmrx_intr_handler(adapter); 2069 if (cause & F_PM1_TX) 2070 pmtx_intr_handler(adapter); 2071 if (cause & F_CPL_SWITCH) 2072 cplsw_intr_handler(adapter); 2073 if (cause & F_MPS0) 2074 mps_intr_handler(adapter); 2075 if (cause & F_MC5A) 2076 t3_mc5_intr_handler(&adapter->mc5); 2077 if (cause & F_XGMAC0_0) 2078 mac_intr_handler(adapter, 0); 2079 if (cause & F_XGMAC0_1) 2080 mac_intr_handler(adapter, 1); 2081 if (cause & F_T3DBG) 2082 t3_os_ext_intr_handler(adapter); 2083 2084 /* Clear the interrupts just processed. */ 2085 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause); 2086 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ 2087 return 1; 2088 } 2089 2090 static unsigned int calc_gpio_intr(adapter_t *adap) 2091 { 2092 unsigned int i, gpi_intr = 0; 2093 2094 for_each_port(adap, i) 2095 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) && 2096 adapter_info(adap)->gpio_intr[i]) 2097 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i]; 2098 return gpi_intr; 2099 } 2100 2101 /** 2102 * t3_intr_enable - enable interrupts 2103 * @adapter: the adapter whose interrupts should be enabled 2104 * 2105 * Enable interrupts by setting the interrupt enable registers of the 2106 * various HW modules and then enabling the top-level interrupt 2107 * concentrator. 2108 */ 2109 void t3_intr_enable(adapter_t *adapter) 2110 { 2111 static struct addr_val_pair intr_en_avp[] = { 2112 { A_MC7_INT_ENABLE, MC7_INTR_MASK }, 2113 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR, 2114 MC7_INTR_MASK }, 2115 { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR, 2116 MC7_INTR_MASK }, 2117 { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK }, 2118 { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK }, 2119 { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK }, 2120 { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK }, 2121 { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK }, 2122 { A_MPS_INT_ENABLE, MPS_INTR_MASK }, 2123 }; 2124 2125 adapter->slow_intr_mask = PL_INTR_MASK; 2126 2127 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0); 2128 t3_write_reg(adapter, A_TP_INT_ENABLE, 2129 adapter->params.rev >= T3_REV_C ? 
0x2bfffff : 0x3bfffff); 2130 t3_write_reg(adapter, A_SG_INT_ENABLE, SGE_INTR_MASK); 2131 2132 if (adapter->params.rev > 0) { 2133 t3_write_reg(adapter, A_CPL_INTR_ENABLE, 2134 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR); 2135 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, 2136 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 | 2137 F_PBL_BOUND_ERR_CH1); 2138 } else { 2139 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK); 2140 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK); 2141 } 2142 2143 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter)); 2144 2145 if (is_pcie(adapter)) 2146 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK); 2147 else 2148 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK); 2149 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask); 2150 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */ 2151 } 2152 2153 /** 2154 * t3_intr_disable - disable a card's interrupts 2155 * @adapter: the adapter whose interrupts should be disabled 2156 * 2157 * Disable interrupts. We only disable the top-level interrupt 2158 * concentrator and the SGE data interrupts. 2159 */ 2160 void t3_intr_disable(adapter_t *adapter) 2161 { 2162 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0); 2163 (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */ 2164 adapter->slow_intr_mask = 0; 2165 } 2166 2167 /** 2168 * t3_intr_clear - clear all interrupts 2169 * @adapter: the adapter whose interrupts should be cleared 2170 * 2171 * Clears all interrupts. 2172 */ 2173 void t3_intr_clear(adapter_t *adapter) 2174 { 2175 static const unsigned int cause_reg_addr[] = { 2176 A_SG_INT_CAUSE, 2177 A_SG_RSPQ_FL_STATUS, 2178 A_PCIX_INT_CAUSE, 2179 A_MC7_INT_CAUSE, 2180 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR, 2181 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR, 2182 A_CIM_HOST_INT_CAUSE, 2183 A_TP_INT_CAUSE, 2184 A_MC5_DB_INT_CAUSE, 2185 A_ULPRX_INT_CAUSE, 2186 A_ULPTX_INT_CAUSE, 2187 A_CPL_INTR_CAUSE, 2188 A_PM1_TX_INT_CAUSE, 2189 A_PM1_RX_INT_CAUSE, 2190 A_MPS_INT_CAUSE, 2191 A_T3DBG_INT_CAUSE, 2192 }; 2193 unsigned int i; 2194 2195 /* Clear PHY and MAC interrupts for each port. */ 2196 for_each_port(adapter, i) 2197 t3_port_intr_clear(adapter, i); 2198 2199 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i) 2200 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff); 2201 2202 if (is_pcie(adapter)) 2203 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff); 2204 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff); 2205 (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ 2206 } 2207 2208 void t3_xgm_intr_enable(adapter_t *adapter, int idx) 2209 { 2210 struct port_info *pi = adap2pinfo(adapter, idx); 2211 2212 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset, 2213 XGM_EXTRA_INTR_MASK); 2214 } 2215 2216 void t3_xgm_intr_disable(adapter_t *adapter, int idx) 2217 { 2218 struct port_info *pi = adap2pinfo(adapter, idx); 2219 2220 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset, 2221 0x7ff); 2222 } 2223 2224 /** 2225 * t3_port_intr_enable - enable port-specific interrupts 2226 * @adapter: associated adapter 2227 * @idx: index of port whose interrupts should be enabled 2228 * 2229 * Enable port-specific (i.e., MAC and PHY) interrupts for the given 2230 * adapter port. 
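 *
 * A typical bring-up sequence (illustrative only, not a verbatim
 * excerpt) clears stale causes before unmasking:
 *
 *	t3_port_intr_clear(adapter, idx);
 *	t3_port_intr_enable(adapter, idx);
 *	t3_intr_enable(adapter);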
2231 */ 2232 void t3_port_intr_enable(adapter_t *adapter, int idx) 2233 { 2234 struct port_info *pi = adap2pinfo(adapter, idx); 2235 2236 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK); 2237 pi->phy.ops->intr_enable(&pi->phy); 2238 } 2239 2240 /** 2241 * t3_port_intr_disable - disable port-specific interrupts 2242 * @adapter: associated adapter 2243 * @idx: index of port whose interrupts should be disabled 2244 * 2245 * Disable port-specific (i.e., MAC and PHY) interrupts for the given 2246 * adapter port. 2247 */ 2248 void t3_port_intr_disable(adapter_t *adapter, int idx) 2249 { 2250 struct port_info *pi = adap2pinfo(adapter, idx); 2251 2252 t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0); 2253 pi->phy.ops->intr_disable(&pi->phy); 2254 } 2255 2256 /** 2257 * t3_port_intr_clear - clear port-specific interrupts 2258 * @adapter: associated adapter 2259 * @idx: index of port whose interrupts to clear 2260 * 2261 * Clear port-specific (i.e., MAC and PHY) interrupts for the given 2262 * adapter port. 2263 */ 2264 void t3_port_intr_clear(adapter_t *adapter, int idx) 2265 { 2266 struct port_info *pi = adap2pinfo(adapter, idx); 2267 2268 t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff); 2269 pi->phy.ops->intr_clear(&pi->phy); 2270 } 2271 2272 #define SG_CONTEXT_CMD_ATTEMPTS 100 2273 2274 /** 2275 * t3_sge_write_context - write an SGE context 2276 * @adapter: the adapter 2277 * @id: the context id 2278 * @type: the context type 2279 * 2280 * Program an SGE context with the values already loaded in the 2281 * CONTEXT_DATA? registers. 2282 */ 2283 static int t3_sge_write_context(adapter_t *adapter, unsigned int id, 2284 unsigned int type) 2285 { 2286 if (type == F_RESPONSEQ) { 2287 /* 2288 * Can't write the Response Queue Context bits for 2289 * Interrupt Armed or the Reserve bits after the chip 2290 * has been initialized out of reset. Writing to these 2291 * bits can confuse the hardware. 2292 */ 2293 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); 2294 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); 2295 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff); 2296 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); 2297 } else { 2298 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); 2299 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); 2300 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff); 2301 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); 2302 } 2303 t3_write_reg(adapter, A_SG_CONTEXT_CMD, 2304 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); 2305 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 2306 0, SG_CONTEXT_CMD_ATTEMPTS, 1); 2307 } 2308 2309 /** 2310 * clear_sge_ctxt - completely clear an SGE context 2311 * @adapter: the adapter 2312 * @id: the context id 2313 * @type: the context type 2314 * 2315 * Completely clear an SGE context. Used predominantly at post-reset 2316 * initialization. Note in particular that we don't skip writing to any 2317 * "sensitive bits" in the contexts the way that t3_sge_write_context() 2318 * does ... 
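 * (All four SG_CONTEXT_MASK registers are written as ones below, so
 * every context bit is cleared, including the response queue bits that
 * t3_sge_write_context() masks off with its 0x17ffffff MASK2 value.)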
2319 */ 2320 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type) 2321 { 2322 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0); 2323 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0); 2324 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0); 2325 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0); 2326 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff); 2327 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff); 2328 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff); 2329 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff); 2330 t3_write_reg(adap, A_SG_CONTEXT_CMD, 2331 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); 2332 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 2333 0, SG_CONTEXT_CMD_ATTEMPTS, 1); 2334 } 2335 2336 /** 2337 * t3_sge_init_ecntxt - initialize an SGE egress context 2338 * @adapter: the adapter to configure 2339 * @id: the context id 2340 * @gts_enable: whether to enable GTS for the context 2341 * @type: the egress context type 2342 * @respq: associated response queue 2343 * @base_addr: base address of queue 2344 * @size: number of queue entries 2345 * @token: uP token 2346 * @gen: initial generation value for the context 2347 * @cidx: consumer pointer 2348 * 2349 * Initialize an SGE egress context and make it ready for use. If the 2350 * platform allows concurrent context operations, the caller is 2351 * responsible for appropriate locking. 2352 */ 2353 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable, 2354 enum sge_context_type type, int respq, u64 base_addr, 2355 unsigned int size, unsigned int token, int gen, 2356 unsigned int cidx) 2357 { 2358 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM; 2359 2360 if (base_addr & 0xfff) /* must be 4K aligned */ 2361 return -EINVAL; 2362 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 2363 return -EBUSY; 2364 2365 base_addr >>= 12; 2366 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) | 2367 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable)); 2368 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) | 2369 V_EC_BASE_LO((u32)base_addr & 0xffff)); 2370 base_addr >>= 16; 2371 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr); 2372 base_addr >>= 32; 2373 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, 2374 V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) | 2375 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) | 2376 F_EC_VALID); 2377 return t3_sge_write_context(adapter, id, F_EGRESS); 2378 } 2379 2380 /** 2381 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context 2382 * @adapter: the adapter to configure 2383 * @id: the context id 2384 * @gts_enable: whether to enable GTS for the context 2385 * @base_addr: base address of queue 2386 * @size: number of queue entries 2387 * @bsize: size of each buffer for this queue 2388 * @cong_thres: threshold to signal congestion to upstream producers 2389 * @gen: initial generation value for the context 2390 * @cidx: consumer pointer 2391 * 2392 * Initialize an SGE free list context and make it ready for use. The 2393 * caller is responsible for ensuring only one context operation occurs 2394 * at a time. 
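 *
 * Illustrative call (the queue address and geometry are made-up
 * values; @base_addr must be 4KB aligned):
 *
 *	err = t3_sge_init_flcntxt(adapter, 0, 1, fl_dma_addr, 1024,
 *				  2048, 256, 1, 0);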
2395 */ 2396 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable, 2397 u64 base_addr, unsigned int size, unsigned int bsize, 2398 unsigned int cong_thres, int gen, unsigned int cidx) 2399 { 2400 if (base_addr & 0xfff) /* must be 4K aligned */ 2401 return -EINVAL; 2402 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 2403 return -EBUSY; 2404 2405 base_addr >>= 12; 2406 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr); 2407 base_addr >>= 32; 2408 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, 2409 V_FL_BASE_HI((u32)base_addr) | 2410 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO)); 2411 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) | 2412 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) | 2413 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO)); 2414 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, 2415 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) | 2416 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable)); 2417 return t3_sge_write_context(adapter, id, F_FREELIST); 2418 } 2419 2420 /** 2421 * t3_sge_init_rspcntxt - initialize an SGE response queue context 2422 * @adapter: the adapter to configure 2423 * @id: the context id 2424 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ 2425 * @base_addr: base address of queue 2426 * @size: number of queue entries 2427 * @fl_thres: threshold for selecting the normal or jumbo free list 2428 * @gen: initial generation value for the context 2429 * @cidx: consumer pointer 2430 * 2431 * Initialize an SGE response queue context and make it ready for use. 2432 * The caller is responsible for ensuring only one context operation 2433 * occurs at a time. 2434 */ 2435 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx, 2436 u64 base_addr, unsigned int size, 2437 unsigned int fl_thres, int gen, unsigned int cidx) 2438 { 2439 unsigned int ctrl, intr = 0; 2440 2441 if (base_addr & 0xfff) /* must be 4K aligned */ 2442 return -EINVAL; 2443 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 2444 return -EBUSY; 2445 2446 base_addr >>= 12; 2447 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) | 2448 V_CQ_INDEX(cidx)); 2449 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr); 2450 base_addr >>= 32; 2451 ctrl = t3_read_reg(adapter, A_SG_CONTROL); 2452 if ((irq_vec_idx > 0) || 2453 ((irq_vec_idx == 0) && !(ctrl & F_ONEINTMULTQ))) 2454 intr = F_RQ_INTR_EN; 2455 if (irq_vec_idx >= 0) 2456 intr |= V_RQ_MSI_VEC(irq_vec_idx); 2457 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 2458 V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen)); 2459 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres); 2460 return t3_sge_write_context(adapter, id, F_RESPONSEQ); 2461 } 2462 2463 /** 2464 * t3_sge_init_cqcntxt - initialize an SGE completion queue context 2465 * @adapter: the adapter to configure 2466 * @id: the context id 2467 * @base_addr: base address of queue 2468 * @size: number of queue entries 2469 * @rspq: response queue for async notifications 2470 * @ovfl_mode: CQ overflow mode 2471 * @credits: completion queue credits 2472 * @credit_thres: the credit threshold 2473 * 2474 * Initialize an SGE completion queue context and make it ready for use. 2475 * The caller is responsible for ensuring only one context operation 2476 * occurs at a time. 
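 *
 * Note that @ovfl_mode is written to both the CQ_OVERFLOW_MODE and
 * CQ_ERR fields below, so overflow-armed queues start with the error
 * indication set accordingly.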
2477 */ 2478 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr, 2479 unsigned int size, int rspq, int ovfl_mode, 2480 unsigned int credits, unsigned int credit_thres) 2481 { 2482 if (base_addr & 0xfff) /* must be 4K aligned */ 2483 return -EINVAL; 2484 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 2485 return -EBUSY; 2486 2487 base_addr >>= 12; 2488 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size)); 2489 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr); 2490 base_addr >>= 32; 2491 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 2492 V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) | 2493 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) | 2494 V_CQ_ERR(ovfl_mode)); 2495 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) | 2496 V_CQ_CREDIT_THRES(credit_thres)); 2497 return t3_sge_write_context(adapter, id, F_CQ); 2498 } 2499 2500 /** 2501 * t3_sge_enable_ecntxt - enable/disable an SGE egress context 2502 * @adapter: the adapter 2503 * @id: the egress context id 2504 * @enable: enable (1) or disable (0) the context 2505 * 2506 * Enable or disable an SGE egress context. The caller is responsible for 2507 * ensuring only one context operation occurs at a time. 2508 */ 2509 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable) 2510 { 2511 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 2512 return -EBUSY; 2513 2514 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0); 2515 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); 2516 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); 2517 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID); 2518 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable)); 2519 t3_write_reg(adapter, A_SG_CONTEXT_CMD, 2520 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id)); 2521 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 2522 0, SG_CONTEXT_CMD_ATTEMPTS, 1); 2523 } 2524 2525 /** 2526 * t3_sge_disable_fl - disable an SGE free-buffer list 2527 * @adapter: the adapter 2528 * @id: the free list context id 2529 * 2530 * Disable an SGE free-buffer list. The caller is responsible for 2531 * ensuring only one context operation occurs at a time. 2532 */ 2533 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id) 2534 { 2535 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 2536 return -EBUSY; 2537 2538 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0); 2539 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); 2540 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE)); 2541 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); 2542 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0); 2543 t3_write_reg(adapter, A_SG_CONTEXT_CMD, 2544 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id)); 2545 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 2546 0, SG_CONTEXT_CMD_ATTEMPTS, 1); 2547 } 2548 2549 /** 2550 * t3_sge_disable_rspcntxt - disable an SGE response queue 2551 * @adapter: the adapter 2552 * @id: the response queue context id 2553 * 2554 * Disable an SGE response queue. The caller is responsible for 2555 * ensuring only one context operation occurs at a time. 
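 * Disabling is done by writing a zero queue size: only the CQ_SIZE
 * field is unmasked, so the rest of the context is left intact.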
2556 */ 2557 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id) 2558 { 2559 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 2560 return -EBUSY; 2561 2562 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE)); 2563 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); 2564 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); 2565 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); 2566 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0); 2567 t3_write_reg(adapter, A_SG_CONTEXT_CMD, 2568 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id)); 2569 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 2570 0, SG_CONTEXT_CMD_ATTEMPTS, 1); 2571 } 2572 2573 /** 2574 * t3_sge_disable_cqcntxt - disable an SGE completion queue 2575 * @adapter: the adapter 2576 * @id: the completion queue context id 2577 * 2578 * Disable an SGE completion queue. The caller is responsible for 2579 * ensuring only one context operation occurs at a time. 2580 */ 2581 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id) 2582 { 2583 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 2584 return -EBUSY; 2585 2586 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE)); 2587 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); 2588 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); 2589 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); 2590 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0); 2591 t3_write_reg(adapter, A_SG_CONTEXT_CMD, 2592 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id)); 2593 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 2594 0, SG_CONTEXT_CMD_ATTEMPTS, 1); 2595 } 2596 2597 /** 2598 * t3_sge_cqcntxt_op - perform an operation on a completion queue context 2599 * @adapter: the adapter 2600 * @id: the context id 2601 * @op: the operation to perform 2602 * @credits: credits to return to the CQ 2603 * 2604 * Perform the selected operation on an SGE completion queue context. 2605 * The caller is responsible for ensuring only one context operation 2606 * occurs at a time. 2607 * 2608 * For most operations the function returns the current HW position in 2609 * the completion queue. 2610 */ 2611 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op, 2612 unsigned int credits) 2613 { 2614 u32 val; 2615 2616 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 2617 return -EBUSY; 2618 2619 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16); 2620 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) | 2621 V_CONTEXT(id) | F_CQ); 2622 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 2623 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val)) 2624 return -EIO; 2625 2626 if (op >= 2 && op < 7) { 2627 if (adapter->params.rev > 0) 2628 return G_CQ_INDEX(val); 2629 2630 t3_write_reg(adapter, A_SG_CONTEXT_CMD, 2631 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id)); 2632 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, 2633 F_CONTEXT_CMD_BUSY, 0, 2634 SG_CONTEXT_CMD_ATTEMPTS, 1)) 2635 return -EIO; 2636 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0)); 2637 } 2638 return 0; 2639 } 2640 2641 /** 2642 * t3_sge_read_context - read an SGE context 2643 * @type: the context type 2644 * @adapter: the adapter 2645 * @id: the context id 2646 * @data: holds the retrieved context 2647 * 2648 * Read an SGE egress context. The caller is responsible for ensuring 2649 * only one context operation occurs at a time. 
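 * Despite the summary line above, this helper reads a context of any
 * @type (F_EGRESS, F_RESPONSEQ, F_FREELIST or F_CQ); the typed
 * wrappers below add the appropriate range check on @id.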
2650 */ 2651 static int t3_sge_read_context(unsigned int type, adapter_t *adapter, 2652 unsigned int id, u32 data[4]) 2653 { 2654 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 2655 return -EBUSY; 2656 2657 t3_write_reg(adapter, A_SG_CONTEXT_CMD, 2658 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id)); 2659 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, 2660 SG_CONTEXT_CMD_ATTEMPTS, 1)) 2661 return -EIO; 2662 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0); 2663 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1); 2664 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2); 2665 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3); 2666 return 0; 2667 } 2668 2669 /** 2670 * t3_sge_read_ecntxt - read an SGE egress context 2671 * @adapter: the adapter 2672 * @id: the context id 2673 * @data: holds the retrieved context 2674 * 2675 * Read an SGE egress context. The caller is responsible for ensuring 2676 * only one context operation occurs at a time. 2677 */ 2678 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4]) 2679 { 2680 if (id >= 65536) 2681 return -EINVAL; 2682 return t3_sge_read_context(F_EGRESS, adapter, id, data); 2683 } 2684 2685 /** 2686 * t3_sge_read_cq - read an SGE CQ context 2687 * @adapter: the adapter 2688 * @id: the context id 2689 * @data: holds the retrieved context 2690 * 2691 * Read an SGE CQ context. The caller is responsible for ensuring 2692 * only one context operation occurs at a time. 2693 */ 2694 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4]) 2695 { 2696 if (id >= 65536) 2697 return -EINVAL; 2698 return t3_sge_read_context(F_CQ, adapter, id, data); 2699 } 2700 2701 /** 2702 * t3_sge_read_fl - read an SGE free-list context 2703 * @adapter: the adapter 2704 * @id: the context id 2705 * @data: holds the retrieved context 2706 * 2707 * Read an SGE free-list context. The caller is responsible for ensuring 2708 * only one context operation occurs at a time. 2709 */ 2710 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4]) 2711 { 2712 if (id >= SGE_QSETS * 2) 2713 return -EINVAL; 2714 return t3_sge_read_context(F_FREELIST, adapter, id, data); 2715 } 2716 2717 /** 2718 * t3_sge_read_rspq - read an SGE response queue context 2719 * @adapter: the adapter 2720 * @id: the context id 2721 * @data: holds the retrieved context 2722 * 2723 * Read an SGE response queue context. The caller is responsible for 2724 * ensuring only one context operation occurs at a time. 2725 */ 2726 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4]) 2727 { 2728 if (id >= SGE_QSETS) 2729 return -EINVAL; 2730 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data); 2731 } 2732 2733 /** 2734 * t3_config_rss - configure Rx packet steering 2735 * @adapter: the adapter 2736 * @rss_config: RSS settings (written to TP_RSS_CONFIG) 2737 * @cpus: values for the CPU lookup table (0xff terminated) 2738 * @rspq: values for the response queue lookup table (0xffff terminated) 2739 * 2740 * Programs the receive packet steering logic. @cpus and @rspq provide 2741 * the values for the CPU and response queue lookup tables. If they 2742 * provide fewer values than the size of the tables the supplied values 2743 * are used repeatedly until the tables are fully populated. 
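 *
 * For example, @cpus = { 0, 1, 2, 3, 0xff } populates the 6-bit CPU
 * entries as 0, 1, 2, 3, 0, 1, ... for all RSS_TABLE_SIZE slots;
 * @rspq wraps the same way when its 0xffff terminator is reached.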
2744 */ 2745 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus, 2746 const u16 *rspq) 2747 { 2748 int i, j, cpu_idx = 0, q_idx = 0; 2749 2750 if (cpus) 2751 for (i = 0; i < RSS_TABLE_SIZE; ++i) { 2752 u32 val = i << 16; 2753 2754 for (j = 0; j < 2; ++j) { 2755 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j); 2756 if (cpus[cpu_idx] == 0xff) 2757 cpu_idx = 0; 2758 } 2759 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val); 2760 } 2761 2762 if (rspq) 2763 for (i = 0; i < RSS_TABLE_SIZE; ++i) { 2764 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE, 2765 (i << 16) | rspq[q_idx++]); 2766 if (rspq[q_idx] == 0xffff) 2767 q_idx = 0; 2768 } 2769 2770 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config); 2771 } 2772 2773 /** 2774 * t3_read_rss - read the contents of the RSS tables 2775 * @adapter: the adapter 2776 * @lkup: holds the contents of the RSS lookup table 2777 * @map: holds the contents of the RSS map table 2778 * 2779 * Reads the contents of the receive packet steering tables. 2780 */ 2781 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map) 2782 { 2783 int i; 2784 u32 val; 2785 2786 if (lkup) 2787 for (i = 0; i < RSS_TABLE_SIZE; ++i) { 2788 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, 2789 0xffff0000 | i); 2790 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE); 2791 if (!(val & 0x80000000)) 2792 return -EAGAIN; 2793 *lkup++ = (u8)val; 2794 *lkup++ = (u8)(val >> 8); 2795 } 2796 2797 if (map) 2798 for (i = 0; i < RSS_TABLE_SIZE; ++i) { 2799 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE, 2800 0xffff0000 | i); 2801 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE); 2802 if (!(val & 0x80000000)) 2803 return -EAGAIN; 2804 *map++ = (u16)val; 2805 } 2806 return 0; 2807 } 2808 2809 /** 2810 * t3_tp_set_offload_mode - put TP in NIC/offload mode 2811 * @adap: the adapter 2812 * @enable: 1 to select offload mode, 0 for regular NIC 2813 * 2814 * Switches TP to NIC/offload mode. 2815 */ 2816 void t3_tp_set_offload_mode(adapter_t *adap, int enable) 2817 { 2818 if (is_offload(adap) || !enable) 2819 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 2820 V_NICMODE(!enable)); 2821 } 2822 2823 /** 2824 * tp_wr_bits_indirect - set/clear bits in an indirect TP register 2825 * @adap: the adapter 2826 * @addr: the indirect TP register address 2827 * @mask: specifies the field within the register to modify 2828 * @val: new value for the field 2829 * 2830 * Sets a field of an indirect TP register to the given value. 2831 */ 2832 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr, 2833 unsigned int mask, unsigned int val) 2834 { 2835 t3_write_reg(adap, A_TP_PIO_ADDR, addr); 2836 val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask; 2837 t3_write_reg(adap, A_TP_PIO_DATA, val); 2838 } 2839 2840 /** 2841 * t3_enable_filters - enable the HW filters 2842 * @adap: the adapter 2843 * 2844 * Enables the HW filters for NIC traffic. 2845 */ 2846 void t3_enable_filters(adapter_t *adap) 2847 { 2848 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0); 2849 t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN); 2850 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3)); 2851 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT); 2852 } 2853 2854 /** 2855 * t3_disable_filters - disable the HW filters 2856 * @adap: the adapter 2857 * 2858 * Disables the HW filters for NIC traffic. 
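 * This reverses everything t3_enable_filters() programmed except the
 * NICMODE bit, which is deliberately left alone (see the comment in
 * the function body).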
2859 */ 2860 void t3_disable_filters(adapter_t *adap) 2861 { 2862 /* note that we don't want to revert to NIC-only mode */ 2863 t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_FILTEREN, 0); 2864 t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 2865 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 0); 2866 tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, F_LOOKUPEVERYPKT, 0); 2867 } 2868 2869 /** 2870 * pm_num_pages - calculate the number of pages of the payload memory 2871 * @mem_size: the size of the payload memory 2872 * @pg_size: the size of each payload memory page 2873 * 2874 * Calculate the number of pages, each of the given size, that fit in a 2875 * memory of the specified size, respecting the HW requirement that the 2876 * number of pages must be a multiple of 24. 2877 */ 2878 static inline unsigned int pm_num_pages(unsigned int mem_size, 2879 unsigned int pg_size) 2880 { 2881 unsigned int n = mem_size / pg_size; 2882 2883 return n - n % 24; 2884 } 2885 2886 #define mem_region(adap, start, size, reg) \ 2887 t3_write_reg((adap), A_ ## reg, (start)); \ 2888 start += size 2889 2890 /** 2891 * partition_mem - partition memory and configure TP memory settings 2892 * @adap: the adapter 2893 * @p: the TP parameters 2894 * 2895 * Partitions context and payload memory and configures TP's memory 2896 * registers. 2897 */ 2898 static void partition_mem(adapter_t *adap, const struct tp_params *p) 2899 { 2900 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5); 2901 unsigned int timers = 0, timers_shift = 22; 2902 2903 if (adap->params.rev > 0) { 2904 if (tids <= 16 * 1024) { 2905 timers = 1; 2906 timers_shift = 16; 2907 } else if (tids <= 64 * 1024) { 2908 timers = 2; 2909 timers_shift = 18; 2910 } else if (tids <= 256 * 1024) { 2911 timers = 3; 2912 timers_shift = 20; 2913 } 2914 } 2915 2916 t3_write_reg(adap, A_TP_PMM_SIZE, 2917 p->chan_rx_size | (p->chan_tx_size >> 16)); 2918 2919 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0); 2920 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size); 2921 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs); 2922 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX), 2923 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12)); 2924 2925 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0); 2926 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size); 2927 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs); 2928 2929 pstructs = p->rx_num_pgs + p->tx_num_pgs; 2930 /* Add a bit of headroom and make multiple of 24 */ 2931 pstructs += 48; 2932 pstructs -= pstructs % 24; 2933 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs); 2934 2935 m = tids * TCB_SIZE; 2936 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR); 2937 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR); 2938 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m); 2939 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22); 2940 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE); 2941 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE); 2942 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE); 2943 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE); 2944 2945 m = (m + 4095) & ~0xfff; 2946 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m); 2947 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m); 2948 2949 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32; 2950 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers - 2951 adap->params.mc5.nfilters - adap->params.mc5.nroutes; 2952 if (tids < m) 2953 adap->params.mc5.nservers += m - tids; 2954 
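	/*
	 * If fewer connections fit in the remaining CM space than the
	 * TCAM has set aside for them, return the surplus TCAM entries
	 * to the server region.
	 */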
} 2955 2956 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val) 2957 { 2958 t3_write_reg(adap, A_TP_PIO_ADDR, addr); 2959 t3_write_reg(adap, A_TP_PIO_DATA, val); 2960 } 2961 2962 static inline u32 tp_rd_indirect(adapter_t *adap, unsigned int addr) 2963 { 2964 t3_write_reg(adap, A_TP_PIO_ADDR, addr); 2965 return t3_read_reg(adap, A_TP_PIO_DATA); 2966 } 2967 2968 static void tp_config(adapter_t *adap, const struct tp_params *p) 2969 { 2970 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU | 2971 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD | 2972 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64)); 2973 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) | 2974 F_MTUENABLE | V_WINDOWSCALEMODE(1) | 2975 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1)); 2976 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) | 2977 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) | 2978 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) | 2979 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1)); 2980 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO, 2981 F_IPV6ENABLE | F_NICMODE); 2982 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814); 2983 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105); 2984 t3_set_reg_field(adap, A_TP_PARA_REG6, 0, 2985 adap->params.rev > 0 ? F_ENABLEESND : 2986 F_T3A_ENABLEESND); 2987 t3_set_reg_field(adap, A_TP_PC_CONFIG, 2988 F_ENABLEEPCMDAFULL, 2989 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK | 2990 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE); 2991 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 2992 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN | 2993 F_ENABLEARPMISS | F_DISBLEDAPARBIT0); 2994 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080); 2995 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000); 2996 2997 if (adap->params.rev > 0) { 2998 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE); 2999 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, 3000 F_TXPACEAUTO | F_TXPACEAUTOSTRICT); 3001 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID); 3002 tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50); 3003 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688); 3004 tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688); 3005 } else 3006 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED); 3007 3008 if (adap->params.rev == T3_REV_C) 3009 t3_set_reg_field(adap, A_TP_PC_CONFIG, 3010 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA), 3011 V_TABLELATENCYDELTA(4)); 3012 3013 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0); 3014 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0); 3015 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0); 3016 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000); 3017 3018 if (adap->params.nports > 2) { 3019 t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0, 3020 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA | 3021 F_ENABLERXPORTFROMADDR); 3022 tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE, 3023 V_RXMAPMODE(M_RXMAPMODE), 0); 3024 tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) | 3025 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) | 3026 F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD | 3027 F_ENABLEINSERTION | F_ENABLEINSERTIONSFD); 3028 tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000); 3029 tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5); 3030 tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT); 3031 } 3032 } 3033 3034 /* TCP timer values in ms */ 3035 #define TP_DACK_TIMER 50 3036 #define TP_RTO_MIN 250 3037 3038 /** 3039 * tp_set_timers - set TP timing parameters 3040 * @adap: the adapter to set 3041 * @core_clk: the core clock frequency in 
Hz
3042  *
3043  * Set TP's timing parameters, such as the various timer resolutions and
3044  * the TCP timer values.
3045  */
3046 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
3047 {
3048 	unsigned int tre = adap->params.tp.tre;
3049 	unsigned int dack_re = adap->params.tp.dack_re;
3050 	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
3051 	unsigned int tps = core_clk >> tre;
3052 
3053 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
3054 		     V_DELAYEDACKRESOLUTION(dack_re) |
3055 		     V_TIMESTAMPRESOLUTION(tstamp_re));
3056 	t3_write_reg(adap, A_TP_DACK_TIMER,
3057 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
3058 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
3059 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
3060 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
3061 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
3062 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
3063 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
3064 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
3065 		     V_KEEPALIVEMAX(9));
3066 
3067 #define SECONDS * tps
3068 
3069 	t3_write_reg(adap, A_TP_MSL,
3070 		     adap->params.rev > 0 ? 0 : 2 SECONDS);
3071 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
3072 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
3073 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
3074 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
3075 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
3076 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
3077 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
3078 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
3079 
3080 #undef SECONDS
3081 }
3082 
3083 #ifdef CONFIG_CHELSIO_T3_CORE
3084 /**
3085  * t3_tp_set_coalescing_size - set receive coalescing size
3086  * @adap: the adapter
3087  * @size: the receive coalescing size
3088  * @psh: whether a set PSH bit should deliver coalesced data
3089  *
3090  * Set the receive coalescing size and PSH bit handling.
3091  */
3092 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
3093 {
3094 	u32 val;
3095 
3096 	if (size > MAX_RX_COALESCING_LEN)
3097 		return -EINVAL;
3098 
3099 	val = t3_read_reg(adap, A_TP_PARA_REG3);
3100 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
3101 
3102 	if (size) {
3103 		val |= F_RXCOALESCEENABLE;
3104 		if (psh)
3105 			val |= F_RXCOALESCEPSHEN;
3106 		size = min(MAX_RX_COALESCING_LEN, size);
3107 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
3108 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
3109 	}
3110 	t3_write_reg(adap, A_TP_PARA_REG3, val);
3111 	return 0;
3112 }
3113 
3114 /**
3115  * t3_tp_set_max_rxsize - set the max receive size
3116  * @adap: the adapter
3117  * @size: the max receive size
3118  *
3119  * Set TP's max receive size. This is the limit that applies when
3120  * receive coalescing is disabled.
3121  */
3122 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
3123 {
3124 	t3_write_reg(adap, A_TP_PARA_REG7,
3125 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
3126 }
3127 
3128 static void __devinit init_mtus(unsigned short mtus[])
3129 {
3130 	/*
3131 	 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
3132 	 * it can accommodate max size TCP/IP headers when SACK and timestamps
3133 	 * are enabled and still have at least 8 bytes of payload.
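	 * (At MTU 88 that is 20 bytes of IP header, 20 bytes of TCP header
	 * and up to 40 bytes of TCP options, leaving 8 bytes of payload.)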
3134 */ 3135 mtus[0] = 88; 3136 mtus[1] = 88; 3137 mtus[2] = 256; 3138 mtus[3] = 512; 3139 mtus[4] = 576; 3140 mtus[5] = 1024; 3141 mtus[6] = 1280; 3142 mtus[7] = 1492; 3143 mtus[8] = 1500; 3144 mtus[9] = 2002; 3145 mtus[10] = 2048; 3146 mtus[11] = 4096; 3147 mtus[12] = 4352; 3148 mtus[13] = 8192; 3149 mtus[14] = 9000; 3150 mtus[15] = 9600; 3151 } 3152 3153 /** 3154 * init_cong_ctrl - initialize congestion control parameters 3155 * @a: the alpha values for congestion control 3156 * @b: the beta values for congestion control 3157 * 3158 * Initialize the congestion control parameters. 3159 */ 3160 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) 3161 { 3162 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 3163 a[9] = 2; 3164 a[10] = 3; 3165 a[11] = 4; 3166 a[12] = 5; 3167 a[13] = 6; 3168 a[14] = 7; 3169 a[15] = 8; 3170 a[16] = 9; 3171 a[17] = 10; 3172 a[18] = 14; 3173 a[19] = 17; 3174 a[20] = 21; 3175 a[21] = 25; 3176 a[22] = 30; 3177 a[23] = 35; 3178 a[24] = 45; 3179 a[25] = 60; 3180 a[26] = 80; 3181 a[27] = 100; 3182 a[28] = 200; 3183 a[29] = 300; 3184 a[30] = 400; 3185 a[31] = 500; 3186 3187 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 3188 b[9] = b[10] = 1; 3189 b[11] = b[12] = 2; 3190 b[13] = b[14] = b[15] = b[16] = 3; 3191 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 3192 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 3193 b[28] = b[29] = 6; 3194 b[30] = b[31] = 7; 3195 } 3196 3197 /* The minimum additive increment value for the congestion control table */ 3198 #define CC_MIN_INCR 2U 3199 3200 /** 3201 * t3_load_mtus - write the MTU and congestion control HW tables 3202 * @adap: the adapter 3203 * @mtus: the unrestricted values for the MTU table 3204 * @alpha: the values for the congestion control alpha parameter 3205 * @beta: the values for the congestion control beta parameter 3206 * @mtu_cap: the maximum permitted effective MTU 3207 * 3208 * Write the MTU table with the supplied MTUs capping each at &mtu_cap. 3209 * Update the high-speed congestion control table with the supplied alpha, 3210 * beta, and MTUs. 3211 */ 3212 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS], 3213 unsigned short alpha[NCCTRL_WIN], 3214 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap) 3215 { 3216 static const unsigned int avg_pkts[NCCTRL_WIN] = { 3217 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 3218 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 3219 28672, 40960, 57344, 81920, 114688, 163840, 229376 }; 3220 3221 unsigned int i, w; 3222 3223 for (i = 0; i < NMTUS; ++i) { 3224 unsigned int mtu = min(mtus[i], mtu_cap); 3225 unsigned int log2 = fls(mtu); 3226 3227 if (!(mtu & ((1 << log2) >> 2))) /* round */ 3228 log2--; 3229 t3_write_reg(adap, A_TP_MTU_TABLE, 3230 (i << 24) | (log2 << 16) | mtu); 3231 3232 for (w = 0; w < NCCTRL_WIN; ++w) { 3233 unsigned int inc; 3234 3235 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 3236 CC_MIN_INCR); 3237 3238 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | 3239 (w << 16) | (beta[w] << 13) | inc); 3240 } 3241 } 3242 } 3243 3244 /** 3245 * t3_read_hw_mtus - returns the values in the HW MTU table 3246 * @adap: the adapter 3247 * @mtus: where to store the HW MTU values 3248 * 3249 * Reads the HW MTU table. 
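 * Each entry is fetched by writing 0xff000000 | index to the table
 * register and reading it back; the MTU is in the low 14 bits.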
3250 */ 3251 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS]) 3252 { 3253 int i; 3254 3255 for (i = 0; i < NMTUS; ++i) { 3256 unsigned int val; 3257 3258 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i); 3259 val = t3_read_reg(adap, A_TP_MTU_TABLE); 3260 mtus[i] = val & 0x3fff; 3261 } 3262 } 3263 3264 /** 3265 * t3_get_cong_cntl_tab - reads the congestion control table 3266 * @adap: the adapter 3267 * @incr: where to store the alpha values 3268 * 3269 * Reads the additive increments programmed into the HW congestion 3270 * control table. 3271 */ 3272 void t3_get_cong_cntl_tab(adapter_t *adap, 3273 unsigned short incr[NMTUS][NCCTRL_WIN]) 3274 { 3275 unsigned int mtu, w; 3276 3277 for (mtu = 0; mtu < NMTUS; ++mtu) 3278 for (w = 0; w < NCCTRL_WIN; ++w) { 3279 t3_write_reg(adap, A_TP_CCTRL_TABLE, 3280 0xffff0000 | (mtu << 5) | w); 3281 incr[mtu][w] = (unsigned short)t3_read_reg(adap, 3282 A_TP_CCTRL_TABLE) & 0x1fff; 3283 } 3284 } 3285 3286 /** 3287 * t3_tp_get_mib_stats - read TP's MIB counters 3288 * @adap: the adapter 3289 * @tps: holds the returned counter values 3290 * 3291 * Returns the values of TP's MIB counters. 3292 */ 3293 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps) 3294 { 3295 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps, 3296 sizeof(*tps) / sizeof(u32), 0); 3297 } 3298 3299 /** 3300 * t3_read_pace_tbl - read the pace table 3301 * @adap: the adapter 3302 * @pace_vals: holds the returned values 3303 * 3304 * Returns the values of TP's pace table in nanoseconds. 3305 */ 3306 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED]) 3307 { 3308 unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000); 3309 3310 for (i = 0; i < NTX_SCHED; i++) { 3311 t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i); 3312 pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns; 3313 } 3314 } 3315 3316 /** 3317 * t3_set_pace_tbl - set the pace table 3318 * @adap: the adapter 3319 * @pace_vals: the pace values in nanoseconds 3320 * @start: index of the first entry in the HW pace table to set 3321 * @n: how many entries to set 3322 * 3323 * Sets (a subset of the) HW pace table. 
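 * Values are converted from ns to hardware ticks with round-to-nearest
 * division: (*pace_vals + tick_ns / 2) / tick_ns.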
3324 */ 3325 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals, 3326 unsigned int start, unsigned int n) 3327 { 3328 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000); 3329 3330 for ( ; n; n--, start++, pace_vals++) 3331 t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | 3332 ((*pace_vals + tick_ns / 2) / tick_ns)); 3333 } 3334 3335 #define ulp_region(adap, name, start, len) \ 3336 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \ 3337 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \ 3338 (start) + (len) - 1); \ 3339 start += len 3340 3341 #define ulptx_region(adap, name, start, len) \ 3342 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \ 3343 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \ 3344 (start) + (len) - 1) 3345 3346 static void ulp_config(adapter_t *adap, const struct tp_params *p) 3347 { 3348 unsigned int m = p->chan_rx_size; 3349 3350 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8); 3351 ulp_region(adap, TDDP, m, p->chan_rx_size / 8); 3352 ulptx_region(adap, TPT, m, p->chan_rx_size / 4); 3353 ulp_region(adap, STAG, m, p->chan_rx_size / 4); 3354 ulp_region(adap, RQ, m, p->chan_rx_size / 4); 3355 ulptx_region(adap, PBL, m, p->chan_rx_size / 4); 3356 ulp_region(adap, PBL, m, p->chan_rx_size / 4); 3357 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff); 3358 } 3359 3360 3361 /** 3362 * t3_set_proto_sram - set the contents of the protocol sram 3363 * @adapter: the adapter 3364 * @data: the protocol image 3365 * 3366 * Write the contents of the protocol SRAM. 3367 */ 3368 int t3_set_proto_sram(adapter_t *adap, const u8 *data) 3369 { 3370 int i; 3371 const u32 *buf = (const u32 *)data; 3372 3373 for (i = 0; i < PROTO_SRAM_LINES; i++) { 3374 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++)); 3375 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++)); 3376 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++)); 3377 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++)); 3378 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++)); 3379 3380 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31); 3381 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1)) 3382 return -EIO; 3383 } 3384 return 0; 3385 } 3386 #endif 3387 3388 /** 3389 * t3_config_trace_filter - configure one of the tracing filters 3390 * @adapter: the adapter 3391 * @tp: the desired trace filter parameters 3392 * @filter_index: which filter to configure 3393 * @invert: if set non-matching packets are traced instead of matching ones 3394 * @enable: whether to enable or disable the filter 3395 * 3396 * Configures one of the tracing filters available in HW. 3397 */ 3398 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp, 3399 int filter_index, int invert, int enable) 3400 { 3401 u32 addr, key[4], mask[4]; 3402 3403 key[0] = tp->sport | (tp->sip << 16); 3404 key[1] = (tp->sip >> 16) | (tp->dport << 16); 3405 key[2] = tp->dip; 3406 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20); 3407 3408 mask[0] = tp->sport_mask | (tp->sip_mask << 16); 3409 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16); 3410 mask[2] = tp->dip_mask; 3411 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20); 3412 3413 if (invert) 3414 key[3] |= (1 << 29); 3415 if (enable) 3416 key[3] |= (1 << 28); 3417 3418 addr = filter_index ? 
A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0; 3419 tp_wr_indirect(adapter, addr++, key[0]); 3420 tp_wr_indirect(adapter, addr++, mask[0]); 3421 tp_wr_indirect(adapter, addr++, key[1]); 3422 tp_wr_indirect(adapter, addr++, mask[1]); 3423 tp_wr_indirect(adapter, addr++, key[2]); 3424 tp_wr_indirect(adapter, addr++, mask[2]); 3425 tp_wr_indirect(adapter, addr++, key[3]); 3426 tp_wr_indirect(adapter, addr, mask[3]); 3427 (void) t3_read_reg(adapter, A_TP_PIO_DATA); 3428 } 3429 3430 /** 3431 * t3_query_trace_filter - query a tracing filter 3432 * @adapter: the adapter 3433 * @tp: the current trace filter parameters 3434 * @filter_index: which filter to query 3435 * @inverted: non-zero if the filter is inverted 3436 * @enabled: non-zero if the filter is enabled 3437 * 3438 * Returns the current settings of the specified HW tracing filter. 3439 */ 3440 void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp, 3441 int filter_index, int *inverted, int *enabled) 3442 { 3443 u32 addr, key[4], mask[4]; 3444 3445 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0; 3446 key[0] = tp_rd_indirect(adapter, addr++); 3447 mask[0] = tp_rd_indirect(adapter, addr++); 3448 key[1] = tp_rd_indirect(adapter, addr++); 3449 mask[1] = tp_rd_indirect(adapter, addr++); 3450 key[2] = tp_rd_indirect(adapter, addr++); 3451 mask[2] = tp_rd_indirect(adapter, addr++); 3452 key[3] = tp_rd_indirect(adapter, addr++); 3453 mask[3] = tp_rd_indirect(adapter, addr); 3454 3455 tp->sport = key[0] & 0xffff; 3456 tp->sip = (key[0] >> 16) | ((key[1] & 0xffff) << 16); 3457 tp->dport = key[1] >> 16; 3458 tp->dip = key[2]; 3459 tp->proto = key[3] & 0xff; 3460 tp->vlan = key[3] >> 8; 3461 tp->intf = key[3] >> 20; 3462 3463 tp->sport_mask = mask[0] & 0xffff; 3464 tp->sip_mask = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16); 3465 tp->dport_mask = mask[1] >> 16; 3466 tp->dip_mask = mask[2]; 3467 tp->proto_mask = mask[3] & 0xff; 3468 tp->vlan_mask = mask[3] >> 8; 3469 tp->intf_mask = mask[3] >> 20; 3470 3471 *inverted = key[3] & (1 << 29); 3472 *enabled = key[3] & (1 << 28); 3473 } 3474 3475 /** 3476 * t3_config_sched - configure a HW traffic scheduler 3477 * @adap: the adapter 3478 * @kbps: target rate in Kbps 3479 * @sched: the scheduler index 3480 * 3481 * Configure a Tx HW scheduler for the target rate. 3482 */ 3483 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched) 3484 { 3485 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; 3486 unsigned int clk = adap->params.vpd.cclk * 1000; 3487 unsigned int selected_cpt = 0, selected_bpt = 0; 3488 3489 if (kbps > 0) { 3490 kbps *= 125; /* -> bytes */ 3491 for (cpt = 1; cpt <= 255; cpt++) { 3492 tps = clk / cpt; 3493 bpt = (kbps + tps / 2) / tps; 3494 if (bpt > 0 && bpt <= 255) { 3495 v = bpt * tps; 3496 delta = v >= kbps ? 
/**
 * t3_set_proto_sram - set the contents of the protocol SRAM
 * @adap: the adapter
 * @data: the protocol image
 *
 * Write the contents of the protocol SRAM.
 */
int t3_set_proto_sram(adapter_t *adap, const u8 *data)
{
        int i;
        const u32 *buf = (const u32 *)data;

        for (i = 0; i < PROTO_SRAM_LINES; i++) {
                t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
                t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
                t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
                t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
                t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));

                t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
                if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
                        return -EIO;
        }
        return 0;
}
#endif

/**
 * t3_config_trace_filter - configure one of the tracing filters
 * @adapter: the adapter
 * @tp: the desired trace filter parameters
 * @filter_index: which filter to configure
 * @invert: if set non-matching packets are traced instead of matching ones
 * @enable: whether to enable or disable the filter
 *
 * Configures one of the tracing filters available in HW.
 */
void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
                            int filter_index, int invert, int enable)
{
        u32 addr, key[4], mask[4];

        key[0] = tp->sport | (tp->sip << 16);
        key[1] = (tp->sip >> 16) | (tp->dport << 16);
        key[2] = tp->dip;
        key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

        mask[0] = tp->sport_mask | (tp->sip_mask << 16);
        mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
        mask[2] = tp->dip_mask;
        mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

        if (invert)
                key[3] |= (1 << 29);
        if (enable)
                key[3] |= (1 << 28);

        addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
        tp_wr_indirect(adapter, addr++, key[0]);
        tp_wr_indirect(adapter, addr++, mask[0]);
        tp_wr_indirect(adapter, addr++, key[1]);
        tp_wr_indirect(adapter, addr++, mask[1]);
        tp_wr_indirect(adapter, addr++, key[2]);
        tp_wr_indirect(adapter, addr++, mask[2]);
        tp_wr_indirect(adapter, addr++, key[3]);
        tp_wr_indirect(adapter, addr, mask[3]);
        (void) t3_read_reg(adapter, A_TP_PIO_DATA);         /* flush */
}

/**
 * t3_query_trace_filter - query a tracing filter
 * @adapter: the adapter
 * @tp: the current trace filter parameters
 * @filter_index: which filter to query
 * @inverted: non-zero if the filter is inverted
 * @enabled: non-zero if the filter is enabled
 *
 * Returns the current settings of the specified HW tracing filter.
 */
void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp,
                           int filter_index, int *inverted, int *enabled)
{
        u32 addr, key[4], mask[4];

        addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
        key[0] = tp_rd_indirect(adapter, addr++);
        mask[0] = tp_rd_indirect(adapter, addr++);
        key[1] = tp_rd_indirect(adapter, addr++);
        mask[1] = tp_rd_indirect(adapter, addr++);
        key[2] = tp_rd_indirect(adapter, addr++);
        mask[2] = tp_rd_indirect(adapter, addr++);
        key[3] = tp_rd_indirect(adapter, addr++);
        mask[3] = tp_rd_indirect(adapter, addr);

        tp->sport = key[0] & 0xffff;
        tp->sip = (key[0] >> 16) | ((key[1] & 0xffff) << 16);
        tp->dport = key[1] >> 16;
        tp->dip = key[2];
        tp->proto = key[3] & 0xff;
        tp->vlan = key[3] >> 8;
        tp->intf = key[3] >> 20;

        tp->sport_mask = mask[0] & 0xffff;
        tp->sip_mask = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16);
        tp->dport_mask = mask[1] >> 16;
        tp->dip_mask = mask[2];
        tp->proto_mask = mask[3] & 0xff;
        tp->vlan_mask = mask[3] >> 8;
        tp->intf_mask = mask[3] >> 20;

        *inverted = key[3] & (1 << 29);
        *enabled = key[3] & (1 << 28);
}
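/*
 * Illustrative sketch (hypothetical values, and assuming set mask bits
 * mark the key bits that must match): to trace TCP traffic to
 * destination port 80, a caller could zero a struct trace_params and
 * set tp.proto = 6, tp.proto_mask = 0xff, tp.dport = 80 and
 * tp.dport_mask = 0xffff, then call
 * t3_config_trace_filter(adapter, &tp, 0, 0, 1); per the code above,
 * filter_index 0 programs the Tx trace keys and the last two arguments
 * request a non-inverted, enabled filter.
 */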
/**
 * t3_config_sched - configure a HW traffic scheduler
 * @adap: the adapter
 * @kbps: target rate in Kbps
 * @sched: the scheduler index
 *
 * Configure a Tx HW scheduler for the target rate.
 */
int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
{
        unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
        unsigned int clk = adap->params.vpd.cclk * 1000;
        unsigned int selected_cpt = 0, selected_bpt = 0;

        if (kbps > 0) {
                kbps *= 125;                          /* Kbps -> bytes/s */
                for (cpt = 1; cpt <= 255; cpt++) {
                        tps = clk / cpt;
                        bpt = (kbps + tps / 2) / tps;
                        if (bpt > 0 && bpt <= 255) {
                                v = bpt * tps;
                                delta = v >= kbps ? v - kbps : kbps - v;
                                if (delta < mindelta) {
                                        mindelta = delta;
                                        selected_cpt = cpt;
                                        selected_bpt = bpt;
                                }
                        } else if (selected_cpt)
                                break;
                }
                if (!selected_cpt)
                        return -EINVAL;
        }
        t3_write_reg(adap, A_TP_TM_PIO_ADDR,
                     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
        else
                v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
        t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
        return 0;
}
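/*
 * Worked example (assumed 200MHz core clock, i.e. clk = 200000000):
 * for a 10000 Kbps target the loop searches for a cycles-per-tick
 * (cpt) and bytes-per-tick (bpt) pair, each limited to 255, whose
 * product bpt * (clk / cpt) best approximates 10000 * 125 = 1250000
 * bytes/s; cpt = 160 gives tps = 1250000 ticks/s with bpt = 1, an
 * exact match.
 */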
/**
 * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 * @adap: the adapter
 * @sched: the scheduler index
 * @ipg: the interpacket delay in tenths of nanoseconds
 *
 * Set the interpacket delay for a HW packet rate scheduler.
 */
int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
{
        unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;

        /* convert ipg to nearest number of core clocks */
        ipg *= core_ticks_per_usec(adap);
        ipg = (ipg + 5000) / 10000;
        if (ipg > 0xffff)
                return -EINVAL;

        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v = (v & 0xffff) | (ipg << 16);
        else
                v = (v & 0xffff0000) | ipg;
        t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
        t3_read_reg(adap, A_TP_TM_PIO_DATA);                /* flush */
        return 0;
}
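/*
 * Worked example (assumed 200MHz core clock, so core_ticks_per_usec()
 * returns 200): an ipg of 960 tenths of a nanosecond (96ns) becomes
 * 960 * 200 = 192000, and (192000 + 5000) / 10000 = 19 core clocks;
 * the value is scaled by ticks-per-usec, then divided by the 10000
 * tenths-of-ns in a usec with rounding to nearest.
 */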
/**
 * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 * @adap: the adapter
 * @sched: the scheduler index
 * @kbps: where to store the rate in Kbps
 * @ipg: where to store the interpacket delay in tenths of nanoseconds
 *
 * Return the current configuration of a HW Tx scheduler.
 */
void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
                     unsigned int *ipg)
{
        unsigned int v, addr, bpt, cpt;

        if (kbps) {
                addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
                t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
                v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
                if (sched & 1)
                        v >>= 16;
                bpt = (v >> 8) & 0xff;
                cpt = v & 0xff;
                if (!cpt)
                        *kbps = 0;                /* scheduler disabled */
                else {
                        v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
                        *kbps = (v * bpt) / 125;
                }
        }
        if (ipg) {
                addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
                t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
                v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
                if (sched & 1)
                        v >>= 16;
                v &= 0xffff;
                *ipg = (10000 * v) / core_ticks_per_usec(adap);
        }
}

/**
 * tp_init - configure TP
 * @adap: the adapter
 * @p: TP configuration parameters
 *
 * Initializes the TP HW module.
 */
static int tp_init(adapter_t *adap, const struct tp_params *p)
{
        int busy = 0;

        tp_config(adap, p);
        t3_set_vlan_accel(adap, 3, 0);

        if (is_offload(adap)) {
                tp_set_timers(adap, adap->params.vpd.cclk * 1000);
                t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
                busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
                                       0, 1000, 5);
                if (busy)
                        CH_ERR(adap, "TP initialization timed out\n");
        }

        if (!busy)
                t3_write_reg(adap, A_TP_RESET, F_TPRESET);
        return busy;
}

/**
 * t3_mps_set_active_ports - configure port failover
 * @adap: the adapter
 * @port_mask: bitmap of active ports
 *
 * Sets the active ports according to the supplied bitmap.
 */
int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
{
        if (port_mask & ~((1 << adap->params.nports) - 1))
                return -EINVAL;
        t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
                         port_mask << S_PORT0ACTIVE);
        return 0;
}
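/*
 * For example, on a two-port adapter t3_mps_set_active_ports(adap, 3)
 * marks both ports active and t3_mps_set_active_ports(adap, 1) leaves
 * only port 0 active; any mask bit beyond params.nports is rejected
 * with -EINVAL.
 */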
/**
 * chan_init_hw - channel-dependent HW initialization
 * @adap: the adapter
 * @chan_map: bitmap of Tx channels being used
 *
 * Perform the bits of HW initialization that are dependent on the Tx
 * channels being used.
 */
static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
{
        int i;

        if (chan_map != 3) {                            /* one channel */
                t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
                t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
                t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
                             (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
                                              F_TPTXPORT1EN | F_PORT1ACTIVE));
                t3_write_reg(adap, A_PM1_TX_CFG,
                             chan_map == 1 ? 0xffffffff : 0);
                if (chan_map == 2)
                        t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
                                     V_TX_MOD_QUEUE_REQ_MAP(0xff));
                t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
                t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
        } else {                                        /* two channels */
                t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
                t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
                t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
                             V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
                t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
                             F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
                             F_ENFORCEPKT);
                t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
                t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
                t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
                             V_TX_MOD_QUEUE_REQ_MAP(0xaa));
                for (i = 0; i < 16; i++)
                        t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
                                     (i << 16) | 0x1010);
                t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
                t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
        }
}

static int calibrate_xgm(adapter_t *adapter)
{
        if (uses_xaui(adapter)) {
                unsigned int v, i;

                for (i = 0; i < 5; ++i) {
                        t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
                        (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
                        msleep(1);
                        v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
                        if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
                                t3_write_reg(adapter, A_XGM_XAUI_IMP,
                                             V_XAUIIMP(G_CALIMP(v) >> 2));
                                return 0;
                        }
                }
                CH_ERR(adapter, "MAC calibration failed\n");
                return -1;
        } else {
                t3_write_reg(adapter, A_XGM_RGMII_IMP,
                             V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
                                 F_XGM_IMPSETUPDATE);
        }
        return 0;
}

static void calibrate_xgm_t3b(adapter_t *adapter)
{
        if (!uses_xaui(adapter)) {
                t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
                             F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
                                 F_XGM_IMPSETUPDATE);
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
                                 0);
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
                t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
        }
}

struct mc7_timing_params {
        unsigned char ActToPreDly;
        unsigned char ActToRdWrDly;
        unsigned char PreCyc;
        unsigned char RefCyc[5];
        unsigned char BkCyc;
        unsigned char WrToRdDly;
        unsigned char RdToWrDly;
};
/*
 * Write a value to a register and check that the write completed. These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 */
static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
{
        t3_write_reg(adapter, addr, val);
        (void) t3_read_reg(adapter, addr);                  /* flush */
        if (!(t3_read_reg(adapter, addr) & F_BUSY))
                return 0;
        CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
        return -EIO;
}

static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
        static const unsigned int mc7_mode[] = {
                0x632, 0x642, 0x652, 0x432, 0x442
        };
        static const struct mc7_timing_params mc7_timings[] = {
                { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
                { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
                { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
                { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
                { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
        };

        u32 val;
        unsigned int width, density, slow, attempts;
        adapter_t *adapter = mc7->adapter;
        const struct mc7_timing_params *p = &mc7_timings[mem_type];

        if (!mc7->size)
                return 0;

        val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
        slow = val & F_SLOW;
        width = G_WIDTH(val);
        density = G_DEN(val);

        t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
        val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */
        msleep(1);

        if (!slow) {
                t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
                (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
                msleep(1);
                if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
                    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
                        CH_ERR(adapter, "%s MC7 calibration timed out\n",
                               mc7->name);
                        goto out_fail;
                }
        }

        t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
                     V_ACTTOPREDLY(p->ActToPreDly) |
                     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
                     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
                     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

        t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
                     val | F_CLKEN | F_TERM150);
        (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */

        if (!slow)
                t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
                                 F_DLLENB);
        udelay(1);

        val = slow ? 3 : 6;
        if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
                goto out_fail;

        if (!slow) {
                t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
                t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
                                 F_DLLRST, 0);
                udelay(5);
        }

        if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
                       mc7_mode[mem_type]) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
            wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
                goto out_fail;

        /* clock value is in KHz */
        mc7_clock = mc7_clock * 7812 + mc7_clock / 2;       /* ns */
        mc7_clock /= 1000000;                       /* KHz->MHz, ns->us */

        t3_write_reg(adapter, mc7->offset + A_MC7_REF,
                     F_PERREFEN | V_PREREFDIV(mc7_clock));
        (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF);  /* flush */

        t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
                     F_ECCGENEN | F_ECCCHKEN);
        t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
        t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
        t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
                     (mc7->size << width) - 1);
        t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
        (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);  /* flush */

        attempts = 50;
        do {
                msleep(250);
                val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
        } while ((val & F_BUSY) && --attempts);
        if (val & F_BUSY) {
                CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
                goto out_fail;
        }

        /* Enable normal memory accesses. */
        t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
        return 0;

out_fail:
        return -1;
}
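/*
 * Worked example for the refresh-period math above (illustrative,
 * assumed clock): the code multiplies the KHz clock by 7812.5 (7812
 * plus the half from mc7_clock / 2), the number of ns in a standard
 * 7.8125us DRAM refresh interval, then divides by 10^6 to obtain
 * clock ticks per refresh. With an assumed 200000KHz (200MHz) memory
 * clock, 200000 * 7812 + 100000 = 1562500000, and dividing by 1000000
 * yields 1562 ticks between refreshes.
 */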
static void config_pcie(adapter_t *adap)
{
        static const u16 ack_lat[4][6] = {
                { 237, 416, 559, 1071, 2095, 4143 },
                { 128, 217, 289, 545, 1057, 2081 },
                { 73, 118, 154, 282, 538, 1050 },
                { 67, 107, 86, 150, 278, 534 }
        };
        static const u16 rpl_tmr[4][6] = {
                { 711, 1248, 1677, 3213, 6285, 12429 },
                { 384, 651, 867, 1635, 3171, 6243 },
                { 219, 354, 462, 846, 1614, 3150 },
                { 201, 321, 258, 450, 834, 1602 }
        };

        u16 val;
        unsigned int log2_width, pldsize;
        unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

        t3_os_pci_read_config_2(adap,
                        adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
                        &val);
        pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

        t3_os_pci_read_config_2(adap,
                        adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
                        &val);

        fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
        fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
                     G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
        log2_width = fls(adap->params.pci.width) - 1;
        acklat = ack_lat[log2_width][pldsize];
        if (val & 1)                            /* check L0s enable */
                acklat += fst_trn_tx * 4;
        rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

        if (adap->params.rev == 0)
                t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
                                 V_T3A_ACKLAT(M_T3A_ACKLAT),
                                 V_T3A_ACKLAT(acklat));
        else
                t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
                                 V_ACKLAT(acklat));

        t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
                         V_REPLAYLMT(rpllmt));

        t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
        t3_set_reg_field(adap, A_PCIE_CFG, 0,
                         F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
                         F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
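/*
 * Illustrative lookup (assumed link parameters): for a x8 link,
 * log2_width = fls(8) - 1 = 3, and with a 512-byte max payload
 * (pldsize = 2) the base ack latency is ack_lat[3][2] = 86 and the
 * base replay limit is rpl_tmr[3][2] = 258; the replay limit is then
 * padded by four times the Rx fast-training-sequence count, and the
 * ack latency likewise when L0s is enabled.
 */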
/**
 * t3_init_hw - initialize and configure T3 HW modules
 * @adapter: the adapter
 * @fw_params: initial parameters to pass to firmware (optional)
 *
 * Initialize and configure T3 HW modules. This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is
 * enabled.
 *
 * @fw_params are passed to FW and their value is platform dependent.
 * Only the top 8 bits are available for use, the rest must be 0.
 */
int t3_init_hw(adapter_t *adapter, u32 fw_params)
{
        int err = -EIO, attempts, i;
        const struct vpd_params *vpd = &adapter->params.vpd;

        if (adapter->params.rev > 0)
                calibrate_xgm_t3b(adapter);
        else if (calibrate_xgm(adapter))
                goto out_err;

        if (adapter->params.nports > 2)
                t3_mac_reset(&adap2pinfo(adapter, 0)->mac);

        if (vpd->mclk) {
                partition_mem(adapter, &adapter->params.tp);

                if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
                    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
                    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
                    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
                                adapter->params.mc5.nfilters,
                                adapter->params.mc5.nroutes))
                        goto out_err;

                for (i = 0; i < 32; i++)
                        if (clear_sge_ctxt(adapter, i, F_CQ))
                                goto out_err;
        }

        if (tp_init(adapter, &adapter->params.tp))
                goto out_err;

#ifdef CONFIG_CHELSIO_T3_CORE
        t3_tp_set_coalescing_size(adapter,
                                  min(adapter->params.sge.max_pkt_size,
                                      MAX_RX_COALESCING_LEN), 1);
        t3_tp_set_max_rxsize(adapter,
                             min(adapter->params.sge.max_pkt_size, 16384U));
        ulp_config(adapter, &adapter->params.tp);
#endif
        if (is_pcie(adapter))
                config_pcie(adapter);
        else
                t3_set_reg_field(adapter, A_PCIX_CFG, 0,
                                 F_DMASTOPEN | F_CLIDECEN);

        if (adapter->params.rev == T3_REV_C)
                t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
                                 F_CFG_CQE_SOP_MASK);

        t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
        t3_write_reg(adapter, A_PM1_RX_MODE, 0);
        t3_write_reg(adapter, A_PM1_TX_MODE, 0);
        chan_init_hw(adapter, adapter->params.chan_map);
        t3_sge_init(adapter, &adapter->params.sge);

        t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

        t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
        t3_write_reg(adapter, A_CIM_BOOT_CFG,
                     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
        (void) t3_read_reg(adapter, A_CIM_BOOT_CFG);        /* flush */

        attempts = 100;
        do {                            /* wait for uP to initialize */
                msleep(20);
        } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
        if (!attempts) {
                CH_ERR(adapter, "uP initialization timed out\n");
                goto out_err;
        }

        err = 0;
out_err:
        return err;
}
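/*
 * Example (hypothetical value): a platform wanting to hand the byte
 * 0x12 to the firmware would call t3_init_hw(adapter, 0x12U << 24),
 * keeping the low 24 bits zero as required; the value is OR'd with
 * vpd->uclk into A_CIM_HOST_ACC_DATA before the uP is started.
 */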
/**
 * get_pci_mode - determine a card's PCI mode
 * @adapter: the adapter
 * @p: where to store the PCI settings
 *
 * Determines a card's PCI mode and associated parameters, such as speed
 * and width.
 */
static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
{
        static unsigned short speed_map[] = { 33, 66, 100, 133 };
        u32 pci_mode, pcie_cap;

        pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
        if (pcie_cap) {
                u16 val;

                p->variant = PCI_VARIANT_PCIE;
                p->pcie_cap_addr = pcie_cap;
                t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
                                        &val);
                p->width = (val >> 4) & 0x3f;
                return;
        }

        pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
        p->speed = speed_map[G_PCLKRANGE(pci_mode)];
        p->width = (pci_mode & F_64BIT) ? 64 : 32;
        pci_mode = G_PCIXINITPAT(pci_mode);
        if (pci_mode == 0)
                p->variant = PCI_VARIANT_PCI;
        else if (pci_mode < 4)
                p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
        else if (pci_mode < 8)
                p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
        else
                p->variant = PCI_VARIANT_PCIX_266_MODE2;
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/duplex/flow-control/autonegotiation
 * settings.
 */
static void __devinit init_link_config(struct link_config *lc,
                                       unsigned int caps)
{
        lc->supported = caps;
        lc->requested_speed = lc->speed = SPEED_INVALID;
        lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
        lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
        if (lc->supported & SUPPORTED_Autoneg) {
                lc->advertising = lc->supported;
                lc->autoneg = AUTONEG_ENABLE;
                lc->requested_fc |= PAUSE_AUTONEG;
        } else {
                lc->advertising = 0;
                lc->autoneg = AUTONEG_DISABLE;
        }
}

/**
 * mc7_calc_size - calculate MC7 memory size
 * @cfg: the MC7 configuration
 *
 * Calculates the size of an MC7 memory in bytes from the value of its
 * configuration register.
 */
static unsigned int __devinit mc7_calc_size(u32 cfg)
{
        unsigned int width = G_WIDTH(cfg);
        unsigned int banks = !!(cfg & F_BKS) + 1;
        unsigned int org = !!(cfg & F_ORG) + 1;
        unsigned int density = G_DEN(cfg);
        unsigned int MBs = ((256 << density) * banks) / (org << width);

        return MBs << 20;
}
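/*
 * Worked example (assumed configuration bits): with density = 1,
 * F_BKS set (banks = 2), F_ORG clear (org = 1) and width = 2,
 * mc7_calc_size() yields ((256 << 1) * 2) / (1 << 2) = 256 MBs,
 * returned as 256 << 20 bytes.
 */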
static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
                               unsigned int base_addr, const char *name)
{
        u32 cfg;

        mc7->adapter = adapter;
        mc7->name = name;
        mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
        cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
        mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
        mc7->width = G_WIDTH(cfg);
}

void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
{
        mac->adapter = adapter;
        mac->multiport = adapter->params.nports > 2;
        if (mac->multiport) {
                mac->ext_port = (unsigned char)index;
                mac->nucast = 8;
                index = 0;
        } else
                mac->nucast = 1;

        mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;

        if (adapter->params.rev == 0 && uses_xaui(adapter)) {
                t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
                             is_10G(adapter) ? 0x2901c04 : 0x2301c04);
                t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
                                 F_ENRGMII, 0);
        }
}

/**
 * early_hw_init - HW initialization done at card detection time
 * @adapter: the adapter
 * @ai: contains information about the adapter type and properties
 *
 * Performs the part of HW initialization that is done early on when the
 * driver first detects the card. Most of the HW state is initialized
 * lazily later on when a port or an offload function are first used.
 */
void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
{
        u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
                              3 : 2);
        u32 gpio_out = ai->gpio_out;

        mi1_init(adapter, ai);
        t3_write_reg(adapter, A_I2C_CFG,                /* set for 80KHz */
                     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
        t3_write_reg(adapter, A_T3DBG_GPIO_EN,
                     gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
        t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
        t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

        if (adapter->params.rev == 0 || !uses_xaui(adapter))
                val |= F_ENRGMII;

        /* Enable MAC clocks so we can access the registers */
        t3_write_reg(adapter, A_XGM_PORT_CFG, val);
        (void) t3_read_reg(adapter, A_XGM_PORT_CFG);

        val |= F_CLKDIVRESET_;
        t3_write_reg(adapter, A_XGM_PORT_CFG, val);
        (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
        t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
        (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
}

/**
 * t3_reset_adapter - reset the adapter
 * @adapter: the adapter
 *
 * Reset the adapter.
 */
int t3_reset_adapter(adapter_t *adapter)
{
        int i, save_and_restore_pcie =
            adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
        uint16_t devid = 0;

        if (save_and_restore_pcie)
                t3_os_pci_save_state(adapter);
        t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

        /*
         * Delay for a while to give the device time to reset fully.
         * XXX The delay time should be tuned.
         */
        for (i = 0; i < 10; i++) {
                msleep(50);
                t3_os_pci_read_config_2(adapter, 0x00, &devid);
                if (devid == 0x1425)
                        break;
        }

        if (devid != 0x1425)
                return -1;

        if (save_and_restore_pcie)
                t3_os_pci_restore_state(adapter);
        return 0;
}

static int init_parity(adapter_t *adap)
{
        int i, err, addr;

        if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        for (err = i = 0; !err && i < 16; i++)
                err = clear_sge_ctxt(adap, i, F_EGRESS);
        for (i = 0xfff0; !err && i <= 0xffff; i++)
                err = clear_sge_ctxt(adap, i, F_EGRESS);
        for (i = 0; !err && i < SGE_QSETS; i++)
                err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
        if (err)
                return err;

        t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
        for (i = 0; i < 4; i++)
                for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
                        t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
                                     F_IBQDBGWR | V_IBQDBGQID(i) |
                                     V_IBQDBGADDR(addr));
                        err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
                                              F_IBQDBGBUSY, 0, 2, 1);
                        if (err)
                                return err;
                }
        return 0;
}
/**
 * t3_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 * @ai: contains information about the adapter type and properties
 * @reset: whether to reset the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int __devinit t3_prep_adapter(adapter_t *adapter,
                              const struct adapter_info *ai, int reset)
{
        int ret;
        unsigned int i, j = 0;

        get_pci_mode(adapter, &adapter->params.pci);

        adapter->params.info = ai;
        adapter->params.nports = ai->nports0 + ai->nports1;
        adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
        adapter->params.rev = t3_read_reg(adapter, A_PL_REV);

        /*
         * We used to only run the "adapter check task" once a second if
         * we had PHYs which didn't support interrupts (we would check
         * their link status once a second).  Now we check other conditions
         * in that routine which could potentially impose a very high
         * interrupt load on the system.  As such, we now always scan the
         * adapter state once a second.
         */
        adapter->params.linkpoll_period = 10;

        if (adapter->params.nports > 2)
                adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
        else
                adapter->params.stats_update_period = is_10G(adapter) ?
                        MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
        adapter->params.pci.vpd_cap_addr =
                t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

        ret = get_vpd_params(adapter, &adapter->params.vpd);
        if (ret < 0)
                return ret;

        if (reset && t3_reset_adapter(adapter))
                return -1;

        t3_sge_prep(adapter, &adapter->params.sge);

        if (adapter->params.vpd.mclk) {
                struct tp_params *p = &adapter->params.tp;

                mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
                mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
                mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

                p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
                p->pmrx_size = t3_mc7_size(&adapter->pmrx);
                p->pmtx_size = t3_mc7_size(&adapter->pmtx);
                p->cm_size = t3_mc7_size(&adapter->cm);
                p->chan_rx_size = p->pmrx_size / 2;  /* only 1 Rx channel */
                p->chan_tx_size = p->pmtx_size / p->nchan;
                p->rx_pg_size = 64 * 1024;
                p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
                p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
                p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
                p->ntimer_qs = p->cm_size >= (128 << 20) ||
                               adapter->params.rev > 0 ? 12 : 6;
                p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
                         1;
                p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
        }

        adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
                                  t3_mc7_size(&adapter->pmtx) &&
                                  t3_mc7_size(&adapter->cm);

        if (is_offload(adapter)) {
                adapter->params.mc5.nservers = DEFAULT_NSERVERS;
                /* PR 6487. TOE and filtering are mutually exclusive */
                adapter->params.mc5.nfilters = 0;
                adapter->params.mc5.nroutes = 0;
                t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

#ifdef CONFIG_CHELSIO_T3_CORE
                init_mtus(adapter->params.mtus);
                init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
#endif
        }

        early_hw_init(adapter, ai);
        ret = init_parity(adapter);
        if (ret)
                return ret;

        if (adapter->params.nports > 2 &&
            (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
                return ret;

        for_each_port(adapter, i) {
                u8 hw_addr[6];
                const struct port_type_info *pti;
                struct port_info *p = adap2pinfo(adapter, i);

                for (;;) {
                        unsigned port_type = adapter->params.vpd.port_type[j];
                        if (port_type) {
                                if (port_type < ARRAY_SIZE(port_types)) {
                                        pti = &port_types[port_type];
                                        break;
                                } else
                                        return -EINVAL;
                        }
                        j++;
                        if (j >= ARRAY_SIZE(adapter->params.vpd.port_type))
                                return -EINVAL;
                }
                ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
                                    ai->mdio_ops);
                if (ret)
                        return ret;
                mac_prep(&p->mac, adapter, j);
                ++j;

                /*
                 * The VPD EEPROM stores the base Ethernet address for the
                 * card.  A port's address is derived from the base by adding
                 * the port's index to the base's low octet.
                 */
                memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
                hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

                t3_os_set_hw_addr(adapter, i, hw_addr);
                init_link_config(&p->link_config, p->phy.caps);
                p->phy.ops->power_down(&p->phy, 1);

                /*
                 * If the PHY doesn't support interrupts for link status
                 * changes, schedule a scan of the adapter links at least
                 * once a second.
                 */
                if (!(p->phy.caps & SUPPORTED_IRQ) &&
                    adapter->params.linkpoll_period > 10)
                        adapter->params.linkpoll_period = 10;
        }

        return 0;
}
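/*
 * Example of the Ethernet address derivation above (hypothetical base
 * address): with a VPD base of 00:07:43:12:34:00, port 0 is assigned
 * 00:07:43:12:34:00 and port 1 gets 00:07:43:12:34:01; only the low
 * octet is adjusted, so the first five bytes are shared by all ports.
 */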
/**
 * t3_reinit_adapter - prepare HW for operation again
 * @adap: the adapter
 *
 * Put HW in the same state as @t3_prep_adapter without any changes to
 * SW state.  This is a cut down version of @t3_prep_adapter intended
 * to be used after events that wipe out HW state but preserve SW state,
 * e.g., EEH.  The device must be reset before calling this.
 */
int t3_reinit_adapter(adapter_t *adap)
{
        unsigned int i;
        int ret, j = 0;

        early_hw_init(adap, adap->params.info);
        ret = init_parity(adap);
        if (ret)
                return ret;

        if (adap->params.nports > 2 &&
            (ret = t3_vsc7323_init(adap, adap->params.nports)))
                return ret;

        for_each_port(adap, i) {
                const struct port_type_info *pti;
                struct port_info *p = adap2pinfo(adap, i);

                for (;;) {
                        unsigned port_type = adap->params.vpd.port_type[j];
                        if (port_type) {
                                if (port_type < ARRAY_SIZE(port_types)) {
                                        pti = &port_types[port_type];
                                        break;
                                } else
                                        return -EINVAL;
                        }
                        j++;
                        if (j >= ARRAY_SIZE(adap->params.vpd.port_type))
                                return -EINVAL;
                }
                ret = pti->phy_prep(&p->phy, adap, p->phy.addr, NULL);
                if (ret)
                        return ret;
                p->phy.ops->power_down(&p->phy, 1);
        }
        return 0;
}

void t3_led_ready(adapter_t *adapter)
{
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
}

void t3_port_failover(adapter_t *adapter, int port)
{
        u32 val;

        val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
        t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
                         val);
}

void t3_failover_done(adapter_t *adapter, int port)
{
        t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
                         F_PORT0ACTIVE | F_PORT1ACTIVE);
}

void t3_failover_clear(adapter_t *adapter)
{
        t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
                         F_PORT0ACTIVE | F_PORT1ACTIVE);
}

static int t3_cim_hac_read(adapter_t *adapter, u32 addr, u32 *val)
{
        u32 v;

        t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
        if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
                                F_HOSTBUSY, 0, 10, 10, &v))
                return -EIO;

        *val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);

        return 0;
}

static int t3_cim_hac_write(adapter_t *adapter, u32 addr, u32 val)
{
        u32 v;

        t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, val);

        addr |= F_HOSTWRITE;
        t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);

        if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
                                F_HOSTBUSY, 0, 10, 5, &v))
                return -EIO;
        return 0;
}

int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index,
                 u32 *size, void *data)
{
        u32 v, *buf = data;
        int i, cnt, ret;

        if (*size < LA_ENTRIES * 4)
                return -EINVAL;

        ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
        if (ret)
                goto out;

        *stopped = !(v & 1);

        /* Freeze LA */
        if (!*stopped) {
                ret = t3_cim_hac_write(adapter, LA_CTRL, 0);
                if (ret)
                        goto out;
        }

        for (i = 0; i < LA_ENTRIES; i++) {
                v = (i << 2) | (1 << 1);
                ret = t3_cim_hac_write(adapter, LA_CTRL, v);
                if (ret)
                        goto out;

                ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
                if (ret)
                        goto out;

                cnt = 20;
                while ((v & (1 << 1)) && cnt) {
                        udelay(5);
                        --cnt;
                        ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
                        if (ret)
                                goto out;
                }

                if (v & (1 << 1))
                        return -EIO;

                ret = t3_cim_hac_read(adapter, LA_DATA, &v);
                if (ret)
                        goto out;

                *buf++ = v;
        }

        ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
        if (ret)
                goto out;

        *index = (v >> 16) + 4;
        *size = LA_ENTRIES * 4;
out:
        /* Unfreeze LA */
        t3_cim_hac_write(adapter, LA_CTRL, 1);
        return ret;
}

int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data)
{
        u32 v, *buf = data;
        int i, j, ret;

        if (*size < IOQ_ENTRIES * sizeof(struct t3_ioq_entry))
                return -EINVAL;

        for (i = 0; i < 4; i++) {
                ret = t3_cim_hac_read(adapter, (4 * i), &v);
                if (ret)
                        goto out;

                *buf++ = v;
        }

        for (i = 0; i < IOQ_ENTRIES; i++) {
                u32 base_addr = 0x10 * (i + 1);

                for (j = 0; j < 4; j++) {
                        ret = t3_cim_hac_read(adapter, base_addr + 4 * j, &v);
                        if (ret)
                                goto out;

                        *buf++ = v;
                }
        }

        *size = IOQ_ENTRIES * sizeof(struct t3_ioq_entry);

out:
        return ret;
}
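/*
 * Usage sketch for the uP logic-analyzer and IOQ dumps above
 * (illustrative, with caller-chosen buffers):
 *
 *      u32 stopped, index, size = LA_ENTRIES * 4;
 *      u32 la_buf[LA_ENTRIES];
 *
 *      if (t3_get_up_la(adapter, &stopped, &index, &size, la_buf) == 0)
 *              la_buf holds LA_ENTRIES captured words, and index reflects
 *              the hardware's write position per the computation above.
 *
 * Both calls validate the supplied size first and fail with -EINVAL if
 * the buffer is smaller than LA_ENTRIES * 4 bytes (respectively
 * IOQ_ENTRIES * sizeof(struct t3_ioq_entry) bytes for t3_get_up_ioqs()).
 */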