/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
                               int polarity, int attempts, int delay,
                               u32 *valp)
{
        while (1) {
                u32 val = t4_read_reg(adapter, reg);

                if (!!(val & mask) == polarity) {
                        if (valp)
                                *valp = val;
                        return 0;
                }
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        udelay(delay);
        }
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
                                  int polarity, int attempts, int delay)
{
        return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
                                   delay, NULL);
}
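/*
 * Usage sketch (illustrative only, not part of the driver): the serial
 * flash helpers later in this file poll the SF interface with this
 * pattern, spinning up to SF_ATTEMPTS times with a 5 us pause between
 * polls:
 *
 *        if (t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5))
 *                return -EBUSY;
 */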
/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
                      u32 val)
{
        u32 v = t4_read_reg(adapter, addr) & ~mask;

        t4_write_reg(adapter, addr, v | val);
        (void) t4_read_reg(adapter, addr);      /* flush */
}

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
                             unsigned int data_reg, u32 *vals,
                             unsigned int nregs, unsigned int start_idx)
{
        while (nregs--) {
                t4_write_reg(adap, addr_reg, start_idx);
                *vals++ = t4_read_reg(adap, data_reg);
                start_idx++;
        }
}
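/*
 * Usage sketch (illustrative only): t4_tp_get_tcp_stats() later in this
 * file uses this helper to pull a block of TP MIB counters through the
 * TP_MIB_INDEX/TP_MIB_DATA register pair:
 *
 *        u32 val[8];
 *
 *        t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, 8,
 *                         TP_MIB_TCP_OUT_RST);
 */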
/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
                         u32 mbox_addr)
{
        for ( ; nflit; nflit--, mbox_addr += 8)
                *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
        struct fw_debug_cmd asrt;

        get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
        dev_alert(adap->pdev_dev,
                  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
                  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
                  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
        dev_err(adap->pdev_dev,
                "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
                (unsigned long long)t4_read_reg64(adap, data_reg),
                (unsigned long long)t4_read_reg64(adap, data_reg + 8),
                (unsigned long long)t4_read_reg64(adap, data_reg + 16),
                (unsigned long long)t4_read_reg64(adap, data_reg + 24),
                (unsigned long long)t4_read_reg64(adap, data_reg + 32),
                (unsigned long long)t4_read_reg64(adap, data_reg + 40),
                (unsigned long long)t4_read_reg64(adap, data_reg + 48),
                (unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff,
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                    void *rpl, bool sleep_ok)
{
        static const int delay[] = {
                1, 1, 3, 5, 10, 10, 20, 50, 100, 200
        };

        u32 v;
        u64 res;
        int i, ms, delay_idx;
        const __be64 *p = cmd;
        u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
        u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

        if ((size & 15) || size > MBOX_LEN)
                return -EINVAL;

        /*
         * If the device is off-line, as in EEH, commands will time out.
         * Fail them early so we don't waste time waiting.
         */
        if (adap->pdev->error_state != pci_channel_io_normal)
                return -EIO;

        v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
                v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

        if (v != MBOX_OWNER_DRV)
                return v ? -EBUSY : -ETIMEDOUT;

        for (i = 0; i < size; i += 8)
                t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

        t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
        t4_read_reg(adap, ctl_reg);             /* flush write */

        delay_idx = 0;
        ms = delay[0];

        for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
                if (sleep_ok) {
                        ms = delay[delay_idx];  /* last element may repeat */
                        if (delay_idx < ARRAY_SIZE(delay) - 1)
                                delay_idx++;
                        msleep(ms);
                } else
                        mdelay(ms);

                v = t4_read_reg(adap, ctl_reg);
                if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
                        if (!(v & MBMSGVALID)) {
                                t4_write_reg(adap, ctl_reg, 0);
                                continue;
                        }

                        res = t4_read_reg64(adap, data_reg);
                        if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
                                fw_asrt(adap, data_reg);
                                res = FW_CMD_RETVAL(EIO);
                        } else if (rpl)
                                get_mbox_rpl(adap, rpl, size / 8, data_reg);

                        if (FW_CMD_RETVAL_GET((int)res))
                                dump_mbox(adap, mbox, data_reg);
                        t4_write_reg(adap, ctl_reg, 0);
                        return -FW_CMD_RETVAL_GET((int)res);
                }
        }

        dump_mbox(adap, mbox, data_reg);
        dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
                *(const u8 *)cmd, mbox);
        return -ETIMEDOUT;
}
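/*
 * Usage sketch (illustrative only): callers normally go through the
 * t4_wr_mbox() wrapper (sleeping allowed) rather than calling this
 * routine directly, e.g. issuing a FW command and reading the reply back
 * into the same buffer:
 *
 *        struct fw_port_cmd c;
 *
 *        memset(&c, 0, sizeof(c));
 *        ... fill in c ...
 *        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 */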
/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it is
 * assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
        int i;

        if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
                return -EBUSY;
        t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
        t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
        t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
        t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
                     BIST_CMD_GAP(1));
        i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
        if (i)
                return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

        for (i = 15; i >= 0; i--)
                *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
        if (ecc)
                *ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
        return 0;
}

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it is
 * assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
        int i;

        idx *= EDC_STRIDE;
        if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
                return -EBUSY;
        t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
        t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
        t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
        t4_write_reg(adap, EDC_BIST_CMD + idx,
                     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
        i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
        if (i)
                return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

        for (i = 15; i >= 0; i--)
                *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
        if (ecc)
                *ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
        return 0;
}

#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE         0
#define VPD_LEN          512

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
        unsigned int v = enable ? 0xc : 0;
        int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);

        return ret < 0 ? ret : 0;
}
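/*
 * Usage sketch (illustrative only): an EEPROM update brackets the write
 * with the protection helper:
 *
 *        ret = t4_seeprom_wp(adapter, false);
 *        if (!ret)
 *                ret = pci_write_vpd(adapter->pdev, addr, len, buf);
 *        if (!ret)
 *                ret = t4_seeprom_wp(adapter, true);
 */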
/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
        int i, ret;
        int ec, sn;
        u8 vpd[VPD_LEN], csum;
        unsigned int vpdr_len, kw_offset, id_len;

        ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
        if (ret < 0)
                return ret;

        if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
                dev_err(adapter->pdev_dev, "missing VPD ID string\n");
                return -EINVAL;
        }

        id_len = pci_vpd_lrdt_size(vpd);
        if (id_len > ID_LEN)
                id_len = ID_LEN;

        i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
        if (i < 0) {
                dev_err(adapter->pdev_dev, "missing VPD-R section\n");
                return -EINVAL;
        }

        vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
        kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
        if (vpdr_len + kw_offset > VPD_LEN) {
                dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
                return -EINVAL;
        }

#define FIND_VPD_KW(var, name) do { \
        var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
        if (var < 0) { \
                dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
                return -EINVAL; \
        } \
        var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

        FIND_VPD_KW(i, "RV");
        for (csum = 0; i >= 0; i--)
                csum += vpd[i];

        if (csum) {
                dev_err(adapter->pdev_dev,
                        "corrupted VPD EEPROM, actual csum %u\n", csum);
                return -EINVAL;
        }

        FIND_VPD_KW(ec, "EC");
        FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

        memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
        strim(p->id);
        memcpy(p->ec, vpd + ec, EC_LEN);
        strim(p->ec);
        i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
        memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
        strim(p->sn);
        return 0;
}

/* serial flash and firmware constants */
enum {
        SF_ATTEMPTS = 10,       /* max retries for SF operations */

        /* flash command opcodes */
        SF_PROG_PAGE    = 2,    /* program page */
        SF_WR_DISABLE   = 4,    /* disable writes */
        SF_RD_STATUS    = 5,    /* read status register */
        SF_WR_ENABLE    = 6,    /* enable writes */
        SF_RD_DATA_FAST = 0xb,  /* read flash */
        SF_RD_ID        = 0x9f, /* read ID */
        SF_ERASE_SECTOR = 0xd8, /* erase sector */

        FW_MAX_SIZE = 512 * 1024,
};
/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
                    int lock, u32 *valp)
{
        int ret;

        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t4_read_reg(adapter, SF_OP) & BUSY)
                return -EBUSY;
        cont = cont ? SF_CONT : 0;
        lock = lock ? SF_LOCK : 0;
        t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
        ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
        if (!ret)
                *valp = t4_read_reg(adapter, SF_DATA);
        return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
                     int lock, u32 val)
{
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t4_read_reg(adapter, SF_OP) & BUSY)
                return -EBUSY;
        cont = cont ? SF_CONT : 0;
        lock = lock ? SF_LOCK : 0;
        t4_write_reg(adapter, SF_DATA, val);
        t4_write_reg(adapter, SF_OP, lock |
                     cont | BYTECNT(byte_cnt - 1) | OP_WR);
        return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
        int ret;
        u32 status;

        while (1) {
                if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
                    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
                        return ret;
                if (!(status & 1))
                        return 0;
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        msleep(delay);
        }
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
                         unsigned int nwords, u32 *data, int byte_oriented)
{
        int ret;

        if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
                return -EINVAL;

        addr = swab32(addr) | SF_RD_DATA_FAST;

        if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
            (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
                return ret;

        for ( ; nwords; nwords--, data++) {
                ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
                if (nwords == 1)
                        t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
                if (ret)
                        return ret;
                if (byte_oriented)
                        *data = htonl(*data);
        }
        return 0;
}
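/*
 * Usage sketch (illustrative only): get_fw_version() below is the
 * simplest caller, pulling a single word out of the FW header in flash:
 *
 *        u32 vers;
 *
 *        ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
 *                            offsetof(struct fw_hdr, fw_ver), 1, &vers, 0);
 */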
/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
                          unsigned int n, const u8 *data)
{
        int ret;
        u32 buf[64];
        unsigned int i, c, left, val, offset = addr & 0xff;

        if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
                return -EINVAL;

        val = swab32(addr) | SF_PROG_PAGE;

        if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
            (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
                goto unlock;

        for (left = n; left; left -= c) {
                c = min(left, 4U);
                for (val = 0, i = 0; i < c; ++i)
                        val = (val << 8) + *data++;

                ret = sf1_write(adapter, c, c != left, 1, val);
                if (ret)
                        goto unlock;
        }
        ret = flash_wait_op(adapter, 8, 1);
        if (ret)
                goto unlock;

        t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */

        /* Read the page to verify the write succeeded */
        ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
        if (ret)
                return ret;

        if (memcmp(data - n, (u8 *)buf + offset, n)) {
                dev_err(adapter->pdev_dev,
                        "failed to correctly write the flash page at %#x\n",
                        addr);
                return -EIO;
        }
        return 0;

unlock:
        t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
        return ret;
}

/**
 * get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
        return t4_read_flash(adapter, adapter->params.sf_fw_start +
                             offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}

/**
 * get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
        return t4_read_flash(adapter, adapter->params.sf_fw_start +
                             offsetof(struct fw_hdr, tp_microcode_ver),
                             1, vers, 0);
}
/**
 * t4_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major version mismatch, and a positive value if
 * the expected major version is found but there's a minor version
 * mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
        u32 api_vers[2];
        int ret, major, minor, micro;

        ret = get_fw_version(adapter, &adapter->params.fw_vers);
        if (!ret)
                ret = get_tp_version(adapter, &adapter->params.tp_vers);
        if (!ret)
                ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
                                    offsetof(struct fw_hdr, intfver_nic),
                                    2, api_vers, 1);
        if (ret)
                return ret;

        major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
        minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
        micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
        memcpy(adapter->params.api_vers, api_vers,
               sizeof(adapter->params.api_vers));

        if (major != FW_VERSION_MAJOR) {        /* major mismatch - fail */
                dev_err(adapter->pdev_dev,
                        "card FW has major version %u, driver wants %u\n",
                        major, FW_VERSION_MAJOR);
                return -EINVAL;
        }

        if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
                return 0;                       /* perfect match */

        /* Minor/micro version mismatch.  Report it but often it's OK. */
        return 1;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
        int ret = 0;

        while (start <= end) {
                if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
                    (ret = sf1_write(adapter, 4, 0, 1,
                                     SF_ERASE_SECTOR | (start << 8))) != 0 ||
                    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
                        dev_err(adapter->pdev_dev,
                                "erase of flash sector %d failed, error %d\n",
                                start, ret);
                        break;
                }
                start++;
        }
        t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
        return ret;
}
/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
        u32 csum;
        int ret, addr;
        unsigned int i;
        u8 first_page[SF_PAGE_SIZE];
        const u32 *p = (const u32 *)fw_data;
        const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
        unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
        unsigned int fw_img_start = adap->params.sf_fw_start;
        unsigned int fw_start_sec = fw_img_start / sf_sec_size;

        if (!size) {
                dev_err(adap->pdev_dev, "FW image has no data\n");
                return -EINVAL;
        }
        if (size & 511) {
                dev_err(adap->pdev_dev,
                        "FW image size not multiple of 512 bytes\n");
                return -EINVAL;
        }
        if (ntohs(hdr->len512) * 512 != size) {
                dev_err(adap->pdev_dev,
                        "FW image size differs from size in FW header\n");
                return -EINVAL;
        }
        if (size > FW_MAX_SIZE) {
                dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
                        FW_MAX_SIZE);
                return -EFBIG;
        }

        for (csum = 0, i = 0; i < size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                dev_err(adap->pdev_dev,
                        "corrupted firmware image, checksum %#x\n", csum);
                return -EINVAL;
        }

        i = DIV_ROUND_UP(size, sf_sec_size);    /* # of sectors spanned */
        ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
        if (ret)
                goto out;

        /*
         * We write the correct version at the end so the driver can see a bad
         * version if the FW write fails.  Start by writing a copy of the
         * first page with a bad version.
         */
        memcpy(first_page, fw_data, SF_PAGE_SIZE);
        ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
        ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
        if (ret)
                goto out;

        addr = fw_img_start;
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                fw_data += SF_PAGE_SIZE;
                ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
                if (ret)
                        goto out;
        }

        ret = t4_write_flash(adap,
                             fw_img_start + offsetof(struct fw_hdr, fw_ver),
                             sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
        if (ret)
                dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
                        ret);
        return ret;
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
                     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc)
{
        struct fw_port_cmd c;
        unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

        lc->link_ok = 0;
        if (lc->requested_fc & PAUSE_RX)
                fc |= FW_PORT_CAP_FC_RX;
        if (lc->requested_fc & PAUSE_TX)
                fc |= FW_PORT_CAP_FC_TX;

        memset(&c, 0, sizeof(c));
        c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
                               FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
        c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
                                  FW_LEN16(c));

        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
                c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
        } else if (lc->autoneg == AUTONEG_DISABLE) {
                c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
        } else
                c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
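/*
 * Usage sketch (illustrative only): to bring a link up with
 * autonegotiation, a caller fills in the port's link_config and issues
 * the L1 configure command (lc here stands for hypothetical per-port
 * state, mbox/port for the caller's mailbox and port id):
 *
 *        lc->autoneg = AUTONEG_ENABLE;
 *        lc->requested_fc = PAUSE_RX | PAUSE_TX;
 *        ret = t4_link_start(adap, mbox, port, lc);
 */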
/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
        struct fw_port_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
                               FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
        c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
                                  FW_LEN16(c));
        c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

struct intr_info {
        unsigned int mask;      /* bits to check in interrupt status */
        const char *msg;        /* message to print or NULL */
        short stat_idx;         /* stat counter to increment or -1 */
        unsigned short fatal;   /* whether the condition reported is fatal */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
                                 const struct intr_info *acts)
{
        int fatal = 0;
        unsigned int mask = 0;
        unsigned int status = t4_read_reg(adapter, reg);

        for ( ; acts->mask; ++acts) {
                if (!(status & acts->mask))
                        continue;
                if (acts->fatal) {
                        fatal++;
                        dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
                                  status & acts->mask);
                } else if (acts->msg && printk_ratelimit())
                        dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
                                 status & acts->mask);
                mask |= acts->mask;
        }
        status &= mask;
        if (status)     /* clear processed interrupts */
                t4_write_reg(adapter, reg, status);
        return fatal;
}
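/*
 * Usage sketch (illustrative only): each module handler below boils down
 * to a table plus one call; EXAMPLE_INT_CAUSE is a hypothetical cause
 * register name:
 *
 *        static const struct intr_info example_intr_info[] = {
 *                { 0x1, "example fatal condition", -1, 1 },
 *                { 0x2, "example warning condition", -1, 0 },
 *                { 0 }
 *        };
 *
 *        if (t4_handle_intr_status(adap, EXAMPLE_INT_CAUSE,
 *                                  example_intr_info))
 *                t4_fatal_err(adap);
 */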
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
        static const struct intr_info sysbus_intr_info[] = {
                { RNPP, "RXNP array parity error", -1, 1 },
                { RPCP, "RXPC array parity error", -1, 1 },
                { RCIP, "RXCIF array parity error", -1, 1 },
                { RCCP, "Rx completions control array parity error", -1, 1 },
                { RFTP, "RXFT array parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info pcie_port_intr_info[] = {
                { TPCP, "TXPC array parity error", -1, 1 },
                { TNPP, "TXNP array parity error", -1, 1 },
                { TFTP, "TXFT array parity error", -1, 1 },
                { TCAP, "TXCA array parity error", -1, 1 },
                { TCIP, "TXCIF array parity error", -1, 1 },
                { RCAP, "RXCA array parity error", -1, 1 },
                { OTDD, "outbound request TLP discarded", -1, 1 },
                { RDPE, "Rx data parity error", -1, 1 },
                { TDUE, "Tx uncorrectable data error", -1, 1 },
                { 0 }
        };
        static const struct intr_info pcie_intr_info[] = {
                { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
                { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
                { MSIDATAPERR, "MSI data parity error", -1, 1 },
                { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
                { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
                { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
                { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
                { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
                { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
                { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
                { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
                { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
                { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
                { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
                { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
                { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
                { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
                { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
                { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
                { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
                { FIDPERR, "PCI FID parity error", -1, 1 },
                { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
                { MATAGPERR, "PCI MA tag parity error", -1, 1 },
                { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
                { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
                { RXWRPERR, "PCI Rx write parity error", -1, 1 },
                { RPLPERR, "PCI replay buffer parity error", -1, 1 },
                { PCIESINT, "PCI core secondary fault", -1, 1 },
                { PCIEPINT, "PCI core primary fault", -1, 1 },
                { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
                  0 },
                { 0 }
        };

        int fat;

        fat = t4_handle_intr_status(adapter,
                                    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
                                    sysbus_intr_info) +
              t4_handle_intr_status(adapter,
                                    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
                                    pcie_port_intr_info) +
              t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
        if (fat)
                t4_fatal_err(adapter);
}
/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
        static const struct intr_info tp_intr_info[] = {
                { 0x3fffffff, "TP parity error", -1, 1 },
                { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
                t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
        u64 v;

        static const struct intr_info sge_intr_info[] = {
                { ERR_CPL_EXCEED_IQE_SIZE,
                  "SGE received CPL exceeding IQE size", -1, 1 },
                { ERR_INVALID_CIDX_INC,
                  "SGE GTS CIDX increment too large", -1, 0 },
                { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
                { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
                { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
                { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
                  0 },
                { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
                  0 },
                { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
                  0 },
                { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
                  0 },
                { ERR_ING_CTXT_PRIO,
                  "SGE too many priority ingress contexts", -1, 0 },
                { ERR_EGR_CTXT_PRIO,
                  "SGE too many priority egress contexts", -1, 0 },
                { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
                { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
                { 0 }
        };

        v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
            ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
        if (v) {
                dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
                          (unsigned long long)v);
                t4_write_reg(adapter, SGE_INT_CAUSE1, v);
                t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
        }

        if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
            v != 0)
                t4_fatal_err(adapter);
}
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cim_intr_info[] = {
                { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
                { OBQPARERR, "CIM OBQ parity error", -1, 1 },
                { IBQPARERR, "CIM IBQ parity error", -1, 1 },
                { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
                { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
                { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
                { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info cim_upintr_info[] = {
                { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
                { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
                { ILLWRINT, "CIM illegal write", -1, 1 },
                { ILLRDINT, "CIM illegal read", -1, 1 },
                { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
                { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
                { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
                { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
                { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
                { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
                { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
                { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
                { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
                { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
                { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
                { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
                { SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
                { SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
                { BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
                { BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
                { SGLRDPLINT, "CIM single read from PL space", -1, 1 },
                { SGLWRPLINT, "CIM single write to PL space", -1, 1 },
                { BLKRDPLINT, "CIM block read from PL space", -1, 1 },
                { BLKWRPLINT, "CIM block write to PL space", -1, 1 },
                { REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
                { RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
                { TIMEOUTINT, "CIM PIF timeout", -1, 1 },
                { TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
                { 0 }
        };

        int fat;

        fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
                                    cim_intr_info) +
              t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
                                    cim_upintr_info);
        if (fat)
                t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulprx_intr_info[] = {
                { 0x1800000, "ULPRX context error", -1, 1 },
                { 0x7fffff, "ULPRX parity error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
                t4_fatal_err(adapter);
}
/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulptx_intr_info[] = {
                { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
                  0 },
                { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
                  0 },
                { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
                  0 },
                { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
                  0 },
                { 0xfffffff, "ULPTX parity error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
                t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmtx_intr_info[] = {
                { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
                { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
                { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
                { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
                { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
                { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
                { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
                  1 },
                { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
                { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
                t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmrx_intr_info[] = {
                { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
                { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
                { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
                { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
                  1 },
                { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
                { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
                t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cplsw_intr_info[] = {
                { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
                { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
                { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
                { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
                { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
                { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
                t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
        static const struct intr_info le_intr_info[] = {
                { LIPMISS, "LE LIP miss", -1, 0 },
                { LIP0, "LE 0 LIP error", -1, 0 },
                { PARITYERR, "LE parity error", -1, 1 },
                { UNKNOWNCMD, "LE unknown command", -1, 1 },
                { REQQPARERR, "LE request queue parity error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
                t4_fatal_err(adap);
}
/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
        static const struct intr_info mps_rx_intr_info[] = {
                { 0xffffff, "MPS Rx parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_tx_intr_info[] = {
                { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
                { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
                { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
                { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
                { BUBBLE, "MPS Tx underflow", -1, 1 },
                { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
                { FRMERR, "MPS Tx framing error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_trc_intr_info[] = {
                { FILTMEM, "MPS TRC filter parity error", -1, 1 },
                { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
                { MISCPERR, "MPS TRC misc parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_stat_sram_intr_info[] = {
                { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_stat_tx_intr_info[] = {
                { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_stat_rx_intr_info[] = {
                { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_cls_intr_info[] = {
                { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
                { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
                { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
                { 0 }
        };

        int fat;

        fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
                                    mps_rx_intr_info) +
              t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
                                    mps_tx_intr_info) +
              t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
                                    mps_trc_intr_info) +
              t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
                                    mps_stat_sram_intr_info) +
              t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
                                    mps_stat_tx_intr_info) +
              t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
                                    mps_stat_rx_intr_info) +
              t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
                                    mps_cls_intr_info);

        t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
                     RXINT | TXINT | STATINT);
        t4_read_reg(adapter, MPS_INT_CAUSE);    /* flush */
        if (fat)
                t4_fatal_err(adapter);
}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
        static const char name[3][5] = { "EDC0", "EDC1", "MC" };

        unsigned int addr, cnt_addr, v;

        if (idx <= MEM_EDC1) {
                addr = EDC_REG(EDC_INT_CAUSE, idx);
                cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
        } else {
                addr = MC_INT_CAUSE;
                cnt_addr = MC_ECC_STATUS;
        }

        v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
        if (v & PERR_INT_CAUSE)
                dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
                          name[idx]);
        if (v & ECC_CE_INT_CAUSE) {
                u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

                t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
                if (printk_ratelimit())
                        dev_warn(adapter->pdev_dev,
                                 "%u %s correctable ECC data error%s\n",
                                 cnt, name[idx], cnt > 1 ? "s" : "");
        }
"s" : ""); 1321 } 1322 if (v & ECC_UE_INT_CAUSE) 1323 dev_alert(adapter->pdev_dev, 1324 "%s uncorrectable ECC data error\n", name[idx]); 1325 1326 t4_write_reg(adapter, addr, v); 1327 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) 1328 t4_fatal_err(adapter); 1329 } 1330 1331 /* 1332 * MA interrupt handler. 1333 */ 1334 static void ma_intr_handler(struct adapter *adap) 1335 { 1336 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); 1337 1338 if (status & MEM_PERR_INT_CAUSE) 1339 dev_alert(adap->pdev_dev, 1340 "MA parity error, parity status %#x\n", 1341 t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); 1342 if (status & MEM_WRAP_INT_CAUSE) { 1343 v = t4_read_reg(adap, MA_INT_WRAP_STATUS); 1344 dev_alert(adap->pdev_dev, "MA address wrap-around error by " 1345 "client %u to address %#x\n", 1346 MEM_WRAP_CLIENT_NUM_GET(v), 1347 MEM_WRAP_ADDRESS_GET(v) << 4); 1348 } 1349 t4_write_reg(adap, MA_INT_CAUSE, status); 1350 t4_fatal_err(adap); 1351 } 1352 1353 /* 1354 * SMB interrupt handler. 1355 */ 1356 static void smb_intr_handler(struct adapter *adap) 1357 { 1358 static const struct intr_info smb_intr_info[] = { 1359 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 1360 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 1361 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 1362 { 0 } 1363 }; 1364 1365 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) 1366 t4_fatal_err(adap); 1367 } 1368 1369 /* 1370 * NC-SI interrupt handler. 1371 */ 1372 static void ncsi_intr_handler(struct adapter *adap) 1373 { 1374 static const struct intr_info ncsi_intr_info[] = { 1375 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 1376 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 1377 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 1378 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 1379 { 0 } 1380 }; 1381 1382 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) 1383 t4_fatal_err(adap); 1384 } 1385 1386 /* 1387 * XGMAC interrupt handler. 1388 */ 1389 static void xgmac_intr_handler(struct adapter *adap, int port) 1390 { 1391 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); 1392 1393 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 1394 if (!v) 1395 return; 1396 1397 if (v & TXFIFO_PRTY_ERR) 1398 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", 1399 port); 1400 if (v & RXFIFO_PRTY_ERR) 1401 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", 1402 port); 1403 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); 1404 t4_fatal_err(adap); 1405 } 1406 1407 /* 1408 * PL interrupt handler. 1409 */ 1410 static void pl_intr_handler(struct adapter *adap) 1411 { 1412 static const struct intr_info pl_intr_info[] = { 1413 { FATALPERR, "T4 fatal parity error", -1, 1 }, 1414 { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 1415 { 0 } 1416 }; 1417 1418 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) 1419 t4_fatal_err(adap); 1420 } 1421 1422 #define PF_INTR_MASK (PFSW) 1423 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ 1424 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ 1425 CPL_SWITCH | SGE | ULP_TX) 1426 1427 /** 1428 * t4_slow_intr_handler - control path interrupt handler 1429 * @adapter: the adapter 1430 * 1431 * T4 interrupt handler for non-data global interrupt events, e.g., errors. 1432 * The designation 'slow' is because it involves register reads, while 1433 * data interrupts typically don't involve any MMIOs. 
int t4_slow_intr_handler(struct adapter *adapter)
{
        u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

        if (!(cause & GLBL_INTR_MASK))
                return 0;
        if (cause & CIM)
                cim_intr_handler(adapter);
        if (cause & MPS)
                mps_intr_handler(adapter);
        if (cause & NCSI)
                ncsi_intr_handler(adapter);
        if (cause & PL)
                pl_intr_handler(adapter);
        if (cause & SMB)
                smb_intr_handler(adapter);
        if (cause & XGMAC0)
                xgmac_intr_handler(adapter, 0);
        if (cause & XGMAC1)
                xgmac_intr_handler(adapter, 1);
        if (cause & XGMAC_KR0)
                xgmac_intr_handler(adapter, 2);
        if (cause & XGMAC_KR1)
                xgmac_intr_handler(adapter, 3);
        if (cause & PCIE)
                pcie_intr_handler(adapter);
        if (cause & MC)
                mem_intr_handler(adapter, MEM_MC);
        if (cause & EDC0)
                mem_intr_handler(adapter, MEM_EDC0);
        if (cause & EDC1)
                mem_intr_handler(adapter, MEM_EDC1);
        if (cause & LE)
                le_intr_handler(adapter);
        if (cause & TP)
                tp_intr_handler(adapter);
        if (cause & MA)
                ma_intr_handler(adapter);
        if (cause & PM_TX)
                pmtx_intr_handler(adapter);
        if (cause & PM_RX)
                pmrx_intr_handler(adapter);
        if (cause & ULP_RX)
                ulprx_intr_handler(adapter);
        if (cause & CPL_SWITCH)
                cplsw_intr_handler(adapter);
        if (cause & SGE)
                sge_intr_handler(adapter);
        if (cause & ULP_TX)
                ulptx_intr_handler(adapter);

        /* Clear the interrupts just processed for which we are the master. */
        t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
        (void) t4_read_reg(adapter, PL_INT_CAUSE);      /* flush */
        return 1;
}

/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts.  Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules.  Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
        u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

        t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
                     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
                     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
                     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
                     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
                     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
                     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
                     EGRESS_SIZE_ERR);
        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
        t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}
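/*
 * Usage sketch (illustrative only): the PF that owns the global interrupt
 * hierarchy pairs these calls around the lifetime of the device, e.g.
 * t4_intr_enable(adapter) at attach (master PF only) and
 * t4_intr_disable(adapter) at detach.
 */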
/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrators.  The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
        u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
        t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}

/**
 * hash_mac_addr - return the hash value of a MAC address
 * @addr: the 48-bit Ethernet MAC address
 *
 * Hashes a MAC address according to the hash function used by HW inexact
 * (hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
        u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
        u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];

        a ^= b;
        a ^= (a >> 12);
        a ^= (a >> 6);
        return a & 0x3f;
}

/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the response queue lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values.  If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
                        int start, int n, const u16 *rspq, unsigned int nrspq)
{
        int ret;
        const u16 *rsp = rspq;
        const u16 *rsp_end = rspq + nrspq;
        struct fw_rss_ind_tbl_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
                               FW_CMD_REQUEST | FW_CMD_WRITE |
                               FW_RSS_IND_TBL_CMD_VIID(viid));
        cmd.retval_len16 = htonl(FW_LEN16(cmd));

        /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
        while (n > 0) {
                int nq = min(n, 32);
                __be32 *qp = &cmd.iq0_to_iq2;

                cmd.niqid = htons(nq);
                cmd.startidx = htons(start);

                start += nq;
                n -= nq;

                while (nq > 0) {
                        unsigned int v;

                        v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
                        if (++rsp >= rsp_end)
                                rsp = rspq;
                        v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
                        if (++rsp >= rsp_end)
                                rsp = rspq;
                        v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
                        if (++rsp >= rsp_end)
                                rsp = rspq;

                        *qp++ = htonl(v);
                        nq -= 3;
                }

                ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
                if (ret)
                        return ret;
        }
        return 0;
}
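/*
 * Usage sketch (illustrative only): spreading a VI's 128-entry RSS slice
 * round-robin across four response queues (the queue IDs iq0..iq3 are
 * hypothetical):
 *
 *        u16 rspq[4] = { iq0, iq1, iq2, iq3 };
 *
 *        ret = t4_config_rss_range(adap, mbox, viid, 0, 128, rspq, 4);
 */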
/**
 * t4_config_glbl_rss - configure the global RSS mode
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @mode: global RSS mode
 * @flags: mode-specific flags
 *
 * Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
                       unsigned int flags)
{
        struct fw_rss_glb_config_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
                              FW_CMD_REQUEST | FW_CMD_WRITE);
        c.retval_len16 = htonl(FW_LEN16(c));
        if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
                c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
        } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
                c.u.basicvirtual.mode_pkd =
                        htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
                c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
        } else
                return -EINVAL;
        return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
 * @adap: the adapter
 * @v4: holds the TCP/IP counter values
 * @v6: holds the TCP/IPv6 counter values
 *
 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6)
{
        u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];

#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
#define STAT(x) val[STAT_IDX(x)]
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

        if (v4) {
                t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
                                 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
                v4->tcpOutRsts = STAT(OUT_RST);
                v4->tcpInSegs = STAT64(IN_SEG);
                v4->tcpOutSegs = STAT64(OUT_SEG);
                v4->tcpRetransSegs = STAT64(RXT_SEG);
        }
        if (v6) {
                t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
                                 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
                v6->tcpOutRsts = STAT(OUT_RST);
                v6->tcpInSegs = STAT64(IN_SEG);
                v6->tcpOutSegs = STAT64(OUT_SEG);
                v6->tcpRetransSegs = STAT64(RXT_SEG);
        }
#undef STAT64
#undef STAT
#undef STAT_IDX
}

/**
 * t4_read_mtu_tbl - returns the values in the HW path MTU table
 * @adap: the adapter
 * @mtus: where to store the MTU values
 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 * Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
        u32 v;
        int i;

        for (i = 0; i < NMTUS; ++i) {
                t4_write_reg(adap, TP_MTU_TABLE,
                             MTUINDEX(0xff) | MTUVALUE(i));
                v = t4_read_reg(adap, TP_MTU_TABLE);
                mtus[i] = MTUVALUE_GET(v);
                if (mtu_log)
                        mtu_log[i] = MTUWIDTH_GET(v);
        }
}
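/*
 * Usage sketch (illustrative only): dumping the 16-entry path-MTU table,
 * ignoring the base-2 log values:
 *
 *        u16 mtus[NMTUS];
 *
 *        t4_read_mtu_tbl(adap, mtus, NULL);
 */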
/**
 * init_cong_ctrl - initialize congestion control parameters
 * @a: the alpha values for congestion control
 * @b: the beta values for congestion control
 *
 * Initialize the congestion control parameters.
 */
static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = 6;
	b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448,
		640, 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336,
		20480, 28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
			     MTUWIDTH(log2) | MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
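/*
 * Worked example for t4_load_mtus() above, assuming the default
 * parameters from init_cong_ctrl(): for MTU 1500 in window 15 we have
 * alpha[15] = 8 and avg_pkts[15] = 896, so the additive increment
 * written is max((1500 - 40) * 8 / 896, CC_MIN_INCR) = 13.  The MTU
 * itself is stored with a rounded base-2 log of fls(1500) - 1 = 10,
 * since the bit below the MSB of 1500 (bit 9) is clear.
 */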
/**
 * get_mps_bg_map - return the buffer groups associated with a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Returns a bitmap indicating which MPS buffer groups are associated
 * with the given port.  Bit i is set if buffer group i is used by the
 * port.
 */
static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));

	if (n == 0)
		return idx == 0 ? 0xf : 0;
	if (n == 1)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

/**
 * t4_get_port_stats - collect port statistics
 * @adap: the adapter
 * @idx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = get_mps_bg_map(adap, idx);

#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 * t4_wol_magic_enable - enable/disable magic packet WoL
 * @adap: the adapter
 * @port: the physical port index
 * @addr: MAC address expected in magic packets, %NULL to disable
 *
 * Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	if (addr) {
		t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
			     (addr[0] << 8) | addr[1]);
	}
	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
			 addr ? MAGICEN : 0);
}

/**
 * t4_wol_pat_enable - enable/disable pattern-based WoL
 * @adap: the adapter
 * @port: the physical port index
 * @map: bitmap of which HW pattern filters to set
 * @mask0: byte mask for bytes 0-63 of a packet
 * @mask1: byte mask for bytes 64-127 of a packet
 * @crc: Ethernet CRC for selected bytes
 * @enable: enable/disable switch
 *
 * Sets the pattern filters indicated in @map to mask out the bytes
 * specified in @mask0/@mask1 in received packets and compare the CRC of
 * the resulting packet against @crc.  If @enable is %true pattern-based
 * WoL is enabled, otherwise it is disabled.  Returns 0 on success or a
 * negative errno on failure.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port,
		      unsigned int map, u64 mask0, u64 mask1,
		      unsigned int crc, bool enable)
{
	int i;

	if (!enable) {
		t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
				 PATEN, 0);
		return 0;
	}
	if (map > 0xff)
		return -EINVAL;

#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)

	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
			return -ETIMEDOUT;

		/* write CRC */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
	return 0;
}
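/*
 * Illustrative sketch, not part of the driver: arming and then disarming
 * magic-packet wake-on-LAN on a port with t4_wol_magic_enable().  The
 * port index and MAC address are assumptions for the example.
 */
static void __maybe_unused example_wol_magic(struct adapter *adap)
{
	static const u8 mac[] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };

	t4_wol_magic_enable(adap, 0, mac);	/* match magic frames for mac */
	t4_wol_magic_enable(adap, 0, NULL);	/* disable again */
}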
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)

/**
 * t4_mdio_rd - read a PHY register through MDIO
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @phy_addr: the PHY address
 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 * @reg: the register to read
 * @valp: where to store the value
 *
 * Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp)
{
	int ret;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
				   FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = htons(reg);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = ntohs(c.u.mdio.rval);
	return ret;
}

/**
 * t4_mdio_wr - write a PHY register through MDIO
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @phy_addr: the PHY address
 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 * @reg: the register to write
 * @val: value to write
 *
 * Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val)
{
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
				   FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = htons(reg);
	c.u.mdio.rval = htons(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_hello - establish communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @evt_mbox: mailbox to receive async FW events
 * @master: specifies the caller's willingness to be the device master
 * @state: returns the current device state
 *
 * Issues a command to establish communication with FW.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;

	INIT_CMD(c, HELLO, WRITE);
	c.err_to_mbasyncnot = htonl(
		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
		FW_HELLO_CMD_MBASYNCNOT(evt_mbox));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0 && state) {
		u32 v = ntohl(c.err_to_mbasyncnot);

		if (v & FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else if (v & FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else
			*state = DEV_STATE_UNINIT;
	}
	return ret;
}

/**
 * t4_fw_bye - end communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
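/*
 * Illustrative sketch, not part of the driver: a minimal attach/detach
 * handshake built from t4_fw_hello() and t4_fw_bye().  The mailbox
 * numbers are assumptions, as is MASTER_MAY, assumed to sit alongside
 * the MASTER_CANT/MASTER_MUST values used above and to let the FW pick
 * the master itself.
 */
static int __maybe_unused example_fw_attach(struct adapter *adap)
{
	enum dev_state state;
	int ret;

	ret = t4_fw_hello(adap, 0, 0, MASTER_MAY, &state);
	if (ret)
		return ret;
	if (state == DEV_STATE_ERR) {	/* FW reports a dead device */
		t4_fw_bye(adap, 0);
		return -EIO;
	}
	return 0;
}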
/**
 * t4_early_init - ask FW to initialize the device
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to FW to partially initialize the device.  This
 * performs initialization that generally doesn't depend on user input.
 */
int t4_early_init(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_reset - issue a reset to FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @reset: specifies the type of reset to perform
 *
 * Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	INIT_CMD(c, RESET, WRITE);
	c.val = htonl(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_query_params - query FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Reads the value of FW or device parameters.  Up to 7 parameters can be
 * queried at once.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
			    FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
			    FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	for (i = 0; i < nparams; i++, p += 2)
		*p = htonl(*params++);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = ntohl(*p);
	return ret;
}
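/*
 * Illustrative sketch, not part of the driver: reading a single device
 * parameter with t4_query_params().  The FW_PARAMS_* mnemonic macros are
 * assumed to come from t4fw_api.h, and FW_PARAMS_PARAM_DEV_FWREV is an
 * assumption chosen purely for illustration.
 */
static int __maybe_unused example_query_fwrev(struct adapter *adap,
					      unsigned int mbox, u32 *fwrev)
{
	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV);

	return t4_query_params(adap, mbox, 0, 0, 1, &param, fwrev);
}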
/**
 * t4_set_params - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
			    FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	while (nparams--) {
		*p++ = htonl(*params++);
		*p++ = htonl(*val++);
	}

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_cfg_pfvf - configure PF/VF resource limits
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF being configured
 * @vf: the VF being configured
 * @txq: the max number of egress queues
 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
 * @rxqi: the max number of interrupt-capable ingress queues
 * @rxq: the max number of interruptless ingress queues
 * @tc: the PCI traffic class
 * @vi: the max number of virtual interfaces
 * @cmask: the channel access rights mask for the PF/VF
 * @pmask: the port access rights mask for the PF/VF
 * @nexact: the maximum number of exact MPS filters
 * @rcaps: read capabilities
 * @wxcaps: write/execute capabilities
 *
 * Configures resource limits and capabilities for a physical or virtual
 * function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
			    FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
			       FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
			      FW_PFVF_CMD_PMASK(pmask) |
			      FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
				FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 * t4_alloc_vi - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}

/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_RXMODE_MTU_NO_CHG;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
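/*
 * Illustrative sketch, not part of the driver: toggling promiscuous mode
 * on a VI while leaving its MTU and the other Rx properties untouched,
 * using the -1 "no change" convention of t4_set_rxmode().
 */
static int __maybe_unused example_set_promisc(struct adapter *adap,
					      unsigned int mbox,
					      unsigned int viid, bool on)
{
	return t4_set_rxmode(adap, mbox, viid, -1, on ? 1 : 0, -1, -1, -1,
			     true);
}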
/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL, each address that fails to get an exact filter
 * is hashed and its bit is set in the hash filter bitmap pointed at by
 * @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;

	if (naddr > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
			     FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
				    FW_CMD_LEN16((naddr + 2) / 2));

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
					FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= NEXACT_MAC ? 0xffff : index;
		if (index < NEXACT_MAC)
			ret++;
		else if (hash)
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}
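/*
 * Illustrative sketch, not part of the driver: installing exact-match
 * filters for two addresses and falling back to the inexact hash for any
 * that don't fit, using t4_alloc_mac_filt() and t4_set_addr_hash()
 * (the latter defined below; as with the other t4_* helpers, it is
 * assumed to be declared in cxgb4.h).  The addresses are assumptions.
 */
static int __maybe_unused example_add_macs(struct adapter *adap,
					   unsigned int mbox,
					   unsigned int viid)
{
	static const u8 a0[6] = { 0x00, 0x07, 0x43, 0x00, 0x00, 0x01 };
	static const u8 a1[6] = { 0x00, 0x07, 0x43, 0x00, 0x00, 0x02 };
	const u8 *addrs[] = { a0, a1 };
	u16 idx[2];
	u64 hash = 0;
	int ret;

	ret = t4_alloc_mac_filt(adap, mbox, viid, false, 2, addrs, idx,
				&hash, true);
	if (ret < 0)
		return ret;
	if (hash)	/* some addresses overflowed into the hash filter */
		return t4_set_addr_hash(adap, mbox, viid, false, hash, true);
	return 0;
}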
/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the
 * one being used by the old address value and allocate a new filter for
 * the new address value.  @idx can be -1 if the address is a new
 * addition.
 *
 * Returns a negative error number or the index of the filter with the
 * new MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
				FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
		if (ret >= NEXACT_MAC)
			ret = -ENOMEM;
	}
	return ret;
}

/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
				    FW_CMD_LEN16(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
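/*
 * Illustrative sketch, not part of the driver: bringing a VI's Rx and Tx
 * paths up with t4_enable_vi() and then blinking the port LED five times
 * via t4_identify_port() (defined below, assumed declared in cxgb4.h) so
 * the operator can find the cable.
 */
static int __maybe_unused example_bring_up_vi(struct adapter *adap,
					      unsigned int mbox,
					      unsigned int viid)
{
	int ret;

	ret = t4_enable_vi(adap, mbox, viid, true, true);
	if (ret)
		return ret;
	return t4_identify_port(adap, mbox, viid, 5);
}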
/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));	/* don't send stack garbage to FW */
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = htons(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
			    FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
			    FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
			    FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
			    FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {     /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}

static void __devinit get_pci_mode(struct adapter *adapter,
				   struct pci_params *p)
{
	u16 val;
	u32 pcie_cap = pci_pcie_cap(adapter->pdev);

	if (pcie_cap) {
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}
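/*
 * Worked example for get_pci_mode() above: a Link Status value of 0x0081
 * decodes to speed code 1 (2.5 GT/s) in the low four bits and a
 * negotiated width of x8 in bits 9:4, i.e. p->speed = 1, p->width = 8.
 */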
/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void __devinit init_link_config(struct link_config *lc,
				       unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

int t4_wait_dev_ready(struct adapter *adap)
{
	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
		return 0;
	msleep(500);
	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}

static int __devinit get_flash_params(struct adapter *adap)
{
	int ret;
	u32 info;

	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);                   /* unlock SF */
	if (ret)
		return ret;

	if ((info & 0xff) != 0x20)              /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                            /* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
	return 0;
}

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int __devinit t4_prep_adapter(struct adapter *adapter)
{
	int ret;

	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);
	adapter->params.rev = t4_read_reg(adapter, PL_REV);

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n",
			ret);
		return ret;
	}

	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	return 0;
}
/**
 * t4_port_init - initialize port SW state
 * @adap: the adapter
 * @mbox: mailbox to use for the FW commands
 * @pf: the PF owning the ports
 * @vf: the VF owning the ports
 *
 * Queries FW for information about each of the adapter's ports,
 * allocates a VI per port, and initializes the SW state maintained for
 * each port, including its MAC addresses, link configuration, and RSS
 * mode.
 */
int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
		adap->port[i]->dev_id = j;

		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}
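/*
 * Illustrative sketch, not part of the driver: the probe-time ordering
 * implied by the two routines above; the adapter must pass
 * t4_prep_adapter() before t4_port_init() can query the FW about its
 * ports.  The mailbox/PF/VF numbers are assumptions for the example.
 */
static int __devinit __maybe_unused example_probe_hw(struct adapter *adap)
{
	int ret = t4_prep_adapter(adap);

	return ret ? ret : t4_port_init(adap, 0, 0, 0);
}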