/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/eventhandler.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
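/*
 * Illustrative usage sketch (not from the original source): polling a
 * hypothetical ready bit with t4_wait_op_done_val().  A_MY_STATUS and
 * F_MY_READY are placeholder names, not real register definitions.
 *
 *	u32 val;
 *
 *	// Poll up to 100 times, 10us apart (1ms total); val receives the
 *	// register value observed at completion.
 *	if (t4_wait_op_done_val(adap, A_MY_STATUS, F_MY_READY, 1,
 *	    100, 10, &val) < 0)
 *		return -EAGAIN;
 */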
/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);	/* flush */
}
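/*
 * Worked example (illustrative): t4_set_reg_field() is a read-modify-write,
 * so only the bits in @mask change.  With the register holding 0x12345678,
 *
 *	t4_set_reg_field(adap, addr, 0xff00, 0xaa00);
 *
 * first computes v = 0x12345678 & ~0xff00 = 0x12340078, then writes
 * v | 0xaa00 = 0x1234aa78.
 */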
/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
		     V_REGISTER(reg));
	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * This routine prints out the reason for the firmware error (as
 * reported by the firmware).
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff; otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg);

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			CH_DUMP_MBOX(adap, mbox, data_reg);

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
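/*
 * Worked example (illustrative): with the delay[] table above, a sleeping
 * caller waits 1 + 1 + 3 + 5 + 10 + 10 + 20 + 50 + 100 = 200 ms across the
 * first nine polls, then keeps polling every 100 ms (the last table entry
 * repeats) until FW_CMD_MAX_TIMEOUT is reached.  A non-sleeping caller never
 * advances delay_idx, so it spins with mdelay() at the initial 1 ms interval.
 */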
/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
		/*
		 * These macros are missing from t4_regs.h.
		 * Added temporarily for testing.
		 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 * @adap: the adapter
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to read
 * @buf: host memory buffer
 *
 * Reads an [almost] arbitrary memory region in the firmware: the
 * firmware memory address, length and host buffer must be aligned on
 * 32-bit boundaries.  The memory is returned as a raw byte sequence from
 * the firmware's memory.  If this memory contains data structures which
 * contain multi-byte integers, it's the caller's responsibility to
 * perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64 - 1);
	end = (addr + len + 64 - 1) & ~(64 - 1);
	offset = (addr - start) / sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
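/*
 * Worked example (illustrative): t4_mem_read(adap, MEM_EDC0, 0x104, 8, buf)
 * rounds down to start = 0x100 and up to end = 0x140, so a single 64-byte
 * line is read; offset = (0x104 - 0x100) / 4 = 1, so words data[1] and
 * data[2] are copied into the caller's buffer.
 */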
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define VPD_INFO_FLD_HDR_SIZE 3
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 * t4_seeprom_read - read a serial EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability.  Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 * t4_seeprom_write - write a serial EEPROM location
 * @adapter: adapter to write
 * @addr: virtual EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
 * VPD capability.  Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual.  The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [ES-A..ES)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
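/*
 * Worked example (illustrative, assuming a 32 KB EEPROM so ES = 0x8000, and
 * an arbitrary sz of 0x800 chosen just for this example): with fn = 1,
 * A = fn * sz = 0x800, so
 *
 *	t4_eeprom_ptov(0x000, 1, 0x800) = 0x7c00  (first 1K maps to 31K)
 *	t4_eeprom_ptov(0x400, 1, 0x800) = 0x7800  (ES - A + 0x400 - 1K)
 *	t4_eeprom_ptov(0xc00, 1, 0x800) = 0x000   (0xc00 - 1K - A)
 */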
/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 * get_vpd_keyword_val - Locates an information field keyword in the VPD
 * @v: Pointer to buffered vpd data structure
 * @kw: The keyword to search for
 *
 * Returns the offset of the information field keyword's value within the
 * VPD buffer, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -ENOENT;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return ret;
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}
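/*
 * Illustrative note (not from the original source): each VPD-R information
 * field is a 3-byte header (2-byte keyword, 1-byte length) followed by its
 * value, e.g. a serial number field laid out as
 *
 *	'S' 'N' 0x08 '0' '1' '2' '3' '4' '5' '6' '7'
 *
 * get_vpd_keyword_val() returns the offset just past the header, so the
 * vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2] expressions above re-read the length
 * byte (0x08 here) to bound the copy into the destination field.
 */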
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,		/* program page */
	SF_WR_DISABLE   = 4,		/* disable writes */
	SF_RD_STATUS    = 5,		/* read status register */
	SF_WR_ENABLE    = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_RD_ID        = 0x9f,		/* read ID */
	SF_ERASE_SECTOR = 0xd8,		/* erase sector */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
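/*
 * Illustrative sketch (an assumption-based example, not code from this
 * file): sf1_write()/sf1_read() can be chained to issue other SPI commands,
 * e.g. fetching the flash ID bytes with SF_RD_ID; error handling elided:
 *
 *	u32 info;
 *	int ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
 *	if (!ret)
 *		ret = sf1_read(adapter, 3, 0, 1, &info);
 *	t4_write_reg(adapter, A_SF_OP, 0);	(unlock SF)
 */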
/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 * If @byte_oriented is set the write data is stored as a byte stream
 * (i.e., it matches what's on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
			     1, vers, 0);
}
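/*
 * Example (illustrative): unpacking the version word read by
 * t4_get_fw_version() with the same macros t4_check_fw_version() uses below:
 *
 *	u32 vers;
 *	if (t4_get_fw_version(adapter, &vers) == 0) {
 *		int major = G_FW_HDR_FW_VER_MAJOR(vers);
 *		int minor = G_FW_HDR_FW_VER_MINOR(vers);
 *		int micro = G_FW_HDR_FW_VER_MICRO(vers);
 *		...
 *	}
 */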
/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
							tp_microcode_ver),
			     1, vers, 0);
}

/**
 * t4_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if there's an exact match, a negative error if the version could not be
 * read or there's a major version mismatch, and a positive value if the
 * expected major version is found but there's a minor version mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	switch (chip_id(adapter)) {
	case CHELSIO_T4:
		exp_major = T4FW_VERSION_MAJOR;
		exp_minor = T4FW_VERSION_MINOR;
		exp_micro = T4FW_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_VERSION_MAJOR;
		exp_minor = T5FW_VERSION_MINOR;
		exp_micro = T5FW_VERSION_MICRO;
		break;
	default:
		CH_ERR(adapter, "Unsupported chip type, %x\n",
		       chip_id(adapter));
		return -EINVAL;
	}

	if (major != exp_major) {	/* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, exp_major);
		return -EINVAL;
	}

	if (minor == exp_minor && micro == exp_micro)
		return 0;	/* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored, or an error if the device FLASH is too small to contain
 * a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/**
 * t4_load_cfg - download config file
 * @adap: the adapter
 * @cfg_data: the cfg text file to write
 * @size: text file size
 *
 * Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}
/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
		return -EFBIG;
	}
	if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
		CH_ERR(adap,
		       "FW image (%d) is not suitable for this adapter (%d)\n",
		       hdr->chip, chip_id(adap));
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8 signature[2];	/* ROM Signature.  Should be 0xaa55 */
	u8 reserved[22];	/* Reserved per processor Architecture data */
	u8 pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;	/* PCI_EXPANSION_ROM_HEADER */
/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8 signature[2];	/* ROM Signature.  Should be 0xaa55 */
	u8 size512;		/* Current Image Size in units of 512 bytes */
	u8 initentry_point[4];
	u8 cksum;		/* Checksum computed on the entire Image */
	u8 reserved[16];	/* Reserved */
	u8 pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8 signature[2];		/* ROM signature.  The value 0xaa55 */
	u8 initialization_size[2];	/* Units 512.  Includes this header */
	u8 efi_signature[4];		/* Signature from EFI image header.  0x0EF1 */
	u8 efi_subsystem[2];		/* Subsystem value for EFI image header */
	u8 efi_machine_type[2];		/* Machine type from EFI image header */
	u8 compression_type[2];		/* Compression type. */
	/*
	 * Compression type definition
	 * 0x0: uncompressed
	 * 0x1: Compressed
	 * 0x2-0xFFFF: Reserved
	 */
	u8 reserved[8];			/* Reserved */
	u8 efi_image_header_offset[2];	/* Offset to EFI Image */
	u8 pcir_offset[2];		/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8 signature[4];	/* Signature.  The string "PCIR" */
	u8 vendor_id[2];	/* Vendor Identification */
	u8 device_id[2];	/* Device Identification */
	u8 vital_product[2];	/* Pointer to Vital Product Data */
	u8 length[2];		/* PCIR Data Structure Length */
	u8 revision;		/* PCIR Data Structure Revision */
	u8 class_code[3];	/* Class Code */
	u8 image_length[2];	/* Image Length.  Multiple of 512B */
	u8 code_revision[2];	/* Revision Level of Code/Data */
	u8 code_type;		/* Code Type. */
	/*
	 * PCI Expansion ROM Code Types
	 * 0x00: Intel IA-32, PC-AT compatible.  Legacy
	 * 0x01: Open Firmware standard for PCI.  FCODE
	 * 0x02: Hewlett-Packard PA RISC.  HP reserved
	 * 0x03: EFI Image.  EFI
	 * 0x04-0xFF: Reserved.
	 */
	u8 indicator;		/* Indicator.  Identifies the last image in the ROM */
	u8 reserved[2];		/* Reserved */
} pcir_data_t;	/* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,	/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,	/* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,		/* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t),	/* basic header */
	BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC,	/* 1 byte * length increment */
	VENDOR_ID = 0x1425,		/* Vendor ID */
	PCIR_SIGNATURE = 0x52494350	/* PCIR signature */
};

/*
 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write.
 * @boot_data: the boot image to modify.
 *
 * Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device IDs.
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *)&boot_data[cur_header];
		pcir_header = (pcir_data_t *)&boot_data[cur_header +
		    le16_to_cpu(*(u16 *)header->pcir_offset)];
		/*
		 * Only modify the Device ID if the code type is Legacy or EFI.
		 * 0x00: Okay to modify (Legacy)
		 * 0x01: FCODE.  Do not modify
		 * 0x03: Okay to modify (EFI)
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16 *)pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert the summed value to create the checksum and
			 * write it directly into the boot data.
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16 *)pcir_header->device_id = device_id;

		}

		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}

/*
 * t4_load_boot - download boot flash
 * @adap: the adapter
 * @boot_data: the boot image to write
 * @boot_addr: offset in flash to write boot_data
 * @size: image size
 *
 * Write the supplied boot image to the card's serial flash.
 * The boot image has the following sections: a 28-byte header and the
 * boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = boot_addr * 1024;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * Number of sectors spanned
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *)&boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16 *)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32 *)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16 *)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID doesn't match Chelsio ID\n");
		return -EINVAL;
	}

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = (device_id & 0xff) | 0x4000;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16 *)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image.  This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/*
	 * Finally write the BIOS boot header (the first page); header still
	 * points at the start of the image even though boot_data has been
	 * advanced past it by the loop above.
	 */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}
/**
 * t4_read_cimq_cfg - read CIM queue configuration
 * @adap: the adapter
 * @base: holds the queue base addresses in bytes
 * @size: holds the queue sizes in bytes
 * @thres: holds the queue full thresholds in bytes
 *
 * Returns the current configuration of the CIM queues, starting with
 * the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256;	/* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256;	/* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;	/* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256;	/* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256;	/* value is in 256-byte units */
	}
}

/**
 * t4_read_cim_ibq - read the contents of a CIM inbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		/*
		 * It might take 3-10 ms before the IBQ debug read access is
		 * allowed.  Wait for up to 1 second, polling every 1 usec.
		 */
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      1000000, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}
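/*
 * Illustrative usage sketch (not from the original source): draining IBQ 0
 * into a caller-supplied buffer sized for the whole queue.
 *
 *	u32 buf[CIM_IBQ_SIZE * 4];
 *	int n = t4_read_cim_ibq(adap, 0, buf, ARRAY_SIZE(buf));
 *
 * n is the number of 32-bit words actually read, or a negative errno.
 */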
/**
 * t4_read_cim_obq - read the contents of a CIM outbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if (qid >= cim_num_obq || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;	/* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;	/* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 * t4_cim_read - read a block from CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 * t4_cim_write - write a block into CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to write
 * @valp: set of values to write
 *
 * Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
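/*
 * Example (illustrative): reading four words from the start of the CIM
 * control region, either directly with t4_cim_read() at CIM_CTL_BASE or via
 * the t4_cim_ctl_read() wrapper that follows:
 *
 *	unsigned int vals[4];
 *	int ret = t4_cim_read(adap, CIM_CTL_BASE, 4, vals);
 */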
/**
 * t4_cim_ctl_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 * t4_cim_read_la - read CIM LA capture buffer
 * @adap: the adapter
 * @la_buf: where to store the LA data
 * @wrptr: the HW write pointer within the capture buffer
 *
 * Reads the contents of the CIM LA buffer with the most recent entry at
 * the end of the returned data and with the entry at @wrptr first.
 * We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;
		idx = (idx + 1) & M_UPDBGLARDPTR;
	}
restart:
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
* @adap: the adapter 1809 * @la_buf: where to store the LA data 1810 * @wrptr: the HW write pointer within the capture buffer 1811 * 1812 * Reads the contents of the TP LA buffer with the most recent entry at 1813 * the end of the returned data and with the entry at @wrptr first. 1814 * We leave the LA in the running state we find it in. 1815 */ 1816 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr) 1817 { 1818 bool last_incomplete; 1819 unsigned int i, cfg, val, idx; 1820 1821 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff; 1822 if (cfg & F_DBGLAENABLE) /* freeze LA */ 1823 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, 1824 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE)); 1825 1826 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG); 1827 idx = G_DBGLAWPTR(val); 1828 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0; 1829 if (last_incomplete) 1830 idx = (idx + 1) & M_DBGLARPTR; 1831 if (wrptr) 1832 *wrptr = idx; 1833 1834 val &= 0xffff; 1835 val &= ~V_DBGLARPTR(M_DBGLARPTR); 1836 val |= adap->params.tp.la_mask; 1837 1838 for (i = 0; i < TPLA_SIZE; i++) { 1839 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val); 1840 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL); 1841 idx = (idx + 1) & M_DBGLARPTR; 1842 } 1843 1844 /* Wipe out last entry if it isn't valid */ 1845 if (last_incomplete) 1846 la_buf[TPLA_SIZE - 1] = ~0ULL; 1847 1848 if (cfg & F_DBGLAENABLE) /* restore running state */ 1849 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, 1850 cfg | adap->params.tp.la_mask); 1851 } 1852 1853 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) 1854 { 1855 unsigned int i, j; 1856 1857 for (i = 0; i < 8; i++) { 1858 u32 *p = la_buf + i; 1859 1860 t4_write_reg(adap, A_ULP_RX_LA_CTL, i); 1861 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR); 1862 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j); 1863 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8) 1864 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA); 1865 } 1866 } 1867 1868 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ 1869 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ 1870 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) 1871 1872 /** 1873 * t4_link_start - apply link configuration to MAC/PHY 1874 * @adap: the adapter, @mbox: mbox to use for the FW command 1875 * @port: the port id 1876 * @lc: the requested link configuration 1877 * 1878 * Set up a port's MAC and PHY according to a desired link configuration. 1879 * - If the PHY can auto-negotiate, first decide what to advertise, then 1880 * enable/disable auto-negotiation as desired, and reset. 1881 * - If the PHY does not auto-negotiate, just reset it. 1882 * - If auto-negotiation is off, set the MAC to the proper speed/duplex/FC, 1883 * otherwise do it later based on the outcome of auto-negotiation.
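 *
 * A minimal usage sketch (the port_info pointer "pi" and its fields are
 * illustrative, not defined in this file):
 *
 *	struct link_config *lc = &pi->link_cfg;
 *	lc->autoneg = AUTONEG_DISABLE;
 *	lc->requested_speed = FW_PORT_CAP_SPEED_10G;
 *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
 *	(void) t4_link_start(adap, mbox, pi->tx_chan, lc);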
1884 */ 1885 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, 1886 struct link_config *lc) 1887 { 1888 struct fw_port_cmd c; 1889 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO); 1890 1891 lc->link_ok = 0; 1892 if (lc->requested_fc & PAUSE_RX) 1893 fc |= FW_PORT_CAP_FC_RX; 1894 if (lc->requested_fc & PAUSE_TX) 1895 fc |= FW_PORT_CAP_FC_TX; 1896 1897 memset(&c, 0, sizeof(c)); 1898 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST | 1899 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port)); 1900 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | 1901 FW_LEN16(c)); 1902 1903 if (!(lc->supported & FW_PORT_CAP_ANEG)) { 1904 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc); 1905 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); 1906 } else if (lc->autoneg == AUTONEG_DISABLE) { 1907 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi); 1908 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); 1909 } else 1910 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi); 1911 1912 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 1913 } 1914 1915 /** 1916 * t4_restart_aneg - restart autonegotiation 1917 * @adap: the adapter 1918 * @mbox: mbox to use for the FW command 1919 * @port: the port id 1920 * 1921 * Restarts autonegotiation for the selected port. 1922 */ 1923 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) 1924 { 1925 struct fw_port_cmd c; 1926 1927 memset(&c, 0, sizeof(c)); 1928 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST | 1929 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port)); 1930 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | 1931 FW_LEN16(c)); 1932 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); 1933 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 1934 } 1935 1936 struct intr_info { 1937 unsigned int mask; /* bits to check in interrupt status */ 1938 const char *msg; /* message to print or NULL */ 1939 short stat_idx; /* stat counter to increment or -1 */ 1940 unsigned short fatal; /* whether the condition reported is fatal */ 1941 }; 1942 1943 /** 1944 * t4_handle_intr_status - table-driven interrupt handler 1945 * @adapter: the adapter that generated the interrupt 1946 * @reg: the interrupt status register to process 1947 * @acts: table of interrupt actions 1948 * 1949 * A table-driven interrupt handler that applies a set of masks to an 1950 * interrupt status word and performs the corresponding actions if the 1951 * interrupts described by the mask have occurred. The actions include 1952 * optionally emitting a warning or alert message. The table is terminated 1953 * by an entry specifying mask 0. Returns the number of fatal interrupt 1954 * conditions. 1955 */ 1956 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, 1957 const struct intr_info *acts) 1958 { 1959 int fatal = 0; 1960 unsigned int mask = 0; 1961 unsigned int status = t4_read_reg(adapter, reg); 1962 1963 for ( ; acts->mask; ++acts) { 1964 if (!(status & acts->mask)) 1965 continue; 1966 if (acts->fatal) { 1967 fatal++; 1968 CH_ALERT(adapter, "%s (0x%x)\n", 1969 acts->msg, status & acts->mask); 1970 } else if (acts->msg) 1971 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", 1972 acts->msg, status & acts->mask); 1973 mask |= acts->mask; 1974 } 1975 status &= mask; 1976 if (status) /* clear processed interrupts */ 1977 t4_write_reg(adapter, reg, status); 1978 return fatal; 1979 } 1980 1981 /* 1982 * Interrupt handler for the PCIE module.
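 *
 * Every table below is an array of struct intr_info entries of the form
 * { mask, message, stat_idx, fatal } consumed by t4_handle_intr_status().
 * For example, { F_RNPP, "RXNP array parity error", -1, 1 } marks F_RNPP
 * as a fatal condition with no statistics counter to increment.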
1983 */ 1984 static void pcie_intr_handler(struct adapter *adapter) 1985 { 1986 static struct intr_info sysbus_intr_info[] = { 1987 { F_RNPP, "RXNP array parity error", -1, 1 }, 1988 { F_RPCP, "RXPC array parity error", -1, 1 }, 1989 { F_RCIP, "RXCIF array parity error", -1, 1 }, 1990 { F_RCCP, "Rx completions control array parity error", -1, 1 }, 1991 { F_RFTP, "RXFT array parity error", -1, 1 }, 1992 { 0 } 1993 }; 1994 static struct intr_info pcie_port_intr_info[] = { 1995 { F_TPCP, "TXPC array parity error", -1, 1 }, 1996 { F_TNPP, "TXNP array parity error", -1, 1 }, 1997 { F_TFTP, "TXFT array parity error", -1, 1 }, 1998 { F_TCAP, "TXCA array parity error", -1, 1 }, 1999 { F_TCIP, "TXCIF array parity error", -1, 1 }, 2000 { F_RCAP, "RXCA array parity error", -1, 1 }, 2001 { F_OTDD, "outbound request TLP discarded", -1, 1 }, 2002 { F_RDPE, "Rx data parity error", -1, 1 }, 2003 { F_TDUE, "Tx uncorrectable data error", -1, 1 }, 2004 { 0 } 2005 }; 2006 static struct intr_info pcie_intr_info[] = { 2007 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, 2008 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, 2009 { F_MSIDATAPERR, "MSI data parity error", -1, 1 }, 2010 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 2011 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 2012 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 2013 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 2014 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, 2015 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, 2016 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 2017 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, 2018 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 2019 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 2020 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, 2021 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 2022 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 2023 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, 2024 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 2025 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 2026 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 2027 { F_FIDPERR, "PCI FID parity error", -1, 1 }, 2028 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, 2029 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 }, 2030 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 2031 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, 2032 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 }, 2033 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 }, 2034 { F_PCIESINT, "PCI core secondary fault", -1, 1 }, 2035 { F_PCIEPINT, "PCI core primary fault", -1, 1 }, 2036 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1, 2037 0 }, 2038 { 0 } 2039 }; 2040 2041 static struct intr_info t5_pcie_intr_info[] = { 2042 { F_MSTGRPPERR, "Master Response Read Queue parity error", 2043 -1, 1 }, 2044 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 }, 2045 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 }, 2046 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 2047 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 2048 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 2049 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 2050 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error", 2051 -1, 1 }, 2052 { F_PIOREQGRPPERR, "PCI PIO request Group 
FIFO parity error", 2053 -1, 1 }, 2054 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 2055 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 }, 2056 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 2057 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 2058 { F_DREQWRPERR, "PCI DMA channel write request parity error", 2059 -1, 1 }, 2060 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 2061 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 2062 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 }, 2063 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 2064 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 2065 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 2066 { F_FIDPERR, "PCI FID parity error", -1, 1 }, 2067 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 }, 2068 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 }, 2069 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 2070 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error", 2071 -1, 1 }, 2072 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error", 2073 -1, 1 }, 2074 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 }, 2075 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 }, 2076 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 }, 2077 { F_READRSPERR, "Outbound read error", -1, 2078 0 }, 2079 { 0 } 2080 }; 2081 2082 int fat; 2083 2084 if (is_t4(adapter)) 2085 fat = t4_handle_intr_status(adapter, 2086 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 2087 sysbus_intr_info) + 2088 t4_handle_intr_status(adapter, 2089 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 2090 pcie_port_intr_info) + 2091 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, 2092 pcie_intr_info); 2093 else 2094 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, 2095 t5_pcie_intr_info); 2096 if (fat) 2097 t4_fatal_err(adapter); 2098 } 2099 2100 /* 2101 * TP interrupt handler. 2102 */ 2103 static void tp_intr_handler(struct adapter *adapter) 2104 { 2105 static struct intr_info tp_intr_info[] = { 2106 { 0x3fffffff, "TP parity error", -1, 1 }, 2107 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 2108 { 0 } 2109 }; 2110 2111 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info)) 2112 t4_fatal_err(adapter); 2113 } 2114 2115 /* 2116 * SGE interrupt handler. 
2117 */ 2118 static void sge_intr_handler(struct adapter *adapter) 2119 { 2120 u64 v; 2121 u32 err; 2122 2123 static struct intr_info sge_intr_info[] = { 2124 { F_ERR_CPL_EXCEED_IQE_SIZE, 2125 "SGE received CPL exceeding IQE size", -1, 1 }, 2126 { F_ERR_INVALID_CIDX_INC, 2127 "SGE GTS CIDX increment too large", -1, 0 }, 2128 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 2129 { F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, 2130 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0, 2131 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 2132 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 2133 0 }, 2134 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 2135 0 }, 2136 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 2137 0 }, 2138 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 2139 0 }, 2140 { F_ERR_ING_CTXT_PRIO, 2141 "SGE too many priority ingress contexts", -1, 0 }, 2142 { F_ERR_EGR_CTXT_PRIO, 2143 "SGE too many priority egress contexts", -1, 0 }, 2144 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, 2145 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, 2146 { 0 } 2147 }; 2148 2149 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) | 2150 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32); 2151 if (v) { 2152 CH_ALERT(adapter, "SGE parity error (%#llx)\n", 2153 (unsigned long long)v); 2154 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v); 2155 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32); 2156 } 2157 2158 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info); 2159 2160 err = t4_read_reg(adapter, A_SGE_ERROR_STATS); 2161 if (err & F_ERROR_QID_VALID) { 2162 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err)); 2163 if (err & F_UNCAPTURED_ERROR) 2164 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n"); 2165 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID | 2166 F_UNCAPTURED_ERROR); 2167 } 2168 2169 if (v != 0) 2170 t4_fatal_err(adapter); 2171 } 2172 2173 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\ 2174 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR) 2175 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\ 2176 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR) 2177 2178 /* 2179 * CIM interrupt handler. 
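 *
 * A firmware error latched in A_PCIE_FW is reported first via
 * t4_report_fw_error(); the CIM host and uP-access interrupt causes are
 * then decoded against their intr_info tables as usual.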
2180 */ 2181 static void cim_intr_handler(struct adapter *adapter) 2182 { 2183 static struct intr_info cim_intr_info[] = { 2184 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 2185 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 2186 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 2187 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, 2188 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, 2189 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, 2190 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 2191 { 0 } 2192 }; 2193 static struct intr_info cim_upintr_info[] = { 2194 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 2195 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 2196 { F_ILLWRINT, "CIM illegal write", -1, 1 }, 2197 { F_ILLRDINT, "CIM illegal read", -1, 1 }, 2198 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 }, 2199 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 }, 2200 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, 2201 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, 2202 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, 2203 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, 2204 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, 2205 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, 2206 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, 2207 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, 2208 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, 2209 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, 2210 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, 2211 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, 2212 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, 2213 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, 2214 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 }, 2215 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 }, 2216 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 }, 2217 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 }, 2218 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, 2219 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, 2220 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 }, 2221 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, 2222 { 0 } 2223 }; 2224 int fat; 2225 2226 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR) 2227 t4_report_fw_error(adapter); 2228 2229 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 2230 cim_intr_info) + 2231 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE, 2232 cim_upintr_info); 2233 if (fat) 2234 t4_fatal_err(adapter); 2235 } 2236 2237 /* 2238 * ULP RX interrupt handler. 2239 */ 2240 static void ulprx_intr_handler(struct adapter *adapter) 2241 { 2242 static struct intr_info ulprx_intr_info[] = { 2243 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 }, 2244 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 }, 2245 { 0x7fffff, "ULPRX parity error", -1, 1 }, 2246 { 0 } 2247 }; 2248 2249 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info)) 2250 t4_fatal_err(adapter); 2251 } 2252 2253 /* 2254 * ULP TX interrupt handler. 
2255 */ 2256 static void ulptx_intr_handler(struct adapter *adapter) 2257 { 2258 static struct intr_info ulptx_intr_info[] = { 2259 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 2260 0 }, 2261 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 2262 0 }, 2263 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 2264 0 }, 2265 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 2266 0 }, 2267 { 0xfffffff, "ULPTX parity error", -1, 1 }, 2268 { 0 } 2269 }; 2270 2271 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info)) 2272 t4_fatal_err(adapter); 2273 } 2274 2275 /* 2276 * PM TX interrupt handler. 2277 */ 2278 static void pmtx_intr_handler(struct adapter *adapter) 2279 { 2280 static struct intr_info pmtx_intr_info[] = { 2281 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 2282 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 2283 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 2284 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 2285 { 0xffffff0, "PMTX framing error", -1, 1 }, 2286 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 2287 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 2288 1 }, 2289 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 2290 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 2291 { 0 } 2292 }; 2293 2294 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info)) 2295 t4_fatal_err(adapter); 2296 } 2297 2298 /* 2299 * PM RX interrupt handler. 2300 */ 2301 static void pmrx_intr_handler(struct adapter *adapter) 2302 { 2303 static struct intr_info pmrx_intr_info[] = { 2304 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 2305 { 0x3ffff0, "PMRX framing error", -1, 1 }, 2306 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 2307 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 2308 1 }, 2309 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 2310 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 2311 { 0 } 2312 }; 2313 2314 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info)) 2315 t4_fatal_err(adapter); 2316 } 2317 2318 /* 2319 * CPL switch interrupt handler. 2320 */ 2321 static void cplsw_intr_handler(struct adapter *adapter) 2322 { 2323 static struct intr_info cplsw_intr_info[] = { 2324 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 2325 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 2326 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 2327 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 2328 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 2329 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 2330 { 0 } 2331 }; 2332 2333 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info)) 2334 t4_fatal_err(adapter); 2335 } 2336 2337 /* 2338 * LE interrupt handler. 2339 */ 2340 static void le_intr_handler(struct adapter *adap) 2341 { 2342 static struct intr_info le_intr_info[] = { 2343 { F_LIPMISS, "LE LIP miss", -1, 0 }, 2344 { F_LIP0, "LE 0 LIP error", -1, 0 }, 2345 { F_PARITYERR, "LE parity error", -1, 1 }, 2346 { F_UNKNOWNCMD, "LE unknown command", -1, 1 }, 2347 { F_REQQPARERR, "LE request queue parity error", -1, 1 }, 2348 { 0 } 2349 }; 2350 2351 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info)) 2352 t4_fatal_err(adap); 2353 } 2354 2355 /* 2356 * MPS interrupt handler. 
2357 */ 2358 static void mps_intr_handler(struct adapter *adapter) 2359 { 2360 static struct intr_info mps_rx_intr_info[] = { 2361 { 0xffffff, "MPS Rx parity error", -1, 1 }, 2362 { 0 } 2363 }; 2364 static struct intr_info mps_tx_intr_info[] = { 2365 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 }, 2366 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 2367 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error", 2368 -1, 1 }, 2369 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error", 2370 -1, 1 }, 2371 { F_BUBBLE, "MPS Tx underflow", -1, 1 }, 2372 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 2373 { F_FRMERR, "MPS Tx framing error", -1, 1 }, 2374 { 0 } 2375 }; 2376 static struct intr_info mps_trc_intr_info[] = { 2377 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 }, 2378 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1, 2379 1 }, 2380 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 }, 2381 { 0 } 2382 }; 2383 static struct intr_info mps_stat_sram_intr_info[] = { 2384 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 2385 { 0 } 2386 }; 2387 static struct intr_info mps_stat_tx_intr_info[] = { 2388 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 2389 { 0 } 2390 }; 2391 static struct intr_info mps_stat_rx_intr_info[] = { 2392 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 2393 { 0 } 2394 }; 2395 static struct intr_info mps_cls_intr_info[] = { 2396 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 2397 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 2398 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 2399 { 0 } 2400 }; 2401 2402 int fat; 2403 2404 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE, 2405 mps_rx_intr_info) + 2406 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE, 2407 mps_tx_intr_info) + 2408 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE, 2409 mps_trc_intr_info) + 2410 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM, 2411 mps_stat_sram_intr_info) + 2412 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 2413 mps_stat_tx_intr_info) + 2414 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 2415 mps_stat_rx_intr_info) + 2416 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE, 2417 mps_cls_intr_info); 2418 2419 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0); 2420 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */ 2421 if (fat) 2422 t4_fatal_err(adapter); 2423 } 2424 2425 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE) 2426 2427 /* 2428 * EDC/MC interrupt handler. 2429 */ 2430 static void mem_intr_handler(struct adapter *adapter, int idx) 2431 { 2432 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 2433 2434 unsigned int addr, cnt_addr, v; 2435 2436 if (idx <= MEM_EDC1) { 2437 addr = EDC_REG(A_EDC_INT_CAUSE, idx); 2438 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx); 2439 } else { 2440 if (is_t4(adapter)) { 2441 addr = A_MC_INT_CAUSE; 2442 cnt_addr = A_MC_ECC_STATUS; 2443 } else { 2444 addr = A_MC_P_INT_CAUSE; 2445 cnt_addr = A_MC_P_ECC_STATUS; 2446 } 2447 } 2448 2449 v = t4_read_reg(adapter, addr) & MEM_INT_MASK; 2450 if (v & F_PERR_INT_CAUSE) 2451 CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]); 2452 if (v & F_ECC_CE_INT_CAUSE) { 2453 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr)); 2454 2455 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT)); 2456 CH_WARN_RATELIMIT(adapter, 2457 "%u %s correctable ECC data error%s\n", 2458 cnt, name[idx], cnt > 1 ? 
"s" : ""); 2459 } 2460 if (v & F_ECC_UE_INT_CAUSE) 2461 CH_ALERT(adapter, "%s uncorrectable ECC data error\n", 2462 name[idx]); 2463 2464 t4_write_reg(adapter, addr, v); 2465 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE)) 2466 t4_fatal_err(adapter); 2467 } 2468 2469 /* 2470 * MA interrupt handler. 2471 */ 2472 static void ma_intr_handler(struct adapter *adapter) 2473 { 2474 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE); 2475 2476 if (status & F_MEM_PERR_INT_CAUSE) { 2477 CH_ALERT(adapter, "MA parity error, parity status %#x\n", 2478 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1)); 2479 if (is_t5(adapter)) 2480 CH_ALERT(adapter, 2481 "MA parity error, parity status %#x\n", 2482 t4_read_reg(adapter, 2483 A_MA_PARITY_ERROR_STATUS2)); 2484 } 2485 if (status & F_MEM_WRAP_INT_CAUSE) { 2486 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS); 2487 CH_ALERT(adapter, "MA address wrap-around error by client %u to" 2488 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v), 2489 G_MEM_WRAP_ADDRESS(v) << 4); 2490 } 2491 t4_write_reg(adapter, A_MA_INT_CAUSE, status); 2492 t4_fatal_err(adapter); 2493 } 2494 2495 /* 2496 * SMB interrupt handler. 2497 */ 2498 static void smb_intr_handler(struct adapter *adap) 2499 { 2500 static struct intr_info smb_intr_info[] = { 2501 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 2502 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 2503 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 2504 { 0 } 2505 }; 2506 2507 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info)) 2508 t4_fatal_err(adap); 2509 } 2510 2511 /* 2512 * NC-SI interrupt handler. 2513 */ 2514 static void ncsi_intr_handler(struct adapter *adap) 2515 { 2516 static struct intr_info ncsi_intr_info[] = { 2517 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 2518 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 2519 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 2520 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 2521 { 0 } 2522 }; 2523 2524 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info)) 2525 t4_fatal_err(adap); 2526 } 2527 2528 /* 2529 * XGMAC interrupt handler. 2530 */ 2531 static void xgmac_intr_handler(struct adapter *adap, int port) 2532 { 2533 u32 v, int_cause_reg; 2534 2535 if (is_t4(adap)) 2536 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE); 2537 else 2538 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE); 2539 2540 v = t4_read_reg(adap, int_cause_reg); 2541 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR); 2542 if (!v) 2543 return; 2544 2545 if (v & F_TXFIFO_PRTY_ERR) 2546 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port); 2547 if (v & F_RXFIFO_PRTY_ERR) 2548 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port); 2549 t4_write_reg(adap, int_cause_reg, v); 2550 t4_fatal_err(adap); 2551 } 2552 2553 /* 2554 * PL interrupt handler. 2555 */ 2556 static void pl_intr_handler(struct adapter *adap) 2557 { 2558 static struct intr_info pl_intr_info[] = { 2559 { F_FATALPERR, "Fatal parity error", -1, 1 }, 2560 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 2561 { 0 } 2562 }; 2563 2564 static struct intr_info t5_pl_intr_info[] = { 2565 { F_PL_BUSPERR, "PL bus parity error", -1, 1 }, 2566 { F_FATALPERR, "Fatal parity error", -1, 1 }, 2567 { 0 } 2568 }; 2569 2570 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, 2571 is_t4(adap) ? 
pl_intr_info : t5_pl_intr_info)) 2572 t4_fatal_err(adap); 2573 } 2574 2575 #define PF_INTR_MASK (F_PFSW | F_PFCIM) 2576 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \ 2577 F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \ 2578 F_CPL_SWITCH | F_SGE | F_ULP_TX) 2579 2580 /** 2581 * t4_slow_intr_handler - control path interrupt handler 2582 * @adapter: the adapter 2583 * 2584 * T4 interrupt handler for non-data global interrupt events, e.g., errors. 2585 * The designation 'slow' is because it involves register reads, while 2586 * data interrupts typically don't involve any MMIOs. 2587 */ 2588 int t4_slow_intr_handler(struct adapter *adapter) 2589 { 2590 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE); 2591 2592 if (!(cause & GLBL_INTR_MASK)) 2593 return 0; 2594 if (cause & F_CIM) 2595 cim_intr_handler(adapter); 2596 if (cause & F_MPS) 2597 mps_intr_handler(adapter); 2598 if (cause & F_NCSI) 2599 ncsi_intr_handler(adapter); 2600 if (cause & F_PL) 2601 pl_intr_handler(adapter); 2602 if (cause & F_SMB) 2603 smb_intr_handler(adapter); 2604 if (cause & F_XGMAC0) 2605 xgmac_intr_handler(adapter, 0); 2606 if (cause & F_XGMAC1) 2607 xgmac_intr_handler(adapter, 1); 2608 if (cause & F_XGMAC_KR0) 2609 xgmac_intr_handler(adapter, 2); 2610 if (cause & F_XGMAC_KR1) 2611 xgmac_intr_handler(adapter, 3); 2612 if (cause & F_PCIE) 2613 pcie_intr_handler(adapter); 2614 if (cause & F_MC) 2615 mem_intr_handler(adapter, MEM_MC); 2616 if (cause & F_EDC0) 2617 mem_intr_handler(adapter, MEM_EDC0); 2618 if (cause & F_EDC1) 2619 mem_intr_handler(adapter, MEM_EDC1); 2620 if (cause & F_LE) 2621 le_intr_handler(adapter); 2622 if (cause & F_TP) 2623 tp_intr_handler(adapter); 2624 if (cause & F_MA) 2625 ma_intr_handler(adapter); 2626 if (cause & F_PM_TX) 2627 pmtx_intr_handler(adapter); 2628 if (cause & F_PM_RX) 2629 pmrx_intr_handler(adapter); 2630 if (cause & F_ULP_RX) 2631 ulprx_intr_handler(adapter); 2632 if (cause & F_CPL_SWITCH) 2633 cplsw_intr_handler(adapter); 2634 if (cause & F_SGE) 2635 sge_intr_handler(adapter); 2636 if (cause & F_ULP_TX) 2637 ulptx_intr_handler(adapter); 2638 2639 /* Clear the interrupts just processed for which we are the master. */ 2640 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK); 2641 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */ 2642 return 1; 2643 } 2644 2645 /** 2646 * t4_intr_enable - enable interrupts 2647 * @adapter: the adapter whose interrupts should be enabled 2648 * 2649 * Enable PF-specific interrupts for the calling function and the top-level 2650 * interrupt concentrator for global interrupts. Interrupts are already 2651 * enabled at each module, here we just enable the roots of the interrupt 2652 * hierarchies. 2653 * 2654 * Note: this function should be called only when the driver manages 2655 * non PF-specific interrupts from the various HW modules. Only one PCI 2656 * function at a time should be doing this. 
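 *
 * A minimal ordering sketch for that managing function, with "sc" the
 * adapter (error handling and locking omitted):
 *
 *	t4_intr_clear(sc);
 *	t4_intr_enable(sc);
 *	...
 *	t4_intr_disable(sc);	(at teardown)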
2657 */ 2658 void t4_intr_enable(struct adapter *adapter) 2659 { 2660 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); 2661 2662 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE | 2663 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | 2664 F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 | 2665 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 | 2666 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 | 2667 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | 2668 F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR | 2669 F_EGRESS_SIZE_ERR); 2670 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK); 2671 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf); 2672 } 2673 2674 /** 2675 * t4_intr_disable - disable interrupts 2676 * @adapter: the adapter whose interrupts should be disabled 2677 * 2678 * Disable interrupts. We only disable the top-level interrupt 2679 * concentrators. The caller must be a PCI function managing global 2680 * interrupts. 2681 */ 2682 void t4_intr_disable(struct adapter *adapter) 2683 { 2684 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); 2685 2686 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0); 2687 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0); 2688 } 2689 2690 /** 2691 * t4_intr_clear - clear all interrupts 2692 * @adapter: the adapter whose interrupts should be cleared 2693 * 2694 * Clears all interrupts. The caller must be a PCI function managing 2695 * global interrupts. 2696 */ 2697 void t4_intr_clear(struct adapter *adapter) 2698 { 2699 static const unsigned int cause_reg[] = { 2700 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3, 2701 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE, 2702 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE, 2703 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1), 2704 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE, 2705 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE), 2706 A_TP_INT_CAUSE, 2707 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE, 2708 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE, 2709 A_MPS_RX_PERR_INT_CAUSE, 2710 A_CPL_INTR_CAUSE, 2711 MYPF_REG(A_PL_PF_INT_CAUSE), 2712 A_PL_PL_INT_CAUSE, 2713 A_LE_DB_INT_CAUSE, 2714 }; 2715 2716 unsigned int i; 2717 2718 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i) 2719 t4_write_reg(adapter, cause_reg[i], 0xffffffff); 2720 2721 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE : 2722 A_MC_P_INT_CAUSE, 0xffffffff); 2723 2724 if (is_t4(adapter)) { 2725 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 2726 0xffffffff); 2727 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 2728 0xffffffff); 2729 } else 2730 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff); 2731 2732 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK); 2733 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */ 2734 } 2735 2736 /** 2737 * hash_mac_addr - return the hash value of a MAC address 2738 * @addr: the 48-bit Ethernet MAC address 2739 * 2740 * Hashes a MAC address according to the hash function used by HW inexact 2741 * (hash) address matching. 
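 *
 * Worked example: for 00:01:02:03:04:05 the two 24-bit halves are
 * 0x000102 and 0x030405; XOR-folding gives 0x030507 -> 0x030537 ->
 * 0x030923, and the low 6 bits yield a hash of 0x23.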
2742 */ 2743 static int hash_mac_addr(const u8 *addr) 2744 { 2745 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; 2746 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; 2747 a ^= b; 2748 a ^= (a >> 12); 2749 a ^= (a >> 6); 2750 return a & 0x3f; 2751 } 2752 2753 /** 2754 * t4_config_rss_range - configure a portion of the RSS mapping table 2755 * @adapter: the adapter 2756 * @mbox: mbox to use for the FW command 2757 * @viid: virtual interface whose RSS subtable is to be written 2758 * @start: start entry in the table to write 2759 * @n: how many table entries to write 2760 * @rspq: values for the "response queue" (Ingress Queue) lookup table 2761 * @nrspq: number of values in @rspq 2762 * 2763 * Programs the selected part of the VI's RSS mapping table with the 2764 * provided values. If @nrspq < @n the supplied values are used repeatedly 2765 * until the full table range is populated. 2766 * 2767 * The caller must ensure the values in @rspq are in the range allowed for 2768 * @viid. 2769 */ 2770 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 2771 int start, int n, const u16 *rspq, unsigned int nrspq) 2772 { 2773 int ret; 2774 const u16 *rsp = rspq; 2775 const u16 *rsp_end = rspq + nrspq; 2776 struct fw_rss_ind_tbl_cmd cmd; 2777 2778 memset(&cmd, 0, sizeof(cmd)); 2779 cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) | 2780 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 2781 V_FW_RSS_IND_TBL_CMD_VIID(viid)); 2782 cmd.retval_len16 = htonl(FW_LEN16(cmd)); 2783 2784 2785 /* 2786 * Each firmware RSS command can accommodate up to 32 RSS Ingress 2787 * Queue Identifiers. These Ingress Queue IDs are packed three to 2788 * a 32-bit word as 10-bit values with the upper remaining 2 bits 2789 * reserved. 2790 */ 2791 while (n > 0) { 2792 int nq = min(n, 32); 2793 int nq_packed = 0; 2794 __be32 *qp = &cmd.iq0_to_iq2; 2795 2796 /* 2797 * Set up the firmware RSS command header to send the next 2798 * "nq" Ingress Queue IDs to the firmware. 2799 */ 2800 cmd.niqid = htons(nq); 2801 cmd.startidx = htons(start); 2802 2803 /* 2804 * "nq" more entries are now accounted for; adjust "start" and "n" for the next loop iteration. 2805 */ 2806 start += nq; 2807 n -= nq; 2808 2809 /* 2810 * While there are still Ingress Queue IDs to stuff into the 2811 * current firmware RSS command, retrieve them from the 2812 * Ingress Queue ID array and insert them into the command. 2813 */ 2814 while (nq > 0) { 2815 /* 2816 * Grab up to the next 3 Ingress Queue IDs (wrapping 2817 * around the Ingress Queue ID array if necessary) and 2818 * insert them into the firmware RSS command at the 2819 * current 3-tuple position within the command. 2820 */ 2821 u16 qbuf[3]; 2822 u16 *qbp = qbuf; 2823 int nqbuf = min(3, nq); 2824 2825 nq -= nqbuf; 2826 qbuf[0] = qbuf[1] = qbuf[2] = 0; 2827 while (nqbuf && nq_packed < 32) { 2828 nqbuf--; 2829 nq_packed++; 2830 *qbp++ = *rsp++; 2831 if (rsp >= rsp_end) 2832 rsp = rspq; 2833 } 2834 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) | 2835 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) | 2836 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2])); 2837 } 2838 2839 /* 2840 * Send this portion of the RSS table update to the firmware; 2841 * bail out on any errors.
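 *
 * For example, writing 40 entries takes two commands: the first packs
 * 32 Ingress Queue IDs three per 32-bit word (11 words, the last of
 * them holding only two IDs), the second carries the remaining 8.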
2842 */ 2843 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); 2844 if (ret) 2845 return ret; 2846 } 2847 2848 return 0; 2849 } 2850 2851 /** 2852 * t4_config_glbl_rss - configure the global RSS mode 2853 * @adapter: the adapter 2854 * @mbox: mbox to use for the FW command 2855 * @mode: global RSS mode 2856 * @flags: mode-specific flags 2857 * 2858 * Sets the global RSS mode. 2859 */ 2860 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 2861 unsigned int flags) 2862 { 2863 struct fw_rss_glb_config_cmd c; 2864 2865 memset(&c, 0, sizeof(c)); 2866 c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | 2867 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 2868 c.retval_len16 = htonl(FW_LEN16(c)); 2869 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { 2870 c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode)); 2871 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { 2872 c.u.basicvirtual.mode_pkd = 2873 htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode)); 2874 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); 2875 } else 2876 return -EINVAL; 2877 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2878 } 2879 2880 /** 2881 * t4_config_vi_rss - configure per VI RSS settings 2882 * @adapter: the adapter 2883 * @mbox: mbox to use for the FW command 2884 * @viid: the VI id 2885 * @flags: RSS flags 2886 * @defq: id of the default RSS queue for the VI. 2887 * 2888 * Configures VI-specific RSS properties. 2889 */ 2890 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, 2891 unsigned int flags, unsigned int defq) 2892 { 2893 struct fw_rss_vi_config_cmd c; 2894 2895 memset(&c, 0, sizeof(c)); 2896 c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | 2897 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 2898 V_FW_RSS_VI_CONFIG_CMD_VIID(viid)); 2899 c.retval_len16 = htonl(FW_LEN16(c)); 2900 c.u.basicvirtual.defaultq_to_udpen = htonl(flags | 2901 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq)); 2902 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2903 } 2904 2905 /* Read an RSS table row */ 2906 static int rd_rss_row(struct adapter *adap, int row, u32 *val) 2907 { 2908 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row); 2909 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1, 2910 5, 0, val); 2911 } 2912 2913 /** 2914 * t4_read_rss - read the contents of the RSS mapping table 2915 * @adapter: the adapter 2916 * @map: holds the contents of the RSS mapping table 2917 * 2918 * Reads the contents of the RSS hash->queue mapping table. 2919 */ 2920 int t4_read_rss(struct adapter *adapter, u16 *map) 2921 { 2922 u32 val; 2923 int i, ret; 2924 2925 for (i = 0; i < RSS_NENTRIES / 2; ++i) { 2926 ret = rd_rss_row(adapter, i, &val); 2927 if (ret) 2928 return ret; 2929 *map++ = G_LKPTBLQUEUE0(val); 2930 *map++ = G_LKPTBLQUEUE1(val); 2931 } 2932 return 0; 2933 } 2934 2935 /** 2936 * t4_read_rss_key - read the global RSS key 2937 * @adap: the adapter 2938 * @key: 10-entry array holding the 320-bit RSS key 2939 * 2940 * Reads the global 320-bit RSS key. 2941 */ 2942 void t4_read_rss_key(struct adapter *adap, u32 *key) 2943 { 2944 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10, 2945 A_TP_RSS_SECRET_KEY0); 2946 } 2947 2948 /** 2949 * t4_write_rss_key - program one of the RSS keys 2950 * @adap: the adapter 2951 * @key: 10-entry array holding the 320-bit RSS key 2952 * @idx: which RSS key to write 2953 * 2954 * Writes one of the RSS keys with the given 320-bit value. 
If @idx is 2955 * 0..15 the corresponding entry in the RSS key table is written, 2956 * otherwise the global RSS key is written. 2957 */ 2958 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) 2959 { 2960 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10, 2961 A_TP_RSS_SECRET_KEY0); 2962 if (idx >= 0 && idx < 16) 2963 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, 2964 V_KEYWRADDR(idx) | F_KEYWREN); 2965 } 2966 2967 /** 2968 * t4_read_rss_pf_config - read PF RSS Configuration Table 2969 * @adapter: the adapter 2970 * @index: the entry in the PF RSS table to read 2971 * @valp: where to store the returned value 2972 * 2973 * Reads the PF RSS Configuration Table at the specified index and returns 2974 * the value found there. 2975 */ 2976 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp) 2977 { 2978 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2979 valp, 1, A_TP_RSS_PF0_CONFIG + index); 2980 } 2981 2982 /** 2983 * t4_write_rss_pf_config - write PF RSS Configuration Table 2984 * @adapter: the adapter 2985 * @index: the entry in the PF RSS table to write 2986 * @val: the value to store 2987 * 2988 * Writes the PF RSS Configuration Table at the specified index with the 2989 * specified value. 2990 */ 2991 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val) 2992 { 2993 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2994 &val, 1, A_TP_RSS_PF0_CONFIG + index); 2995 } 2996 2997 /** 2998 * t4_read_rss_vf_config - read VF RSS Configuration Table 2999 * @adapter: the adapter 3000 * @index: the entry in the VF RSS table to read 3001 * @vfl: where to store the returned VFL 3002 * @vfh: where to store the returned VFH 3003 * 3004 * Reads the VF RSS Configuration Table at the specified index and returns 3005 * the (VFL, VFH) values found there. 3006 */ 3007 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, 3008 u32 *vfl, u32 *vfh) 3009 { 3010 u32 vrt; 3011 3012 /* 3013 * Request that the index'th VF Table values be read into VFL/VFH. 3014 */ 3015 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); 3016 vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN); 3017 vrt |= V_VFWRADDR(index) | F_VFRDEN; 3018 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); 3019 3020 /* 3021 * Grab the VFL/VFH values ... 3022 */ 3023 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3024 vfl, 1, A_TP_RSS_VFL_CONFIG); 3025 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3026 vfh, 1, A_TP_RSS_VFH_CONFIG); 3027 } 3028 3029 /** 3030 * t4_write_rss_vf_config - write VF RSS Configuration Table 3031 * 3032 * @adapter: the adapter 3033 * @index: the entry in the VF RSS table to write 3034 * @vfl: the VFL to store 3035 * @vfh: the VFH to store 3036 * 3037 * Writes the VF RSS Configuration Table at the specified index with the 3038 * specified (VFL, VFH) values. 3039 */ 3040 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, 3041 u32 vfl, u32 vfh) 3042 { 3043 u32 vrt; 3044 3045 /* 3046 * Load up VFL/VFH with the values to be written ... 3047 */ 3048 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3049 &vfl, 1, A_TP_RSS_VFL_CONFIG); 3050 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3051 &vfh, 1, A_TP_RSS_VFH_CONFIG); 3052 3053 /* 3054 * Write the VFL/VFH into the VF Table at index'th location.
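 *
 * (This mirrors t4_read_rss_vf_config() above, which selects the entry
 * with F_VFRDEN and then pulls VFL/VFH back out through TP_PIO.)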
3055 */ 3056 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); 3057 vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN); 3058 vrt |= V_VFWRADDR(index) | F_VFWREN; 3059 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); 3060 } 3061 3062 /** 3063 * t4_read_rss_pf_map - read PF RSS Map 3064 * @adapter: the adapter 3065 * 3066 * Reads the PF RSS Map register and returns its value. 3067 */ 3068 u32 t4_read_rss_pf_map(struct adapter *adapter) 3069 { 3070 u32 pfmap; 3071 3072 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3073 &pfmap, 1, A_TP_RSS_PF_MAP); 3074 return pfmap; 3075 } 3076 3077 /** 3078 * t4_write_rss_pf_map - write PF RSS Map 3079 * @adapter: the adapter 3080 * @pfmap: PF RSS Map value 3081 * 3082 * Writes the specified value to the PF RSS Map register. 3083 */ 3084 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap) 3085 { 3086 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3087 &pfmap, 1, A_TP_RSS_PF_MAP); 3088 } 3089 3090 /** 3091 * t4_read_rss_pf_mask - read PF RSS Mask 3092 * @adapter: the adapter 3093 * 3094 * Reads the PF RSS Mask register and returns its value. 3095 */ 3096 u32 t4_read_rss_pf_mask(struct adapter *adapter) 3097 { 3098 u32 pfmask; 3099 3100 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3101 &pfmask, 1, A_TP_RSS_PF_MSK); 3102 return pfmask; 3103 } 3104 3105 /** 3106 * t4_write_rss_pf_mask - write PF RSS Mask 3107 * @adapter: the adapter 3108 * @pfmask: PF RSS Mask value 3109 * 3110 * Writes the specified value to the PF RSS Mask register. 3111 */ 3112 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask) 3113 { 3114 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3115 &pfmask, 1, A_TP_RSS_PF_MSK); 3116 } 3117 3118 static void refresh_vlan_pri_map(struct adapter *adap) 3119 { 3120 3121 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3122 &adap->params.tp.vlan_pri_map, 1, 3123 A_TP_VLAN_PRI_MAP); 3124 3125 /* 3126 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field 3127 * shift positions of several elements of the Compressed Filter Tuple 3128 * for this adapter, which we need frequently ... 3129 */ 3130 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN); 3131 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); 3132 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); 3133 adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL); 3134 3135 /* 3136 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID 3137 * represents the presence of an Outer VLAN instead of a VNIC ID. 3138 */ 3139 if ((adap->params.tp.ingress_config & F_VNIC) == 0) 3140 adap->params.tp.vnic_shift = -1; 3141 } 3142 3143 /** 3144 * t4_set_filter_mode - configure the optional components of filter tuples 3145 * @adap: the adapter 3146 * @mode_map: a bitmap selecting which optional filter components to enable 3147 * 3148 * Sets the filter mode by selecting the optional components to enable 3149 * in filter tuples. Returns 0 on success and a negative error if the 3150 * requested mode needs more bits than are available for optional 3151 * components.
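 *
 * The widths of the ten optional components, in bit order from FCoE
 * through Fragmentation, are { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
 * selecting all of them would need 83 bits, so any @mode_map whose
 * selected widths sum past FILTER_OPT_LEN is rejected with -EINVAL.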
3152 */ 3153 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map) 3154 { 3155 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 }; 3156 3157 int i, nbits = 0; 3158 3159 for (i = S_FCOE; i <= S_FRAGMENTATION; i++) 3160 if (mode_map & (1 << i)) 3161 nbits += width[i]; 3162 if (nbits > FILTER_OPT_LEN) 3163 return -EINVAL; 3164 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1, 3165 A_TP_VLAN_PRI_MAP); 3166 refresh_vlan_pri_map(adap); 3167 3168 return 0; 3169 } 3170 3171 /** 3172 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 3173 * @adap: the adapter 3174 * @v4: holds the TCP/IP counter values 3175 * @v6: holds the TCP/IPv6 counter values 3176 * 3177 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. 3178 * Either @v4 or @v6 may be %NULL to skip the corresponding stats. 3179 */ 3180 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 3181 struct tp_tcp_stats *v6) 3182 { 3183 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1]; 3184 3185 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST) 3186 #define STAT(x) val[STAT_IDX(x)] 3187 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) 3188 3189 if (v4) { 3190 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 3191 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST); 3192 v4->tcpOutRsts = STAT(OUT_RST); 3193 v4->tcpInSegs = STAT64(IN_SEG); 3194 v4->tcpOutSegs = STAT64(OUT_SEG); 3195 v4->tcpRetransSegs = STAT64(RXT_SEG); 3196 } 3197 if (v6) { 3198 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 3199 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST); 3200 v6->tcpOutRsts = STAT(OUT_RST); 3201 v6->tcpInSegs = STAT64(IN_SEG); 3202 v6->tcpOutSegs = STAT64(OUT_SEG); 3203 v6->tcpRetransSegs = STAT64(RXT_SEG); 3204 } 3205 #undef STAT64 3206 #undef STAT 3207 #undef STAT_IDX 3208 } 3209 3210 /** 3211 * t4_tp_get_err_stats - read TP's error MIB counters 3212 * @adap: the adapter 3213 * @st: holds the counter values 3214 * 3215 * Returns the values of TP's error counters. 3216 */ 3217 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) 3218 { 3219 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs, 3220 12, A_TP_MIB_MAC_IN_ERR_0); 3221 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops, 3222 8, A_TP_MIB_TNL_CNG_DROP_0); 3223 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops, 3224 4, A_TP_MIB_TNL_DROP_0); 3225 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops, 3226 4, A_TP_MIB_OFD_VLN_DROP_0); 3227 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs, 3228 4, A_TP_MIB_TCP_V6IN_ERR_0); 3229 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh, 3230 2, A_TP_MIB_OFD_ARP_DROP); 3231 } 3232 3233 /** 3234 * t4_tp_get_proxy_stats - read TP's proxy MIB counters 3235 * @adap: the adapter 3236 * @st: holds the counter values 3237 * 3238 * Returns the values of TP's proxy counters. 3239 */ 3240 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st) 3241 { 3242 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy, 3243 4, A_TP_MIB_TNL_LPBK_0); 3244 } 3245 3246 /** 3247 * t4_tp_get_cpl_stats - read TP's CPL MIB counters 3248 * @adap: the adapter 3249 * @st: holds the counter values 3250 * 3251 * Returns the values of TP's CPL counters. 
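 *
 * The eight MIB words starting at A_TP_MIB_CPL_IN_REQ_0 are stored
 * through st->req; presumably they cover the four per-channel request
 * counters followed by the four response counters (this assumes the
 * layout of struct tp_cpl_stats, which is defined elsewhere).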
3252 */ 3253 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st) 3254 { 3255 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req, 3256 8, A_TP_MIB_CPL_IN_REQ_0); 3257 } 3258 3259 /** 3260 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters 3261 * @adap: the adapter 3262 * @st: holds the counter values 3263 * 3264 * Returns the values of TP's RDMA counters. 3265 */ 3266 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st) 3267 { 3268 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod, 3269 2, A_TP_MIB_RQE_DFR_MOD); 3270 } 3271 3272 /** 3273 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port 3274 * @adap: the adapter 3275 * @idx: the port index 3276 * @st: holds the counter values 3277 * 3278 * Returns the values of TP's FCoE counters for the selected port. 3279 */ 3280 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, 3281 struct tp_fcoe_stats *st) 3282 { 3283 u32 val[2]; 3284 3285 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP, 3286 1, A_TP_MIB_FCOE_DDP_0 + idx); 3287 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop, 3288 1, A_TP_MIB_FCOE_DROP_0 + idx); 3289 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 3290 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx); 3291 st->octetsDDP = ((u64)val[0] << 32) | val[1]; 3292 } 3293 3294 /** 3295 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters 3296 * @adap: the adapter 3297 * @st: holds the counter values 3298 * 3299 * Returns the values of TP's counters for non-TCP directly-placed packets. 3300 */ 3301 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st) 3302 { 3303 u32 val[4]; 3304 3305 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4, 3306 A_TP_MIB_USM_PKTS); 3307 st->frames = val[0]; 3308 st->drops = val[1]; 3309 st->octets = ((u64)val[2] << 32) | val[3]; 3310 } 3311 3312 /** 3313 * t4_read_mtu_tbl - returns the values in the HW path MTU table 3314 * @adap: the adapter 3315 * @mtus: where to store the MTU values 3316 * @mtu_log: where to store the MTU base-2 log (may be %NULL) 3317 * 3318 * Reads the HW path MTU table. 3319 */ 3320 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) 3321 { 3322 u32 v; 3323 int i; 3324 3325 for (i = 0; i < NMTUS; ++i) { 3326 t4_write_reg(adap, A_TP_MTU_TABLE, 3327 V_MTUINDEX(0xff) | V_MTUVALUE(i)); 3328 v = t4_read_reg(adap, A_TP_MTU_TABLE); 3329 mtus[i] = G_MTUVALUE(v); 3330 if (mtu_log) 3331 mtu_log[i] = G_MTUWIDTH(v); 3332 } 3333 } 3334 3335 /** 3336 * t4_read_cong_tbl - reads the congestion control table 3337 * @adap: the adapter 3338 * @incr: where to store the alpha values 3339 * 3340 * Reads the additive increments programmed into the HW congestion 3341 * control table. 3342 */ 3343 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]) 3344 { 3345 unsigned int mtu, w; 3346 3347 for (mtu = 0; mtu < NMTUS; ++mtu) 3348 for (w = 0; w < NCCTRL_WIN; ++w) { 3349 t4_write_reg(adap, A_TP_CCTRL_TABLE, 3350 V_ROWINDEX(0xffff) | (mtu << 5) | w); 3351 incr[mtu][w] = (u16)t4_read_reg(adap, 3352 A_TP_CCTRL_TABLE) & 0x1fff; 3353 } 3354 } 3355 3356 /** 3357 * t4_read_pace_tbl - read the pace table 3358 * @adap: the adapter 3359 * @pace_vals: holds the returned values 3360 * 3361 * Returns the values of TP's pace table in microseconds. 
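 *
 * Raw table entries are in DACK ticks and are converted here with
 * dack_ticks_to_usec(); t4_set_pace_tbl() below performs the inverse
 * conversion when programming the table.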
3362 */ 3363 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]) 3364 { 3365 unsigned int i, v; 3366 3367 for (i = 0; i < NTX_SCHED; i++) { 3368 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i); 3369 v = t4_read_reg(adap, A_TP_PACE_TABLE); 3370 pace_vals[i] = dack_ticks_to_usec(adap, v); 3371 } 3372 } 3373 3374 /** 3375 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register 3376 * @adap: the adapter 3377 * @addr: the indirect TP register address 3378 * @mask: specifies the field within the register to modify 3379 * @val: new value for the field 3380 * 3381 * Sets a field of an indirect TP register to the given value. 3382 */ 3383 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 3384 unsigned int mask, unsigned int val) 3385 { 3386 t4_write_reg(adap, A_TP_PIO_ADDR, addr); 3387 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; 3388 t4_write_reg(adap, A_TP_PIO_DATA, val); 3389 } 3390 3391 /** 3392 * init_cong_ctrl - initialize congestion control parameters 3393 * @a: the alpha values for congestion control 3394 * @b: the beta values for congestion control 3395 * 3396 * Initialize the congestion control parameters. 3397 */ 3398 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) 3399 { 3400 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 3401 a[9] = 2; 3402 a[10] = 3; 3403 a[11] = 4; 3404 a[12] = 5; 3405 a[13] = 6; 3406 a[14] = 7; 3407 a[15] = 8; 3408 a[16] = 9; 3409 a[17] = 10; 3410 a[18] = 14; 3411 a[19] = 17; 3412 a[20] = 21; 3413 a[21] = 25; 3414 a[22] = 30; 3415 a[23] = 35; 3416 a[24] = 45; 3417 a[25] = 60; 3418 a[26] = 80; 3419 a[27] = 100; 3420 a[28] = 200; 3421 a[29] = 300; 3422 a[30] = 400; 3423 a[31] = 500; 3424 3425 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 3426 b[9] = b[10] = 1; 3427 b[11] = b[12] = 2; 3428 b[13] = b[14] = b[15] = b[16] = 3; 3429 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 3430 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 3431 b[28] = b[29] = 6; 3432 b[30] = b[31] = 7; 3433 } 3434 3435 /* The minimum additive increment value for the congestion control table */ 3436 #define CC_MIN_INCR 2U 3437 3438 /** 3439 * t4_load_mtus - write the MTU and congestion control HW tables 3440 * @adap: the adapter 3441 * @mtus: the values for the MTU table 3442 * @alpha: the values for the congestion control alpha parameter 3443 * @beta: the values for the congestion control beta parameter 3444 * 3445 * Write the HW MTU table with the supplied MTUs and the high-speed 3446 * congestion control table with the supplied alpha, beta, and MTUs. 3447 * We write the two tables together because the additive increments 3448 * depend on the MTUs. 
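 *
 * For each (MTU, window) pair the additive increment is
 * max(((mtu - 40) * alpha[w]) / avg_pkts[w], CC_MIN_INCR); e.g. an MTU
 * of 1500 with alpha 2 in a window averaging 10 packets gives
 * (1460 * 2) / 10 = 292.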
3449 */ 3450 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 3451 const unsigned short *alpha, const unsigned short *beta) 3452 { 3453 static const unsigned int avg_pkts[NCCTRL_WIN] = { 3454 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 3455 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 3456 28672, 40960, 57344, 81920, 114688, 163840, 229376 3457 }; 3458 3459 unsigned int i, w; 3460 3461 for (i = 0; i < NMTUS; ++i) { 3462 unsigned int mtu = mtus[i]; 3463 unsigned int log2 = fls(mtu); 3464 3465 if (!(mtu & ((1 << log2) >> 2))) /* round */ 3466 log2--; 3467 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) | 3468 V_MTUWIDTH(log2) | V_MTUVALUE(mtu)); 3469 3470 for (w = 0; w < NCCTRL_WIN; ++w) { 3471 unsigned int inc; 3472 3473 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 3474 CC_MIN_INCR); 3475 3476 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | 3477 (w << 16) | (beta[w] << 13) | inc); 3478 } 3479 } 3480 } 3481 3482 /** 3483 * t4_set_pace_tbl - set the pace table 3484 * @adap: the adapter 3485 * @pace_vals: the pace values in microseconds 3486 * @start: index of the first entry in the HW pace table to set 3487 * @n: how many entries to set 3488 * 3489 * Sets (a subset of the) HW pace table. 3490 */ 3491 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals, 3492 unsigned int start, unsigned int n) 3493 { 3494 unsigned int vals[NTX_SCHED], i; 3495 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000); 3496 3497 if (n > NTX_SCHED) 3498 return -ERANGE; 3499 3500 /* convert values from us to dack ticks, rounding to closest value */ 3501 for (i = 0; i < n; i++, pace_vals++) { 3502 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns; 3503 if (vals[i] > 0x7ff) 3504 return -ERANGE; 3505 if (*pace_vals && vals[i] == 0) 3506 return -ERANGE; 3507 } 3508 for (i = 0; i < n; i++, start++) 3509 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]); 3510 return 0; 3511 } 3512 3513 /** 3514 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler 3515 * @adap: the adapter 3516 * @kbps: target rate in Kbps 3517 * @sched: the scheduler index 3518 * 3519 * Configure a Tx HW scheduler for the target rate. 3520 */ 3521 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps) 3522 { 3523 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; 3524 unsigned int clk = adap->params.vpd.cclk * 1000; 3525 unsigned int selected_cpt = 0, selected_bpt = 0; 3526 3527 if (kbps > 0) { 3528 kbps *= 125; /* -> bytes */ 3529 for (cpt = 1; cpt <= 255; cpt++) { 3530 tps = clk / cpt; 3531 bpt = (kbps + tps / 2) / tps; 3532 if (bpt > 0 && bpt <= 255) { 3533 v = bpt * tps; 3534 delta = v >= kbps ? 
v - kbps : kbps - v;
3535 				if (delta < mindelta) {
3536 					mindelta = delta;
3537 					selected_cpt = cpt;
3538 					selected_bpt = bpt;
3539 				}
3540 			} else if (selected_cpt)
3541 				break;
3542 		}
3543 		if (!selected_cpt)
3544 			return -EINVAL;
3545 	}
3546 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3547 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3548 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3549 	if (sched & 1)
3550 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3551 	else
3552 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3553 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3554 	return 0;
3555 }
3556 
3557 /**
3558  * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3559  * @adap: the adapter
3560  * @sched: the scheduler index
3561  * @ipg: the interpacket delay in tenths of nanoseconds
3562  *
3563  * Set the interpacket delay for a HW packet rate scheduler.
3564  */
3565 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3566 {
3567 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3568 
3569 	/* convert ipg to nearest number of core clocks */
3570 	ipg *= core_ticks_per_usec(adap);
3571 	ipg = (ipg + 5000) / 10000;
3572 	if (ipg > M_TXTIMERSEPQ0)
3573 		return -EINVAL;
3574 
3575 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3576 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3577 	if (sched & 1)
3578 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3579 	else
3580 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3581 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3582 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3583 	return 0;
3584 }
3585 
3586 /**
3587  * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3588  * @adap: the adapter
3589  * @sched: the scheduler index
3590  * @kbps: where to store the current rate in Kbps (0 if the scheduler is disabled)
3591  * @ipg: where to store the interpacket delay in tenths of nanoseconds
3592  *
3593  * Return the current configuration of a HW Tx scheduler.
3594  */
3595 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3596 		     unsigned int *ipg)
3597 {
3598 	unsigned int v, addr, bpt, cpt;
3599 
3600 	if (kbps) {
3601 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3602 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3603 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3604 		if (sched & 1)
3605 			v >>= 16;
3606 		bpt = (v >> 8) & 0xff;
3607 		cpt = v & 0xff;
3608 		if (!cpt)
3609 			*kbps = 0;	/* scheduler disabled */
3610 		else {
3611 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3612 			*kbps = (v * bpt) / 125;
3613 		}
3614 	}
3615 	if (ipg) {
3616 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3617 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3618 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3619 		if (sched & 1)
3620 			v >>= 16;
3621 		v &= 0xffff;
3622 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3623 	}
3624 }
3625 
3626 /*
3627  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3628  * clocks. The formula is
3629  *
3630  *	bytes/s = bytes256 * 256 * ClkFreq / 4096
3631  *
3632  * which is equivalent to
3633  *
3634  *	bytes/s = 62.5 * bytes256 * ClkFreq_ms
 *
 * where ClkFreq_ms is the core clock in kHz (cycles per millisecond), the
 * unit in which adap->params.vpd.cclk is kept; v * 62 + v / 2 below is the
 * integer form of 62.5 * v.
3635  */
3636 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3637 {
3638 	u64 v = (u64)bytes256 * adap->params.vpd.cclk;
3639 
3640 	return v * 62 + v / 2;
3641 }
3642 
3643 /**
3644  * t4_get_chan_txrate - get the current per channel Tx rates
3645  * @adap: the adapter
3646  * @nic_rate: rates for NIC traffic
3647  * @ofld_rate: rates for offloaded traffic
3648  *
3649  * Return the current Tx rates in bytes/s for NIC and offloaded traffic
3650  * for each channel.
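 *
 * A minimal usage sketch (illustrative; assumes the four Tx channels
 * implied by the per-channel register fields read below):
 *
 *	u64 nic[4], ofld[4];
 *
 *	t4_get_chan_txrate(adap, nic, ofld);
 *	printf("chan0: NIC %ju bytes/s, offload %ju bytes/s\n",
 *	    (uintmax_t)nic[0], (uintmax_t)ofld[0]);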
3651 */ 3652 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) 3653 { 3654 u32 v; 3655 3656 v = t4_read_reg(adap, A_TP_TX_TRATE); 3657 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v)); 3658 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v)); 3659 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v)); 3660 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v)); 3661 3662 v = t4_read_reg(adap, A_TP_TX_ORATE); 3663 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v)); 3664 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v)); 3665 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v)); 3666 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v)); 3667 } 3668 3669 /** 3670 * t4_set_trace_filter - configure one of the tracing filters 3671 * @adap: the adapter 3672 * @tp: the desired trace filter parameters 3673 * @idx: which filter to configure 3674 * @enable: whether to enable or disable the filter 3675 * 3676 * Configures one of the tracing filters available in HW. If @tp is %NULL 3677 * it indicates that the filter is already written in the register and it 3678 * just needs to be enabled or disabled. 3679 */ 3680 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, 3681 int idx, int enable) 3682 { 3683 int i, ofst = idx * 4; 3684 u32 data_reg, mask_reg, cfg; 3685 u32 multitrc = F_TRCMULTIFILTER; 3686 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN; 3687 3688 if (idx < 0 || idx >= NTRACE) 3689 return -EINVAL; 3690 3691 if (tp == NULL || !enable) { 3692 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 3693 enable ? en : 0); 3694 return 0; 3695 } 3696 3697 /* 3698 * TODO - After T4 data book is updated, specify the exact 3699 * section below. 3700 * 3701 * See T4 data book - MPS section for a complete description 3702 * of the below if..else handling of A_MPS_TRC_CFG register 3703 * value. 3704 */ 3705 cfg = t4_read_reg(adap, A_MPS_TRC_CFG); 3706 if (cfg & F_TRCMULTIFILTER) { 3707 /* 3708 * If multiple tracers are enabled, then maximum 3709 * capture size is 2.5KB (FIFO size of a single channel) 3710 * minus 2 flits for CPL_TRACE_PKT header. 3711 */ 3712 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8))) 3713 return -EINVAL; 3714 } else { 3715 /* 3716 * If multiple tracers are disabled, to avoid deadlocks 3717 * maximum packet capture size of 9600 bytes is recommended. 3718 * Also in this mode, only trace0 can be enabled and running. 3719 */ 3720 multitrc = 0; 3721 if (tp->snap_len > 9600 || idx) 3722 return -EINVAL; 3723 } 3724 3725 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 || 3726 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET || 3727 tp->min_len > M_TFMINPKTSIZE) 3728 return -EINVAL; 3729 3730 /* stop the tracer we'll be changing */ 3731 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0); 3732 3733 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH); 3734 data_reg = A_MPS_TRC_FILTER0_MATCH + idx; 3735 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx; 3736 3737 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 3738 t4_write_reg(adap, data_reg, tp->data[i]); 3739 t4_write_reg(adap, mask_reg, ~tp->mask[i]); 3740 } 3741 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst, 3742 V_TFCAPTUREMAX(tp->snap_len) | 3743 V_TFMINPKTSIZE(tp->min_len)); 3744 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 3745 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en | 3746 (is_t4(adap) ? 
3747 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) : 3748 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert))); 3749 3750 return 0; 3751 } 3752 3753 /** 3754 * t4_get_trace_filter - query one of the tracing filters 3755 * @adap: the adapter 3756 * @tp: the current trace filter parameters 3757 * @idx: which trace filter to query 3758 * @enabled: non-zero if the filter is enabled 3759 * 3760 * Returns the current settings of one of the HW tracing filters. 3761 */ 3762 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, 3763 int *enabled) 3764 { 3765 u32 ctla, ctlb; 3766 int i, ofst = idx * 4; 3767 u32 data_reg, mask_reg; 3768 3769 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst); 3770 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst); 3771 3772 if (is_t4(adap)) { 3773 *enabled = !!(ctla & F_TFEN); 3774 tp->port = G_TFPORT(ctla); 3775 tp->invert = !!(ctla & F_TFINVERTMATCH); 3776 } else { 3777 *enabled = !!(ctla & F_T5_TFEN); 3778 tp->port = G_T5_TFPORT(ctla); 3779 tp->invert = !!(ctla & F_T5_TFINVERTMATCH); 3780 } 3781 tp->snap_len = G_TFCAPTUREMAX(ctlb); 3782 tp->min_len = G_TFMINPKTSIZE(ctlb); 3783 tp->skip_ofst = G_TFOFFSET(ctla); 3784 tp->skip_len = G_TFLENGTH(ctla); 3785 3786 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx; 3787 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst; 3788 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst; 3789 3790 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 3791 tp->mask[i] = ~t4_read_reg(adap, mask_reg); 3792 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; 3793 } 3794 } 3795 3796 /** 3797 * t4_pmtx_get_stats - returns the HW stats from PMTX 3798 * @adap: the adapter 3799 * @cnt: where to store the count statistics 3800 * @cycles: where to store the cycle statistics 3801 * 3802 * Returns performance statistics from PMTX. 3803 */ 3804 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 3805 { 3806 int i; 3807 u32 data[2]; 3808 3809 for (i = 0; i < PM_NSTATS; i++) { 3810 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1); 3811 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT); 3812 if (is_t4(adap)) 3813 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB); 3814 else { 3815 t4_read_indirect(adap, A_PM_TX_DBG_CTRL, 3816 A_PM_TX_DBG_DATA, data, 2, 3817 A_PM_TX_DBG_STAT_MSB); 3818 cycles[i] = (((u64)data[0] << 32) | data[1]); 3819 } 3820 } 3821 } 3822 3823 /** 3824 * t4_pmrx_get_stats - returns the HW stats from PMRX 3825 * @adap: the adapter 3826 * @cnt: where to store the count statistics 3827 * @cycles: where to store the cycle statistics 3828 * 3829 * Returns performance statistics from PMRX. 3830 */ 3831 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 3832 { 3833 int i; 3834 u32 data[2]; 3835 3836 for (i = 0; i < PM_NSTATS; i++) { 3837 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1); 3838 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT); 3839 if (is_t4(adap)) 3840 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB); 3841 else { 3842 t4_read_indirect(adap, A_PM_RX_DBG_CTRL, 3843 A_PM_RX_DBG_DATA, data, 2, 3844 A_PM_RX_DBG_STAT_MSB); 3845 cycles[i] = (((u64)data[0] << 32) | data[1]); 3846 } 3847 } 3848 } 3849 3850 /** 3851 * get_mps_bg_map - return the buffer groups associated with a port 3852 * @adap: the adapter 3853 * @idx: the port index 3854 * 3855 * Returns a bitmap indicating which MPS buffer groups are associated 3856 * with the given port. Bit i is set if buffer group i is used by the 3857 * port. 
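 *
 * For example: in a 1-port configuration port 0 gets all four buffer
 * groups (0xf), in a 2-port configuration ports 0 and 1 get 0x3 and 0xc
 * respectively, and in a 4-port configuration each port gets exactly one
 * buffer group (1 << idx).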
3858 */ 3859 static unsigned int get_mps_bg_map(struct adapter *adap, int idx) 3860 { 3861 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL)); 3862 3863 if (n == 0) 3864 return idx == 0 ? 0xf : 0; 3865 if (n == 1) 3866 return idx < 2 ? (3 << (2 * idx)) : 0; 3867 return 1 << idx; 3868 } 3869 3870 /** 3871 * t4_get_port_stats_offset - collect port stats relative to a previous 3872 * snapshot 3873 * @adap: The adapter 3874 * @idx: The port 3875 * @stats: Current stats to fill 3876 * @offset: Previous stats snapshot 3877 */ 3878 void t4_get_port_stats_offset(struct adapter *adap, int idx, 3879 struct port_stats *stats, 3880 struct port_stats *offset) 3881 { 3882 u64 *s, *o; 3883 int i; 3884 3885 t4_get_port_stats(adap, idx, stats); 3886 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ; 3887 i < (sizeof(struct port_stats)/sizeof(u64)) ; 3888 i++, s++, o++) 3889 *s -= *o; 3890 } 3891 3892 /** 3893 * t4_get_port_stats - collect port statistics 3894 * @adap: the adapter 3895 * @idx: the port index 3896 * @p: the stats structure to fill 3897 * 3898 * Collect statistics related to the given port from HW. 3899 */ 3900 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) 3901 { 3902 u32 bgmap = get_mps_bg_map(adap, idx); 3903 3904 #define GET_STAT(name) \ 3905 t4_read_reg64(adap, \ 3906 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \ 3907 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))) 3908 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 3909 3910 p->tx_pause = GET_STAT(TX_PORT_PAUSE); 3911 p->tx_octets = GET_STAT(TX_PORT_BYTES); 3912 p->tx_frames = GET_STAT(TX_PORT_FRAMES); 3913 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); 3914 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); 3915 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); 3916 p->tx_error_frames = GET_STAT(TX_PORT_ERROR); 3917 p->tx_frames_64 = GET_STAT(TX_PORT_64B); 3918 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); 3919 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); 3920 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); 3921 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); 3922 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); 3923 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); 3924 p->tx_drop = GET_STAT(TX_PORT_DROP); 3925 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); 3926 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); 3927 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); 3928 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); 3929 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); 3930 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); 3931 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); 3932 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); 3933 3934 p->rx_pause = GET_STAT(RX_PORT_PAUSE); 3935 p->rx_octets = GET_STAT(RX_PORT_BYTES); 3936 p->rx_frames = GET_STAT(RX_PORT_FRAMES); 3937 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); 3938 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); 3939 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); 3940 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); 3941 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); 3942 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); 3943 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); 3944 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); 3945 p->rx_runt = GET_STAT(RX_PORT_LESS_64B); 3946 p->rx_frames_64 = GET_STAT(RX_PORT_64B); 3947 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); 3948 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); 3949 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); 3950 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); 3951 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); 
3952 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); 3953 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); 3954 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); 3955 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); 3956 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); 3957 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); 3958 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); 3959 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); 3960 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); 3961 3962 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; 3963 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; 3964 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; 3965 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; 3966 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; 3967 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; 3968 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; 3969 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; 3970 3971 #undef GET_STAT 3972 #undef GET_STAT_COM 3973 } 3974 3975 /** 3976 * t4_clr_port_stats - clear port statistics 3977 * @adap: the adapter 3978 * @idx: the port index 3979 * 3980 * Clear HW statistics for the given port. 3981 */ 3982 void t4_clr_port_stats(struct adapter *adap, int idx) 3983 { 3984 unsigned int i; 3985 u32 bgmap = get_mps_bg_map(adap, idx); 3986 u32 port_base_addr; 3987 3988 if (is_t4(adap)) 3989 port_base_addr = PORT_BASE(idx); 3990 else 3991 port_base_addr = T5_PORT_BASE(idx); 3992 3993 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; 3994 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) 3995 t4_write_reg(adap, port_base_addr + i, 0); 3996 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; 3997 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) 3998 t4_write_reg(adap, port_base_addr + i, 0); 3999 for (i = 0; i < 4; i++) 4000 if (bgmap & (1 << i)) { 4001 t4_write_reg(adap, 4002 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0); 4003 t4_write_reg(adap, 4004 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0); 4005 } 4006 } 4007 4008 /** 4009 * t4_get_lb_stats - collect loopback port statistics 4010 * @adap: the adapter 4011 * @idx: the loopback port index 4012 * @p: the stats structure to fill 4013 * 4014 * Return HW statistics for the given loopback port. 4015 */ 4016 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) 4017 { 4018 u32 bgmap = get_mps_bg_map(adap, idx); 4019 4020 #define GET_STAT(name) \ 4021 t4_read_reg64(adap, \ 4022 (is_t4(adap) ? \ 4023 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \ 4024 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))) 4025 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 4026 4027 p->octets = GET_STAT(BYTES); 4028 p->frames = GET_STAT(FRAMES); 4029 p->bcast_frames = GET_STAT(BCAST); 4030 p->mcast_frames = GET_STAT(MCAST); 4031 p->ucast_frames = GET_STAT(UCAST); 4032 p->error_frames = GET_STAT(ERROR); 4033 4034 p->frames_64 = GET_STAT(64B); 4035 p->frames_65_127 = GET_STAT(65B_127B); 4036 p->frames_128_255 = GET_STAT(128B_255B); 4037 p->frames_256_511 = GET_STAT(256B_511B); 4038 p->frames_512_1023 = GET_STAT(512B_1023B); 4039 p->frames_1024_1518 = GET_STAT(1024B_1518B); 4040 p->frames_1519_max = GET_STAT(1519B_MAX); 4041 p->drop = GET_STAT(DROP_FRAMES); 4042 4043 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; 4044 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; 4045 p->ovflow2 = (bgmap & 4) ? 
GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; 4046 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; 4047 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; 4048 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; 4049 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; 4050 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; 4051 4052 #undef GET_STAT 4053 #undef GET_STAT_COM 4054 } 4055 4056 /** 4057 * t4_wol_magic_enable - enable/disable magic packet WoL 4058 * @adap: the adapter 4059 * @port: the physical port index 4060 * @addr: MAC address expected in magic packets, %NULL to disable 4061 * 4062 * Enables/disables magic packet wake-on-LAN for the selected port. 4063 */ 4064 void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 4065 const u8 *addr) 4066 { 4067 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; 4068 4069 if (is_t4(adap)) { 4070 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO); 4071 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI); 4072 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2); 4073 } else { 4074 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO); 4075 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI); 4076 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2); 4077 } 4078 4079 if (addr) { 4080 t4_write_reg(adap, mag_id_reg_l, 4081 (addr[2] << 24) | (addr[3] << 16) | 4082 (addr[4] << 8) | addr[5]); 4083 t4_write_reg(adap, mag_id_reg_h, 4084 (addr[0] << 8) | addr[1]); 4085 } 4086 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN, 4087 V_MAGICEN(addr != NULL)); 4088 } 4089 4090 /** 4091 * t4_wol_pat_enable - enable/disable pattern-based WoL 4092 * @adap: the adapter 4093 * @port: the physical port index 4094 * @map: bitmap of which HW pattern filters to set 4095 * @mask0: byte mask for bytes 0-63 of a packet 4096 * @mask1: byte mask for bytes 64-127 of a packet 4097 * @crc: Ethernet CRC for selected bytes 4098 * @enable: enable/disable switch 4099 * 4100 * Sets the pattern filters indicated in @map to mask out the bytes 4101 * specified in @mask0/@mask1 in received packets and compare the CRC of 4102 * the resulting packet against @crc. If @enable is %true pattern-based 4103 * WoL is enabled, otherwise disabled. 4104 */ 4105 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, 4106 u64 mask0, u64 mask1, unsigned int crc, bool enable) 4107 { 4108 int i; 4109 u32 port_cfg_reg; 4110 4111 if (is_t4(adap)) 4112 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2); 4113 else 4114 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2); 4115 4116 if (!enable) { 4117 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0); 4118 return 0; 4119 } 4120 if (map > 0xff) 4121 return -EINVAL; 4122 4123 #define EPIO_REG(name) \ 4124 (is_t4(adap) ? 
PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \ 4125 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name)) 4126 4127 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 4128 t4_write_reg(adap, EPIO_REG(DATA2), mask1); 4129 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); 4130 4131 for (i = 0; i < NWOL_PAT; i++, map >>= 1) { 4132 if (!(map & 1)) 4133 continue; 4134 4135 /* write byte masks */ 4136 t4_write_reg(adap, EPIO_REG(DATA0), mask0); 4137 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR); 4138 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 4139 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY) 4140 return -ETIMEDOUT; 4141 4142 /* write CRC */ 4143 t4_write_reg(adap, EPIO_REG(DATA0), crc); 4144 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR); 4145 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 4146 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY) 4147 return -ETIMEDOUT; 4148 } 4149 #undef EPIO_REG 4150 4151 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN); 4152 return 0; 4153 } 4154 4155 /** 4156 * t4_mk_filtdelwr - create a delete filter WR 4157 * @ftid: the filter ID 4158 * @wr: the filter work request to populate 4159 * @qid: ingress queue to receive the delete notification 4160 * 4161 * Creates a filter work request to delete the supplied filter. If @qid is 4162 * negative the delete notification is suppressed. 4163 */ 4164 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) 4165 { 4166 memset(wr, 0, sizeof(*wr)); 4167 wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR)); 4168 wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16)); 4169 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) | 4170 V_FW_FILTER_WR_NOREPLY(qid < 0)); 4171 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER); 4172 if (qid >= 0) 4173 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid)); 4174 } 4175 4176 #define INIT_CMD(var, cmd, rd_wr) do { \ 4177 (var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \ 4178 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \ 4179 (var).retval_len16 = htonl(FW_LEN16(var)); \ 4180 } while (0) 4181 4182 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val) 4183 { 4184 struct fw_ldst_cmd c; 4185 4186 memset(&c, 0, sizeof(c)); 4187 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4188 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); 4189 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4190 c.u.addrval.addr = htonl(addr); 4191 c.u.addrval.val = htonl(val); 4192 4193 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4194 } 4195 4196 /** 4197 * t4_mdio_rd - read a PHY register through MDIO 4198 * @adap: the adapter 4199 * @mbox: mailbox to use for the FW command 4200 * @phy_addr: the PHY address 4201 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 4202 * @reg: the register to read 4203 * @valp: where to store the value 4204 * 4205 * Issues a FW command through the given mailbox to read a PHY register. 
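 *
 * A minimal usage sketch (illustrative; reads register 2, a PHY
 * identifier word per IEEE 802.3 clause 45, from the PMA/PMD MMD of a
 * PHY at address 0 -- the PHY layout is an assumption here):
 *
 *	unsigned int val;
 *	int ret = t4_mdio_rd(adap, mbox, 0, 1, 2, &val);
 *
 * On success "val" holds the 16-bit register contents.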
4206 */
4207 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4208 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
4209 {
4210 	int ret;
4211 	struct fw_ldst_cmd c;
4212 
4213 	memset(&c, 0, sizeof(c));
4214 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4215 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4216 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4217 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4218 		V_FW_LDST_CMD_MMD(mmd));
4219 	c.u.mdio.raddr = htons(reg);
4220 
4221 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4222 	if (ret == 0)
4223 		*valp = ntohs(c.u.mdio.rval);
4224 	return ret;
4225 }
4226 
4227 /**
4228  * t4_mdio_wr - write a PHY register through MDIO
4229  * @adap: the adapter
4230  * @mbox: mailbox to use for the FW command
4231  * @phy_addr: the PHY address
4232  * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4233  * @reg: the register to write
4234  * @val: value to write
4235  *
4236  * Issues a FW command through the given mailbox to write a PHY register.
4237  */
4238 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4239 	       unsigned int mmd, unsigned int reg, unsigned int val)
4240 {
4241 	struct fw_ldst_cmd c;
4242 
4243 	memset(&c, 0, sizeof(c));
4244 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4245 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4246 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4247 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4248 		V_FW_LDST_CMD_MMD(mmd));
4249 	c.u.mdio.raddr = htons(reg);
4250 	c.u.mdio.rval = htons(val);
4251 
4252 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4253 }
4254 
4255 /**
4256  * t4_i2c_rd - read I2C data from adapter
4257  * @adap: the adapter
 * @mbox: mailbox to use for the FW command
4258  * @port: Port number if per-port device; <0 if not
4259  * @devid: per-port device ID or absolute device ID
4260  * @offset: byte offset into device I2C space
4261  * @len: byte length of I2C space data
4262  * @buf: buffer in which to return I2C data
4263  *
4264  * Reads the I2C data from the indicated device and location.
4265  */
4266 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
4267 	      int port, unsigned int devid,
4268 	      unsigned int offset, unsigned int len,
4269 	      u8 *buf)
4270 {
4271 	struct fw_ldst_cmd ldst;
4272 	int ret;
4273 
4274 	if (port >= 4 ||
4275 	    devid >= 256 ||
4276 	    offset >= 256 ||
4277 	    len > sizeof ldst.u.i2c.data)
4278 		return -EINVAL;
4279 
4280 	memset(&ldst, 0, sizeof ldst);
4281 	ldst.op_to_addrspace =
4282 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4283 			    F_FW_CMD_REQUEST |
4284 			    F_FW_CMD_READ |
4285 			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4286 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4287 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4288 	ldst.u.i2c.did = devid;
4289 	ldst.u.i2c.boffset = offset;
4290 	ldst.u.i2c.blen = len;
4291 	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4292 	if (!ret)
4293 		memcpy(buf, ldst.u.i2c.data, len);
4294 	return ret;
4295 }
4296 
4297 /**
4298  * t4_i2c_wr - write I2C data to adapter
4299  * @adap: the adapter
 * @mbox: mailbox to use for the FW command
4300  * @port: Port number if per-port device; <0 if not
4301  * @devid: per-port device ID or absolute device ID
4302  * @offset: byte offset into device I2C space
4303  * @len: byte length of I2C space data
4304  * @buf: buffer containing new I2C data
4305  *
4306  * Write the I2C data to the indicated device and location.
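 *
 * A minimal usage sketch (illustrative; writes one byte at offset 0 of
 * per-port I2C device 0xa0 on port 0 -- the device ID is an assumption,
 * 0xa0 being the customary SFP+ EEPROM address):
 *
 *	u8 byte = 0x5a;
 *	int ret = t4_i2c_wr(adap, mbox, 0, 0xa0, 0, 1, &byte);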
4307 */ 4308 int t4_i2c_wr(struct adapter *adap, unsigned int mbox, 4309 int port, unsigned int devid, 4310 unsigned int offset, unsigned int len, 4311 u8 *buf) 4312 { 4313 struct fw_ldst_cmd ldst; 4314 4315 if (port >= 4 || 4316 devid >= 256 || 4317 offset >= 256 || 4318 len > sizeof ldst.u.i2c.data) 4319 return -EINVAL; 4320 4321 memset(&ldst, 0, sizeof ldst); 4322 ldst.op_to_addrspace = 4323 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 4324 F_FW_CMD_REQUEST | 4325 F_FW_CMD_WRITE | 4326 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C)); 4327 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); 4328 ldst.u.i2c.pid = (port < 0 ? 0xff : port); 4329 ldst.u.i2c.did = devid; 4330 ldst.u.i2c.boffset = offset; 4331 ldst.u.i2c.blen = len; 4332 memcpy(ldst.u.i2c.data, buf, len); 4333 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); 4334 } 4335 4336 /** 4337 * t4_sge_ctxt_flush - flush the SGE context cache 4338 * @adap: the adapter 4339 * @mbox: mailbox to use for the FW command 4340 * 4341 * Issues a FW command through the given mailbox to flush the 4342 * SGE context cache. 4343 */ 4344 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox) 4345 { 4346 int ret; 4347 struct fw_ldst_cmd c; 4348 4349 memset(&c, 0, sizeof(c)); 4350 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4351 F_FW_CMD_READ | 4352 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC)); 4353 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4354 c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH); 4355 4356 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4357 return ret; 4358 } 4359 4360 /** 4361 * t4_sge_ctxt_rd - read an SGE context through FW 4362 * @adap: the adapter 4363 * @mbox: mailbox to use for the FW command 4364 * @cid: the context id 4365 * @ctype: the context type 4366 * @data: where to store the context data 4367 * 4368 * Issues a FW command through the given mailbox to read an SGE context. 4369 */ 4370 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, 4371 enum ctxt_type ctype, u32 *data) 4372 { 4373 int ret; 4374 struct fw_ldst_cmd c; 4375 4376 if (ctype == CTXT_EGRESS) 4377 ret = FW_LDST_ADDRSPC_SGE_EGRC; 4378 else if (ctype == CTXT_INGRESS) 4379 ret = FW_LDST_ADDRSPC_SGE_INGC; 4380 else if (ctype == CTXT_FLM) 4381 ret = FW_LDST_ADDRSPC_SGE_FLMC; 4382 else 4383 ret = FW_LDST_ADDRSPC_SGE_CONMC; 4384 4385 memset(&c, 0, sizeof(c)); 4386 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4387 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret)); 4388 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4389 c.u.idctxt.physid = htonl(cid); 4390 4391 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4392 if (ret == 0) { 4393 data[0] = ntohl(c.u.idctxt.ctxt_data0); 4394 data[1] = ntohl(c.u.idctxt.ctxt_data1); 4395 data[2] = ntohl(c.u.idctxt.ctxt_data2); 4396 data[3] = ntohl(c.u.idctxt.ctxt_data3); 4397 data[4] = ntohl(c.u.idctxt.ctxt_data4); 4398 data[5] = ntohl(c.u.idctxt.ctxt_data5); 4399 } 4400 return ret; 4401 } 4402 4403 /** 4404 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW 4405 * @adap: the adapter 4406 * @cid: the context id 4407 * @ctype: the context type 4408 * @data: where to store the context data 4409 * 4410 * Reads an SGE context directly, bypassing FW. This is only for 4411 * debugging when FW is unavailable. 
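 *
 * A minimal usage sketch (illustrative; dumps the six data words of
 * egress context 0):
 *
 *	u32 data[6];
 *	int ret = t4_sge_ctxt_rd_bd(adap, 0, CTXT_EGRESS, data);
 *
 * On success data[0..5] hold the raw context words.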
4412 */ 4413 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, 4414 u32 *data) 4415 { 4416 int i, ret; 4417 4418 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype)); 4419 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1); 4420 if (!ret) 4421 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4) 4422 *data++ = t4_read_reg(adap, i); 4423 return ret; 4424 } 4425 4426 /** 4427 * t4_fw_hello - establish communication with FW 4428 * @adap: the adapter 4429 * @mbox: mailbox to use for the FW command 4430 * @evt_mbox: mailbox to receive async FW events 4431 * @master: specifies the caller's willingness to be the device master 4432 * @state: returns the current device state (if non-NULL) 4433 * 4434 * Issues a command to establish communication with FW. Returns either 4435 * an error (negative integer) or the mailbox of the Master PF. 4436 */ 4437 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 4438 enum dev_master master, enum dev_state *state) 4439 { 4440 int ret; 4441 struct fw_hello_cmd c; 4442 u32 v; 4443 unsigned int master_mbox; 4444 int retries = FW_CMD_HELLO_RETRIES; 4445 4446 retry: 4447 memset(&c, 0, sizeof(c)); 4448 INIT_CMD(c, HELLO, WRITE); 4449 c.err_to_clearinit = htonl( 4450 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | 4451 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | 4452 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 4453 M_FW_HELLO_CMD_MBMASTER) | 4454 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | 4455 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) | 4456 F_FW_HELLO_CMD_CLEARINIT); 4457 4458 /* 4459 * Issue the HELLO command to the firmware. If it's not successful 4460 * but indicates that we got a "busy" or "timeout" condition, retry 4461 * the HELLO until we exhaust our retry limit. If we do exceed our 4462 * retry limit, check to see if the firmware left us any error 4463 * information and report that if so ... 4464 */ 4465 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4466 if (ret != FW_SUCCESS) { 4467 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) 4468 goto retry; 4469 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR) 4470 t4_report_fw_error(adap); 4471 return ret; 4472 } 4473 4474 v = ntohl(c.err_to_clearinit); 4475 master_mbox = G_FW_HELLO_CMD_MBMASTER(v); 4476 if (state) { 4477 if (v & F_FW_HELLO_CMD_ERR) 4478 *state = DEV_STATE_ERR; 4479 else if (v & F_FW_HELLO_CMD_INIT) 4480 *state = DEV_STATE_INIT; 4481 else 4482 *state = DEV_STATE_UNINIT; 4483 } 4484 4485 /* 4486 * If we're not the Master PF then we need to wait around for the 4487 * Master PF Driver to finish setting up the adapter. 4488 * 4489 * Note that we also do this wait if we're a non-Master-capable PF and 4490 * there is no current Master PF; a Master PF may show up momentarily 4491 * and we wouldn't want to fail pointlessly. (This can happen when an 4492 * OS loads lots of different drivers rapidly at the same time). In 4493 * this case, the Master PF returned by the firmware will be 4494 * M_PCIE_FW_MASTER so the test below will work ... 4495 */ 4496 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 && 4497 master_mbox != mbox) { 4498 int waiting = FW_CMD_HELLO_TIMEOUT; 4499 4500 /* 4501 * Wait for the firmware to either indicate an error or 4502 * initialized state. If we see either of these we bail out 4503 * and report the issue to the caller. If we exhaust the 4504 * "hello timeout" and we haven't exhausted our retries, try 4505 * again. Otherwise bail with a timeout error. 
4506 		 */
4507 		for (;;) {
4508 			u32 pcie_fw;
4509 
4510 			msleep(50);
4511 			waiting -= 50;
4512 
4513 			/*
4514 			 * If neither Error nor Initialized are indicated
4515 			 * by the firmware keep waiting till we exhaust our
4516 			 * timeout ... and then retry if we haven't exhausted
4517 			 * our retries ...
4518 			 */
4519 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4520 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4521 				if (waiting <= 0) {
4522 					if (retries-- > 0)
4523 						goto retry;
4524 
4525 					return -ETIMEDOUT;
4526 				}
4527 				continue;
4528 			}
4529 
4530 			/*
4531 			 * We either have an Error or Initialized condition;
4532 			 * report errors preferentially.
4533 			 */
4534 			if (state) {
4535 				if (pcie_fw & F_PCIE_FW_ERR)
4536 					*state = DEV_STATE_ERR;
4537 				else if (pcie_fw & F_PCIE_FW_INIT)
4538 					*state = DEV_STATE_INIT;
4539 			}
4540 
4541 			/*
4542 			 * If we arrived before a Master PF was selected and
4543 			 * there's now a valid Master PF, grab its identity
4544 			 * for our caller.
4545 			 */
4546 			if (master_mbox == M_PCIE_FW_MASTER &&
4547 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4548 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4549 			break;
4550 		}
4551 	}
4552 
4553 	return master_mbox;
4554 }
4555 
4556 /**
4557  * t4_fw_bye - end communication with FW
4558  * @adap: the adapter
4559  * @mbox: mailbox to use for the FW command
4560  *
4561  * Issues a command to terminate communication with FW.
4562  */
4563 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4564 {
4565 	struct fw_bye_cmd c;
4566 
4567 	memset(&c, 0, sizeof(c));
4568 	INIT_CMD(c, BYE, WRITE);
4569 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4570 }
4571 
4572 /**
4573  * t4_fw_reset - issue a reset to FW
4574  * @adap: the adapter
4575  * @mbox: mailbox to use for the FW command
4576  * @reset: specifies the type of reset to perform
4577  *
4578  * Issues a reset command of the specified type to FW.
4579  */
4580 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4581 {
4582 	struct fw_reset_cmd c;
4583 
4584 	memset(&c, 0, sizeof(c));
4585 	INIT_CMD(c, RESET, WRITE);
4586 	c.val = htonl(reset);
4587 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4588 }
4589 
4590 /**
4591  * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4592  * @adap: the adapter
4593  * @mbox: mailbox to use for the FW RESET command (if desired)
4594  * @force: force uP into RESET even if FW RESET command fails
4595  *
4596  * Issues a RESET command to firmware (if desired) with a HALT indication
4597  * and then puts the microprocessor into RESET state. The RESET command
4598  * will only be issued if a legitimate mailbox is provided (mbox <=
4599  * M_PCIE_FW_MASTER).
4600  *
4601  * This is generally used in order for the host to safely manipulate the
4602  * adapter without fear of conflicting with whatever the firmware might
4603  * be doing. The only way out of this state is to RESTART the firmware
4604  * ...
4605  */
4606 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4607 {
4608 	int ret = 0;
4609 
4610 	/*
4611 	 * If a legitimate mailbox is provided, issue a RESET command
4612 	 * with a HALT indication.
4613 	 */
4614 	if (mbox <= M_PCIE_FW_MASTER) {
4615 		struct fw_reset_cmd c;
4616 
4617 		memset(&c, 0, sizeof(c));
4618 		INIT_CMD(c, RESET, WRITE);
4619 		c.val = htonl(F_PIORST | F_PIORSTMODE);
4620 		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4621 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4622 	}
4623 
4624 	/*
4625 	 * Normally we won't complete the operation if the firmware RESET
4626 	 * command fails but if our caller insists we'll go ahead and put the
4627 	 * uP into RESET.
This can be useful if the firmware is hung or even
4628 	 * missing ... We'll have to take the risk of putting the uP into
4629 	 * RESET without the cooperation of firmware in that case.
4630 	 *
4631 	 * We also force the firmware's HALT flag to be on in case we bypassed
4632 	 * the firmware RESET command above or we're dealing with old firmware
4633 	 * which doesn't have the HALT capability. This will serve as a flag
4634 	 * for the incoming firmware to know that it's coming out of a HALT
4635 	 * rather than a RESET ... if it's new enough to understand that ...
4636 	 */
4637 	if (ret == 0 || force) {
4638 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4639 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4640 	}
4641 
4642 	/*
4643 	 * And we always return the result of the firmware RESET command
4644 	 * even when we force the uP into RESET ...
4645 	 */
4646 	return ret;
4647 }
4648 
4649 /**
4650  * t4_fw_restart - restart the firmware by taking the uP out of RESET
4651  * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
4652  * @reset: if we want to do a RESET to restart things
4653  *
4654  * Restart firmware previously halted by t4_fw_halt(). On successful
4655  * return the previous PF Master remains as the new PF Master and there
4656  * is no need to issue a new HELLO command, etc.
4657  *
4658  * We do this in two ways:
4659  *
4660  * 1. If we're dealing with newer firmware we'll simply want to take
4661  *    the chip's microprocessor out of RESET. This will cause the
4662  *    firmware to start up from its start vector. And then we'll loop
4663  *    until the firmware indicates it's started again (PCIE_FW.HALT
4664  *    reset to 0) or we timeout.
4665  *
4666  * 2. If we're dealing with older firmware then we'll need to RESET
4667  *    the chip since older firmware won't recognize the PCIE_FW.HALT
4668  *    flag and so won't automatically RESET itself on startup.
4669  */
4670 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4671 {
4672 	if (reset) {
4673 		/*
4674 		 * Since we're directing the RESET instead of the firmware
4675 		 * doing it automatically, we need to clear the PCIE_FW.HALT
4676 		 * bit.
4677 		 */
4678 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4679 
4680 		/*
4681 		 * If we've been given a valid mailbox, first try to get the
4682 		 * firmware to do the RESET. If that works, great and we can
4683 		 * return success. Otherwise, if we haven't been given a
4684 		 * valid mailbox or the RESET command failed, fall back to
4685 		 * hitting the chip with a hammer.
4686 		 */
4687 		if (mbox <= M_PCIE_FW_MASTER) {
4688 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4689 			msleep(100);
4690 			if (t4_fw_reset(adap, mbox,
4691 					F_PIORST | F_PIORSTMODE) == 0)
4692 				return 0;
4693 		}
4694 
4695 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4696 		msleep(2000);
4697 	} else {
4698 		int ms;
4699 
4700 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4701 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4702 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4703 				return FW_SUCCESS;
4704 			msleep(100);
4705 			ms += 100;
4706 		}
4707 		return -ETIMEDOUT;
4708 	}
4709 	return 0;
4710 }
4711 
4712 /**
4713  * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4714  * @adap: the adapter
4715  * @mbox: mailbox to use for the FW RESET command (if desired)
4716  * @fw_data: the firmware image to write
4717  * @size: image size
4718  * @force: force upgrade even if firmware doesn't cooperate
4719  *
4720  * Perform all of the steps necessary for upgrading an adapter's
4721  * firmware image.
Normally this requires the cooperation of the 4722 * existing firmware in order to halt all existing activities 4723 * but if an invalid mailbox token is passed in we skip that step 4724 * (though we'll still put the adapter microprocessor into RESET in 4725 * that case). 4726 * 4727 * On successful return the new firmware will have been loaded and 4728 * the adapter will have been fully RESET losing all previous setup 4729 * state. On unsuccessful return the adapter may be completely hosed ... 4730 * positive errno indicates that the adapter is ~probably~ intact, a 4731 * negative errno indicates that things are looking bad ... 4732 */ 4733 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, 4734 const u8 *fw_data, unsigned int size, int force) 4735 { 4736 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 4737 unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP; 4738 int reset, ret; 4739 4740 if (!bootstrap) { 4741 ret = t4_fw_halt(adap, mbox, force); 4742 if (ret < 0 && !force) 4743 return ret; 4744 } 4745 4746 ret = t4_load_fw(adap, fw_data, size); 4747 if (ret < 0 || bootstrap) 4748 return ret; 4749 4750 /* 4751 * Older versions of the firmware don't understand the new 4752 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 4753 * restart. So for newly loaded older firmware we'll have to do the 4754 * RESET for it so it starts up on a clean slate. We can tell if 4755 * the newly loaded firmware will handle this right by checking 4756 * its header flags to see if it advertises the capability. 4757 */ 4758 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 4759 return t4_fw_restart(adap, mbox, reset); 4760 } 4761 4762 /** 4763 * t4_fw_initialize - ask FW to initialize the device 4764 * @adap: the adapter 4765 * @mbox: mailbox to use for the FW command 4766 * 4767 * Issues a command to FW to partially initialize the device. This 4768 * performs initialization that generally doesn't depend on user input. 4769 */ 4770 int t4_fw_initialize(struct adapter *adap, unsigned int mbox) 4771 { 4772 struct fw_initialize_cmd c; 4773 4774 memset(&c, 0, sizeof(c)); 4775 INIT_CMD(c, INITIALIZE, WRITE); 4776 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4777 } 4778 4779 /** 4780 * t4_query_params - query FW or device parameters 4781 * @adap: the adapter 4782 * @mbox: mailbox to use for the FW command 4783 * @pf: the PF 4784 * @vf: the VF 4785 * @nparams: the number of parameters 4786 * @params: the parameter names 4787 * @val: the parameter values 4788 * 4789 * Reads the value of FW or device parameters. Up to 7 parameters can be 4790 * queried at once. 
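 *
 * A minimal usage sketch (illustrative; queries the device's port
 * vector and assumes the usual FW_PARAMS_* encoding macros from
 * t4fw_interface.h):
 *
 *	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	u32 portvec;
 *	int ret = t4_query_params(adap, mbox, pf, 0, 1, &param, &portvec);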
4791 */ 4792 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 4793 unsigned int vf, unsigned int nparams, const u32 *params, 4794 u32 *val) 4795 { 4796 int i, ret; 4797 struct fw_params_cmd c; 4798 __be32 *p = &c.param[0].mnem; 4799 4800 if (nparams > 7) 4801 return -EINVAL; 4802 4803 memset(&c, 0, sizeof(c)); 4804 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | 4805 F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) | 4806 V_FW_PARAMS_CMD_VFN(vf)); 4807 c.retval_len16 = htonl(FW_LEN16(c)); 4808 4809 for (i = 0; i < nparams; i++, p += 2, params++) 4810 *p = htonl(*params); 4811 4812 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4813 if (ret == 0) 4814 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) 4815 *val++ = ntohl(*p); 4816 return ret; 4817 } 4818 4819 /** 4820 * t4_set_params - sets FW or device parameters 4821 * @adap: the adapter 4822 * @mbox: mailbox to use for the FW command 4823 * @pf: the PF 4824 * @vf: the VF 4825 * @nparams: the number of parameters 4826 * @params: the parameter names 4827 * @val: the parameter values 4828 * 4829 * Sets the value of FW or device parameters. Up to 7 parameters can be 4830 * specified at once. 4831 */ 4832 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 4833 unsigned int vf, unsigned int nparams, const u32 *params, 4834 const u32 *val) 4835 { 4836 struct fw_params_cmd c; 4837 __be32 *p = &c.param[0].mnem; 4838 4839 if (nparams > 7) 4840 return -EINVAL; 4841 4842 memset(&c, 0, sizeof(c)); 4843 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | 4844 F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) | 4845 V_FW_PARAMS_CMD_VFN(vf)); 4846 c.retval_len16 = htonl(FW_LEN16(c)); 4847 4848 while (nparams--) { 4849 *p++ = htonl(*params); 4850 params++; 4851 *p++ = htonl(*val); 4852 val++; 4853 } 4854 4855 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4856 } 4857 4858 /** 4859 * t4_cfg_pfvf - configure PF/VF resource limits 4860 * @adap: the adapter 4861 * @mbox: mailbox to use for the FW command 4862 * @pf: the PF being configured 4863 * @vf: the VF being configured 4864 * @txq: the max number of egress queues 4865 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 4866 * @rxqi: the max number of interrupt-capable ingress queues 4867 * @rxq: the max number of interruptless ingress queues 4868 * @tc: the PCI traffic class 4869 * @vi: the max number of virtual interfaces 4870 * @cmask: the channel access rights mask for the PF/VF 4871 * @pmask: the port access rights mask for the PF/VF 4872 * @nexact: the maximum number of exact MPS filters 4873 * @rcaps: read capabilities 4874 * @wxcaps: write/execute capabilities 4875 * 4876 * Configures resource limits and capabilities for a physical or virtual 4877 * function. 
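 *
 * A minimal usage sketch (illustrative; the resource numbers are
 * arbitrary placeholders, not recommendations -- real budgets are a
 * policy decision):
 *
 *	ret = t4_cfg_pfvf(adap, mbox, pf, 0, 64, 32, 64, 64, 0, 4,
 *	    M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16, rcaps, wxcaps);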
4878 */ 4879 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 4880 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 4881 unsigned int rxqi, unsigned int rxq, unsigned int tc, 4882 unsigned int vi, unsigned int cmask, unsigned int pmask, 4883 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 4884 { 4885 struct fw_pfvf_cmd c; 4886 4887 memset(&c, 0, sizeof(c)); 4888 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST | 4889 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | 4890 V_FW_PFVF_CMD_VFN(vf)); 4891 c.retval_len16 = htonl(FW_LEN16(c)); 4892 c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) | 4893 V_FW_PFVF_CMD_NIQ(rxq)); 4894 c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) | 4895 V_FW_PFVF_CMD_PMASK(pmask) | 4896 V_FW_PFVF_CMD_NEQ(txq)); 4897 c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) | 4898 V_FW_PFVF_CMD_NEXACTF(nexact)); 4899 c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) | 4900 V_FW_PFVF_CMD_WX_CAPS(wxcaps) | 4901 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); 4902 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4903 } 4904 4905 /** 4906 * t4_alloc_vi_func - allocate a virtual interface 4907 * @adap: the adapter 4908 * @mbox: mailbox to use for the FW command 4909 * @port: physical port associated with the VI 4910 * @pf: the PF owning the VI 4911 * @vf: the VF owning the VI 4912 * @nmac: number of MAC addresses needed (1 to 5) 4913 * @mac: the MAC addresses of the VI 4914 * @rss_size: size of RSS table slice associated with this VI 4915 * @portfunc: which Port Application Function MAC Address is desired 4916 * @idstype: Intrusion Detection Type 4917 * 4918 * Allocates a virtual interface for the given physical port. If @mac is 4919 * not %NULL it contains the MAC addresses of the VI as assigned by FW. 4920 * @mac should be large enough to hold @nmac Ethernet addresses, they are 4921 * stored consecutively so the space needed is @nmac * 6 bytes. 4922 * Returns a negative error number or the non-negative VI id. 
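 *
 * A minimal usage sketch (illustrative; one MAC address, Ethernet
 * function, IDS disabled -- the same arguments t4_alloc_vi() below
 * passes):
 *
 *	u8 mac[6];
 *	u16 rss_size;
 *	int viid = t4_alloc_vi_func(adap, mbox, port, pf, vf, 1, mac,
 *	    &rss_size, FW_VI_FUNC_ETH, 0);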
4923 */
4924 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4925 		     unsigned int port, unsigned int pf, unsigned int vf,
4926 		     unsigned int nmac, u8 *mac, u16 *rss_size,
4927 		     unsigned int portfunc, unsigned int idstype)
4928 {
4929 	int ret;
4930 	struct fw_vi_cmd c;
4931 
4932 	memset(&c, 0, sizeof(c));
4933 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4934 		F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4935 		V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4936 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4937 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4938 		V_FW_VI_CMD_FUNC(portfunc));
4939 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4940 	c.nmac = nmac - 1;
4941 
4942 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4943 	if (ret)
4944 		return ret;
4945 
4946 	if (mac) {
4947 		memcpy(mac, c.mac, sizeof(c.mac));
4948 		switch (nmac) {
4949 		case 5:
4950 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
4951 		case 4:
4952 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
4953 		case 3:
4954 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
4955 		case 2:
4956 			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
4957 		}
4958 	}
4959 	if (rss_size)
4960 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
4961 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4962 }
4963 
4964 /**
4965  * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4966  * @adap: the adapter
4967  * @mbox: mailbox to use for the FW command
4968  * @port: physical port associated with the VI
4969  * @pf: the PF owning the VI
4970  * @vf: the VF owning the VI
4971  * @nmac: number of MAC addresses needed (1 to 5)
4972  * @mac: the MAC addresses of the VI
4973  * @rss_size: size of RSS table slice associated with this VI
4974  *
4975  * Backwards-compatible convenience routine to allocate a Virtual
4976  * Interface with an Ethernet Port Application Function and Intrusion
4977  * Detection System disabled.
4978  */
4979 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4980 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4981 		u16 *rss_size)
4982 {
4983 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4984 				FW_VI_FUNC_ETH, 0);
4985 }
4986 
4987 /**
4988  * t4_free_vi - free a virtual interface
4989  * @adap: the adapter
4990  * @mbox: mailbox to use for the FW command
4991  * @pf: the PF owning the VI
4992  * @vf: the VF owning the VI
4993  * @viid: virtual interface identifier
4994  *
4995  * Free a previously allocated virtual interface.
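 *
 * A minimal usage sketch (illustrative; releases a VI obtained from
 * t4_alloc_vi() above, with "viid" being the value that call returned):
 *
 *	ret = t4_free_vi(adap, mbox, pf, vf, viid);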
4996 */ 4997 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, 4998 unsigned int vf, unsigned int viid) 4999 { 5000 struct fw_vi_cmd c; 5001 5002 memset(&c, 0, sizeof(c)); 5003 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | 5004 F_FW_CMD_REQUEST | 5005 F_FW_CMD_EXEC | 5006 V_FW_VI_CMD_PFN(pf) | 5007 V_FW_VI_CMD_VFN(vf)); 5008 c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c)); 5009 c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid)); 5010 5011 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 5012 } 5013 5014 /** 5015 * t4_set_rxmode - set Rx properties of a virtual interface 5016 * @adap: the adapter 5017 * @mbox: mailbox to use for the FW command 5018 * @viid: the VI id 5019 * @mtu: the new MTU or -1 5020 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 5021 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 5022 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 5023 * @vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change 5024 * @sleep_ok: if true we may sleep while awaiting command completion 5025 * 5026 * Sets Rx properties of a virtual interface. 5027 */ 5028 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 5029 int mtu, int promisc, int all_multi, int bcast, int vlanex, 5030 bool sleep_ok) 5031 { 5032 struct fw_vi_rxmode_cmd c; 5033 5034 /* convert to FW values */ 5035 if (mtu < 0) 5036 mtu = M_FW_VI_RXMODE_CMD_MTU; 5037 if (promisc < 0) 5038 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; 5039 if (all_multi < 0) 5040 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; 5041 if (bcast < 0) 5042 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; 5043 if (vlanex < 0) 5044 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; 5045 5046 memset(&c, 0, sizeof(c)); 5047 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST | 5048 F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid)); 5049 c.retval_len16 = htonl(FW_LEN16(c)); 5050 c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) | 5051 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 5052 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 5053 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | 5054 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); 5055 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 5056 } 5057 5058 /** 5059 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 5060 * @adap: the adapter 5061 * @mbox: mailbox to use for the FW command 5062 * @viid: the VI id 5063 * @free: if true any existing filters for this VI id are first removed 5064 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 5065 * @addr: the MAC address(es) 5066 * @idx: where to store the index of each allocated filter 5067 * @hash: pointer to hash address filter bitmap 5068 * @sleep_ok: call is allowed to sleep 5069 * 5070 * Allocates an exact-match filter for each of the supplied addresses and 5071 * sets it to the corresponding address. If @idx is not %NULL it should 5072 * have at least @naddr entries, each of which will be set to the index of 5073 * the filter allocated for the corresponding MAC address. If a filter 5074 * could not be allocated for an address its index is set to 0xffff. 5075 * If @hash is not %NULL addresses that fail to allocate an exact filter 5076 * are hashed and update the hash filter bitmap pointed at by @hash. 5077 * 5078 * Returns a negative error number or the number of filters allocated. 
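 *
 * A minimal usage sketch (illustrative; installs exact-match filters
 * for two unicast addresses -- the addresses themselves are made-up
 * examples):
 *
 *	static const u8 a0[6] = {0x00, 0x07, 0x43, 0x00, 0x00, 0x01};
 *	static const u8 a1[6] = {0x00, 0x07, 0x43, 0x00, 0x00, 0x02};
 *	const u8 *addrs[2] = {a0, a1};
 *	u16 filt_idx[2];
 *	u64 hash = 0;
 *	int n = t4_alloc_mac_filt(adap, mbox, viid, false, 2, addrs,
 *	    filt_idx, &hash, true);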
5079 */ 5080 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, 5081 unsigned int viid, bool free, unsigned int naddr, 5082 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) 5083 { 5084 int offset, ret = 0; 5085 struct fw_vi_mac_cmd c; 5086 unsigned int nfilters = 0; 5087 unsigned int max_naddr = is_t4(adap) ? 5088 NUM_MPS_CLS_SRAM_L_INSTANCES : 5089 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 5090 unsigned int rem = naddr; 5091 5092 if (naddr > max_naddr) 5093 return -EINVAL; 5094 5095 for (offset = 0; offset < naddr ; /**/) { 5096 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) 5097 ? rem 5098 : ARRAY_SIZE(c.u.exact)); 5099 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 5100 u.exact[fw_naddr]), 16); 5101 struct fw_vi_mac_exact *p; 5102 int i; 5103 5104 memset(&c, 0, sizeof(c)); 5105 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | 5106 F_FW_CMD_REQUEST | 5107 F_FW_CMD_WRITE | 5108 V_FW_CMD_EXEC(free) | 5109 V_FW_VI_MAC_CMD_VIID(viid)); 5110 c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) | 5111 V_FW_CMD_LEN16(len16)); 5112 5113 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 5114 p->valid_to_idx = htons( 5115 F_FW_VI_MAC_CMD_VALID | 5116 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); 5117 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); 5118 } 5119 5120 /* 5121 * It's okay if we run out of space in our MAC address arena. 5122 * Some of the addresses we submit may get stored so we need 5123 * to run through the reply to see what the results were ... 5124 */ 5125 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); 5126 if (ret && ret != -FW_ENOMEM) 5127 break; 5128 5129 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 5130 u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx)); 5131 5132 if (idx) 5133 idx[offset+i] = (index >= max_naddr 5134 ? 0xffff 5135 : index); 5136 if (index < max_naddr) 5137 nfilters++; 5138 else if (hash) 5139 *hash |= (1ULL << hash_mac_addr(addr[offset+i])); 5140 } 5141 5142 free = false; 5143 offset += fw_naddr; 5144 rem -= fw_naddr; 5145 } 5146 5147 if (ret == 0 || ret == -FW_ENOMEM) 5148 ret = nfilters; 5149 return ret; 5150 } 5151 5152 /** 5153 * t4_change_mac - modifies the exact-match filter for a MAC address 5154 * @adap: the adapter 5155 * @mbox: mailbox to use for the FW command 5156 * @viid: the VI id 5157 * @idx: index of existing filter for old value of MAC address, or -1 5158 * @addr: the new MAC address value 5159 * @persist: whether a new MAC allocation should be persistent 5160 * @add_smt: if true also add the address to the HW SMT 5161 * 5162 * Modifies an exact-match filter and sets it to the new MAC address if 5163 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 5164 * latter case the address is added persistently if @persist is %true. 5165 * 5166 * Note that in general it is not possible to modify the value of a given 5167 * filter so the generic way to modify an address filter is to free the one 5168 * being used by the old address value and allocate a new filter for the 5169 * new address value. 5170 * 5171 * Returns a negative error number or the index of the filter with the new 5172 * MAC value. Note that this index may differ from @idx. 5173 */ 5174 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 5175 int idx, const u8 *addr, bool persist, bool add_smt) 5176 { 5177 int ret, mode; 5178 struct fw_vi_mac_cmd c; 5179 struct fw_vi_mac_exact *p = c.u.exact; 5180 unsigned int max_mac_addr = is_t4(adap) ? 

/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address if
 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 * latter case the address is added persistently if @persist is %true.
 *
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int idx, const u8 *addr, bool persist, bool add_smt)
{
        int ret, mode;
        struct fw_vi_mac_cmd c;
        struct fw_vi_mac_exact *p = c.u.exact;
        unsigned int max_mac_addr = is_t4(adap) ?
                                    NUM_MPS_CLS_SRAM_L_INSTANCES :
                                    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

        if (idx < 0)            /* new allocation */
                idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
        mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
                             F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
        c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
        p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
                                V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
                                V_FW_VI_MAC_CMD_IDX(idx));
        memcpy(p->macaddr, addr, sizeof(p->macaddr));

        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
        if (ret == 0) {
                ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
                if (ret >= max_mac_addr)
                        ret = -ENOMEM;
        }
        return ret;
}

/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     bool ucast, u64 vec, bool sleep_ok)
{
        struct fw_vi_mac_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
                             F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
        c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
                                    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
                                    V_FW_CMD_LEN16(1));
        c.u.hash.hashvec = cpu_to_be64(vec);
        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en)
{
        struct fw_vi_enable_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
                             F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
        c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
                               V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
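
/*
 * Usage sketch (illustrative only; "sc" and "vi" are hypothetical): a
 * typical interface-up path enables both Rx and Tx on the VI, and the
 * interface-down path disables them again.
 *
 *      rc = t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
 *      ...
 *      rc = t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
 */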

/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     unsigned int nblinks)
{
        struct fw_vi_enable_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
                             F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
        c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
        c.blinkdur = htons(nblinks);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @start: %true to enable the queues, %false to disable them
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Starts or stops an ingress queue and its associated FLs, if any.
 */
int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
                     unsigned int pf, unsigned int vf, unsigned int iqid,
                     unsigned int fl0id, unsigned int fl1id)
{
        struct fw_iq_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
                            F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
                            V_FW_IQ_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
                                 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
        c.iqid = htons(iqid);
        c.fl0id = htons(fl0id);
        c.fl1id = htons(fl1id);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int iqtype, unsigned int iqid,
               unsigned int fl0id, unsigned int fl1id)
{
        struct fw_iq_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
                            F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
                            V_FW_IQ_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
        c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
        c.iqid = htons(iqid);
        c.fl0id = htons(fl0id);
        c.fl1id = htons(fl1id);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
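
/*
 * Usage sketch (illustrative only; names are hypothetical): quiesce an
 * ingress queue with one attached free list, then release both with the
 * firmware.  0xffff marks the absent FL1.
 *
 *      (void) t4_iq_start_stop(sc, sc->mbox, false, sc->pf, 0, iqid,
 *          fl0id, 0xffff);
 *      rc = t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 *          iqid, fl0id, 0xffff);
 */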

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                   unsigned int vf, unsigned int eqid)
{
        struct fw_eq_eth_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
                            F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
                            V_FW_EQ_ETH_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
        c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid)
{
        struct fw_eq_ctrl_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
                            F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
                            V_FW_EQ_CTRL_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
        c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid)
{
        struct fw_eq_ofld_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
                            F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
                            V_FW_EQ_OFLD_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
        c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
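
/*
 * Usage sketch (illustrative only; "sc" and "eqid" are hypothetical):
 * the three EQ-free helpers differ only in the firmware command they
 * issue, so releasing an Ethernet Tx queue is a single call.
 *
 *      rc = t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eqid);
 */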

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
        u8 opcode = *(const u8 *)rpl;
        const struct fw_port_cmd *p = (const void *)rpl;
        unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));

        if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
                /* link/module state change message */
                int speed = 0, fc = 0, i;
                int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
                struct port_info *pi = NULL;
                struct link_config *lc;
                u32 stat = ntohl(p->u.info.lstatus_to_modtype);
                int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
                u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

                if (stat & F_FW_PORT_CMD_RXPAUSE)
                        fc |= PAUSE_RX;
                if (stat & F_FW_PORT_CMD_TXPAUSE)
                        fc |= PAUSE_TX;
                if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
                        speed = SPEED_100;
                else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
                        speed = SPEED_1000;
                else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
                        speed = SPEED_10000;
                else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
                        speed = SPEED_40000;

                for_each_port(adap, i) {
                        pi = adap2pinfo(adap, i);
                        if (pi->tx_chan == chan)
                                break;
                }
                lc = &pi->link_cfg;

                if (mod != pi->mod_type) {
                        pi->mod_type = mod;
                        t4_os_portmod_changed(adap, i);
                }
                if (link_ok != lc->link_ok || speed != lc->speed ||
                    fc != lc->fc) {     /* something changed */
                        int reason;

                        if (!link_ok && lc->link_ok)
                                reason = G_FW_PORT_CMD_LINKDNRC(stat);
                        else
                                reason = -1;

                        lc->link_ok = link_ok;
                        lc->speed = speed;
                        lc->fc = fc;
                        lc->supported = ntohs(p->u.info.pcap);
                        t4_os_link_changed(adap, i, link_ok, reason);
                }
        } else {
                CH_WARN_RATELIMIT(adap,
                    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
                return -EINVAL;
        }
        return 0;
}

/**
 * get_pci_mode - determine a card's PCI mode
 * @adapter: the adapter
 * @p: where to store the PCI settings
 *
 * Determines a card's PCI mode and associated parameters, such as speed
 * and width.
 */
static void __devinit get_pci_mode(struct adapter *adapter,
                                   struct pci_params *p)
{
        u16 val;
        u32 pcie_cap;

        pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
        if (pcie_cap) {
                t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
                p->speed = val & PCI_EXP_LNKSTA_CLS;
                p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
        }
}
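
/*
 * Worked example (illustrative): the Link Status register encodes the
 * current link speed and width directly, so a LNKSTA value of 0x0083
 * yields p->speed == 3 (8 GT/s, gen3) and p->width == 8 (x8).
 */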

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void __devinit init_link_config(struct link_config *lc,
                                       unsigned int caps)
{
        lc->supported = caps;
        lc->requested_speed = 0;
        lc->speed = 0;
        lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
        if (lc->supported & FW_PORT_CAP_ANEG) {
                lc->advertising = lc->supported & ADVERT_MASK;
                lc->autoneg = AUTONEG_ENABLE;
                lc->requested_fc |= PAUSE_AUTONEG;
        } else {
                lc->advertising = 0;
                lc->autoneg = AUTONEG_DISABLE;
        }
}

static int __devinit get_flash_params(struct adapter *adapter)
{
        int ret;
        u32 info = 0;

        ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
        if (!ret)
                ret = sf1_read(adapter, 3, 0, 1, &info);
        t4_write_reg(adapter, A_SF_OP, 0);      /* unlock SF */
        if (ret < 0)
                return ret;

        if ((info & 0xff) != 0x20)              /* not a Numonix flash */
                return -EINVAL;
        info >>= 16;                            /* log2 of size */
        if (info >= 0x14 && info < 0x18)
                adapter->params.sf_nsec = 1 << (info - 16);
        else if (info == 0x18)
                adapter->params.sf_nsec = 64;
        else
                return -EINVAL;
        adapter->params.sf_size = 1 << info;
        return 0;
}
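
/*
 * Worked example (illustrative): a 4MB Numonix part reports a log2 size
 * of 0x16 in its ID, so sf_size = 1 << 0x16 = 4MB and sf_nsec =
 * 1 << (0x16 - 16) = 64 sectors, i.e. 64KB per sector.
 */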

static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
                                                  u8 range)
{
        u16 val;
        u32 pcie_cap;

        pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
        if (pcie_cap) {
                t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
                val &= 0xfff0;
                val |= range;
                t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
        }
}

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int __devinit t4_prep_adapter(struct adapter *adapter)
{
        int ret;
        uint16_t device_id;
        uint32_t pl_rev;

        get_pci_mode(adapter, &adapter->params.pci);

        pl_rev = t4_read_reg(adapter, A_PL_REV);
        adapter->params.chipid = G_CHIPID(pl_rev);
        adapter->params.rev = G_REV(pl_rev);
        if (adapter->params.chipid == 0) {
                /* T4 did not have chipid in PL_REV (T5 onwards do) */
                adapter->params.chipid = CHELSIO_T4;

                /* T4A1 chip is not supported */
                if (adapter->params.rev == 1) {
                        CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
                        return -EINVAL;
                }
        }
        adapter->params.pci.vpd_cap_addr =
            t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

        ret = get_flash_params(adapter);
        if (ret < 0)
                return ret;

        ret = get_vpd_params(adapter, &adapter->params.vpd);
        if (ret < 0)
                return ret;

        /* Cards with real ASICs have the chipid in the PCIe device id */
        t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
        if (device_id >> 12 == adapter->params.chipid)
                adapter->params.cim_la_size = CIMLA_SIZE;
        else {
                /* FPGA */
                adapter->params.fpga = 1;
                adapter->params.cim_la_size = 2 * CIMLA_SIZE;
        }

        init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

        /*
         * Default port and clock for debugging in case we can't reach FW.
         */
        adapter->params.nports = 1;
        adapter->params.portvec = 1;
        adapter->params.vpd.cclk = 50000;

        /* Set PCIe completion timeout to 4 seconds. */
        set_pcie_completion_timeout(adapter, 0xd);
        return 0;
}

/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int __devinit t4_init_tp_params(struct adapter *adap)
{
        int chan;
        u32 v;

        v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
        adap->params.tp.tre = G_TIMERRESOLUTION(v);
        adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);

        /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
        for (chan = 0; chan < NCHAN; chan++)
                adap->params.tp.tx_modq[chan] = chan;

        t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
                         &adap->params.tp.ingress_config, 1,
                         A_TP_INGRESS_CONFIG);
        refresh_vlan_pri_map(adap);

        return 0;
}

/**
 * t4_filter_field_shift - calculate filter field shift
 * @adap: the adapter
 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 * Return the shift position of a filter field within the Compressed
 * Filter Tuple.  The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode), e.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
        unsigned int filter_mode = adap->params.tp.vlan_pri_map;
        unsigned int sel;
        int field_shift;

        if ((filter_mode & filter_sel) == 0)
                return -1;

        for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
                switch (filter_mode & sel) {
                case F_FCOE: field_shift += W_FT_FCOE; break;
                case F_PORT: field_shift += W_FT_PORT; break;
                case F_VNIC_ID: field_shift += W_FT_VNIC_ID; break;
                case F_VLAN: field_shift += W_FT_VLAN; break;
                case F_TOS: field_shift += W_FT_TOS; break;
                case F_PROTOCOL: field_shift += W_FT_PROTOCOL; break;
                case F_ETHERTYPE: field_shift += W_FT_ETHERTYPE; break;
                case F_MACMATCH: field_shift += W_FT_MACMATCH; break;
                case F_MPSHITTYPE: field_shift += W_FT_MPSHITTYPE; break;
                case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break;
                }
        }
        return field_shift;
}
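
/*
 * Worked example (illustrative): with a filter mode of F_PORT | F_VLAN |
 * F_PROTOCOL, t4_filter_field_shift(adap, F_VLAN) scans the selector
 * bits below F_VLAN, finds only F_PORT enabled, and returns W_FT_PORT;
 * the PROTOCOL field would start at W_FT_PORT + W_FT_VLAN.
 */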

int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
{
        u8 addr[6];
        int ret, i, j;
        struct fw_port_cmd c;
        u16 rss_size;
        adapter_t *adap = p->adapter;
        u32 param, val;

        memset(&c, 0, sizeof(c));

        for (i = 0, j = -1; i <= p->port_id; i++) {
                do {
                        j++;
                } while ((adap->params.portvec & (1 << j)) == 0);
        }

        c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
                               F_FW_CMD_REQUEST | F_FW_CMD_READ |
                               V_FW_PORT_CMD_PORTID(j));
        c.action_to_len16 = htonl(
                V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
                FW_LEN16(c));
        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
        if (ret)
                return ret;

        ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
        if (ret < 0)
                return ret;

        p->viid = ret;
        p->tx_chan = j;
        p->rx_chan_map = get_mps_bg_map(adap, j);
        p->lport = j;
        p->rss_size = rss_size;
        t4_os_set_hw_addr(adap, p->port_id, addr);

        ret = ntohl(c.u.info.lstatus_to_modtype);
        p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
            G_FW_PORT_CMD_MDIOADDR(ret) : -1;
        p->port_type = G_FW_PORT_CMD_PTYPE(ret);
        p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);

        init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));

        param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
            V_FW_PARAMS_PARAM_YZ(p->viid);
        ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
        if (ret)
                p->rss_base = 0xffff;
        else {
                /* MPASS((val >> 16) == rss_size); */
                p->rss_base = val & 0xffff;
        }

        return 0;
}

int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
                    int sleep_ok)
{
        struct fw_sched_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
                                      F_FW_CMD_REQUEST |
                                      F_FW_CMD_WRITE);
        cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

        cmd.u.config.sc = FW_SCHED_SC_CONFIG;
        cmd.u.config.type = type;
        cmd.u.config.minmaxen = minmaxen;

        return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
                               NULL, sleep_ok);
}

int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
                    int rateunit, int ratemode, int channel, int cl,
                    int minrate, int maxrate, int weight, int pktsize,
                    int sleep_ok)
{
        struct fw_sched_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
                                      F_FW_CMD_REQUEST |
                                      F_FW_CMD_WRITE);
        cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

        cmd.u.params.sc = FW_SCHED_SC_PARAMS;
        cmd.u.params.type = type;
        cmd.u.params.level = level;
        cmd.u.params.mode = mode;
        cmd.u.params.ch = channel;
        cmd.u.params.cl = cl;
        cmd.u.params.unit = rateunit;
        cmd.u.params.rate = ratemode;
        cmd.u.params.min = cpu_to_be32(minrate);
        cmd.u.params.max = cpu_to_be32(maxrate);
        cmd.u.params.weight = cpu_to_be16(weight);
        cmd.u.params.pktsize = cpu_to_be16(pktsize);

        return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
                               NULL, sleep_ok);
}
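
/*
 * Usage sketch (illustrative only; the channel/class values and the
 * 100 Mbps rate are hypothetical, and the FW_SCHED_* constants follow
 * the firmware interface naming): configure scheduling class "cl" on
 * channel 0 as an absolute bit-rate limiter with a 1500-byte pktsize.
 *
 *      rc = t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
 *          FW_SCHED_PARAMS_LEVEL_CL_RL, FW_SCHED_PARAMS_MODE_CLASS,
 *          FW_SCHED_PARAMS_UNIT_BITRATE, FW_SCHED_PARAMS_RATE_ABS,
 *          0, cl, 0, 100000, 0, 1500, 1);
 */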