1 /*- 2 * Copyright (c) 2012 Chelsio Communications, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_inet.h" 31 32 #include <sys/param.h> 33 #include <sys/eventhandler.h> 34 35 #include "common.h" 36 #include "t4_regs.h" 37 #include "t4_regs_values.h" 38 #include "firmware/t4fw_interface.h" 39 40 #undef msleep 41 #define msleep(x) do { \ 42 if (cold) \ 43 DELAY((x) * 1000); \ 44 else \ 45 pause("t4hw", (x) * hz / 1000); \ 46 } while (0) 47 48 /** 49 * t4_wait_op_done_val - wait until an operation is completed 50 * @adapter: the adapter performing the operation 51 * @reg: the register to check for completion 52 * @mask: a single-bit field within @reg that indicates completion 53 * @polarity: the value of the field when the operation is completed 54 * @attempts: number of check iterations 55 * @delay: delay in usecs between iterations 56 * @valp: where to store the value of the register at completion time 57 * 58 * Wait until an operation is completed by checking a bit in a register 59 * up to @attempts times. If @valp is not NULL the value of the register 60 * at the time it indicated completion is stored there. Returns 0 if the 61 * operation completes and -EAGAIN otherwise. 62 */ 63 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 64 int polarity, int attempts, int delay, u32 *valp) 65 { 66 while (1) { 67 u32 val = t4_read_reg(adapter, reg); 68 69 if (!!(val & mask) == polarity) { 70 if (valp) 71 *valp = val; 72 return 0; 73 } 74 if (--attempts == 0) 75 return -EAGAIN; 76 if (delay) 77 udelay(delay); 78 } 79 } 80 81 /** 82 * t4_set_reg_field - set a register field to a value 83 * @adapter: the adapter to program 84 * @addr: the register address 85 * @mask: specifies the portion of the register to modify 86 * @val: the new value for the register field 87 * 88 * Sets a register field specified by the supplied mask to the 89 * given value. 90 */ 91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, 92 u32 val) 93 { 94 u32 v = t4_read_reg(adapter, addr) & ~mask; 95 96 t4_write_reg(adapter, addr, v | val); 97 (void) t4_read_reg(adapter, addr); /* flush */ 98 } 99 100 /** 101 * t4_read_indirect - read indirectly addressed registers 102 * @adap: the adapter 103 * @addr_reg: register holding the indirect address 104 * @data_reg: register holding the value of the indirect register 105 * @vals: where the read register values are stored 106 * @nregs: how many indirect registers to read 107 * @start_idx: index of first indirect register to read 108 * 109 * Reads registers that are accessed indirectly through an address/data 110 * register pair. 111 */ 112 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 113 unsigned int data_reg, u32 *vals, unsigned int nregs, 114 unsigned int start_idx) 115 { 116 while (nregs--) { 117 t4_write_reg(adap, addr_reg, start_idx); 118 *vals++ = t4_read_reg(adap, data_reg); 119 start_idx++; 120 } 121 } 122 123 /** 124 * t4_write_indirect - write indirectly addressed registers 125 * @adap: the adapter 126 * @addr_reg: register holding the indirect addresses 127 * @data_reg: register holding the value for the indirect registers 128 * @vals: values to write 129 * @nregs: how many indirect registers to write 130 * @start_idx: address of first indirect register to write 131 * 132 * Writes a sequential block of registers that are accessed indirectly 133 * through an address/data register pair. 134 */ 135 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 136 unsigned int data_reg, const u32 *vals, 137 unsigned int nregs, unsigned int start_idx) 138 { 139 while (nregs--) { 140 t4_write_reg(adap, addr_reg, start_idx++); 141 t4_write_reg(adap, data_reg, *vals++); 142 } 143 } 144 145 /* 146 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor 147 * mechanism. This guarantees that we get the real value even if we're 148 * operating within a Virtual Machine and the Hypervisor is trapping our 149 * Configuration Space accesses. 150 */ 151 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg) 152 { 153 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 154 F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) | 155 V_REGISTER(reg)); 156 return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA); 157 } 158 159 /* 160 * t4_report_fw_error - report firmware error 161 * @adap: the adapter 162 * 163 * The adapter firmware can indicate error conditions to the host. 164 * This routine prints out the reason for the firmware error (as 165 * reported by the firmware). 166 */ 167 static void t4_report_fw_error(struct adapter *adap) 168 { 169 static const char *reason[] = { 170 "Crash", /* PCIE_FW_EVAL_CRASH */ 171 "During Device Preparation", /* PCIE_FW_EVAL_PREP */ 172 "During Device Configuration", /* PCIE_FW_EVAL_CONF */ 173 "During Device Initialization", /* PCIE_FW_EVAL_INIT */ 174 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ 175 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ 176 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ 177 "Reserved", /* reserved */ 178 }; 179 u32 pcie_fw; 180 181 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 182 if (pcie_fw & F_PCIE_FW_ERR) 183 CH_ERR(adap, "Firmware reports adapter error: %s\n", 184 reason[G_PCIE_FW_EVAL(pcie_fw)]); 185 } 186 187 /* 188 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 189 */ 190 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, 191 u32 mbox_addr) 192 { 193 for ( ; nflit; nflit--, mbox_addr += 8) 194 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); 195 } 196 197 /* 198 * Handle a FW assertion reported in a mailbox. 199 */ 200 static void fw_asrt(struct adapter *adap, u32 mbox_addr) 201 { 202 struct fw_debug_cmd asrt; 203 204 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); 205 CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", 206 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line), 207 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y)); 208 } 209 210 #define X_CIM_PF_NOACCESS 0xeeeeeeee 211 /** 212 * t4_wr_mbox_meat - send a command to FW through the given mailbox 213 * @adap: the adapter 214 * @mbox: index of the mailbox to use 215 * @cmd: the command to write 216 * @size: command length in bytes 217 * @rpl: where to optionally store the reply 218 * @sleep_ok: if true we may sleep while awaiting command completion 219 * 220 * Sends the given command to FW through the selected mailbox and waits 221 * for the FW to execute the command. If @rpl is not %NULL it is used to 222 * store the FW's reply to the command. The command and its optional 223 * reply are of the same length. Some FW commands like RESET and 224 * INITIALIZE can take a considerable amount of time to execute. 225 * @sleep_ok determines whether we may sleep while awaiting the response. 226 * If sleeping is allowed we use progressive backoff otherwise we spin. 227 * 228 * The return value is 0 on success or a negative errno on failure. A 229 * failure can happen either because we are not able to execute the 230 * command or FW executes it but signals an error. In the latter case 231 * the return value is the error code indicated by FW (negated). 232 */ 233 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, 234 void *rpl, bool sleep_ok) 235 { 236 /* 237 * We delay in small increments at first in an effort to maintain 238 * responsiveness for simple, fast executing commands but then back 239 * off to larger delays to a maximum retry delay. 240 */ 241 static const int delay[] = { 242 1, 1, 3, 5, 10, 10, 20, 50, 100 243 }; 244 245 u32 v; 246 u64 res; 247 int i, ms, delay_idx; 248 const __be64 *p = cmd; 249 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA); 250 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL); 251 252 if ((size & 15) || size > MBOX_LEN) 253 return -EINVAL; 254 255 v = G_MBOWNER(t4_read_reg(adap, ctl_reg)); 256 for (i = 0; v == X_MBOWNER_NONE && i < 3; i++) 257 v = G_MBOWNER(t4_read_reg(adap, ctl_reg)); 258 259 if (v != X_MBOWNER_PL) 260 return v ? -EBUSY : -ETIMEDOUT; 261 262 for (i = 0; i < size; i += 8, p++) 263 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p)); 264 265 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW)); 266 t4_read_reg(adap, ctl_reg); /* flush write */ 267 268 delay_idx = 0; 269 ms = delay[0]; 270 271 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { 272 if (sleep_ok) { 273 ms = delay[delay_idx]; /* last element may repeat */ 274 if (delay_idx < ARRAY_SIZE(delay) - 1) 275 delay_idx++; 276 msleep(ms); 277 } else 278 mdelay(ms); 279 280 v = t4_read_reg(adap, ctl_reg); 281 if (v == X_CIM_PF_NOACCESS) 282 continue; 283 if (G_MBOWNER(v) == X_MBOWNER_PL) { 284 if (!(v & F_MBMSGVALID)) { 285 t4_write_reg(adap, ctl_reg, 286 V_MBOWNER(X_MBOWNER_NONE)); 287 continue; 288 } 289 290 res = t4_read_reg64(adap, data_reg); 291 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) { 292 fw_asrt(adap, data_reg); 293 res = V_FW_CMD_RETVAL(EIO); 294 } else if (rpl) 295 get_mbox_rpl(adap, rpl, size / 8, data_reg); 296 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE)); 297 return -G_FW_CMD_RETVAL((int)res); 298 } 299 } 300 301 /* 302 * We timed out waiting for a reply to our mailbox command. Report 303 * the error and also check to see if the firmware reported any 304 * errors ... 305 */ 306 CH_ERR(adap, "command %#x in mailbox %d timed out\n", 307 *(const u8 *)cmd, mbox); 308 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR) 309 t4_report_fw_error(adap); 310 return -ETIMEDOUT; 311 } 312 313 /** 314 * t4_mc_read - read from MC through backdoor accesses 315 * @adap: the adapter 316 * @idx: which MC to access 317 * @addr: address of first byte requested 318 * @data: 64 bytes of data containing the requested address 319 * @ecc: where to store the corresponding 64-bit ECC word 320 * 321 * Read 64 bytes of data from MC starting at a 64-byte-aligned address 322 * that covers the requested address @addr. If @parity is not %NULL it 323 * is assigned the 64-bit ECC word for the read data. 324 */ 325 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) 326 { 327 int i; 328 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg; 329 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg; 330 331 if (is_t4(adap)) { 332 mc_bist_cmd_reg = A_MC_BIST_CMD; 333 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR; 334 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN; 335 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA; 336 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN; 337 } else { 338 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx); 339 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx); 340 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx); 341 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA, 342 idx); 343 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN, 344 idx); 345 } 346 347 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST) 348 return -EBUSY; 349 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU); 350 t4_write_reg(adap, mc_bist_cmd_len_reg, 64); 351 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc); 352 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) | 353 F_START_BIST | V_BIST_CMD_GAP(1)); 354 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1); 355 if (i) 356 return i; 357 358 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i) 359 360 for (i = 15; i >= 0; i--) 361 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i))); 362 if (ecc) 363 *ecc = t4_read_reg64(adap, MC_DATA(16)); 364 #undef MC_DATA 365 return 0; 366 } 367 368 /** 369 * t4_edc_read - read from EDC through backdoor accesses 370 * @adap: the adapter 371 * @idx: which EDC to access 372 * @addr: address of first byte requested 373 * @data: 64 bytes of data containing the requested address 374 * @ecc: where to store the corresponding 64-bit ECC word 375 * 376 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address 377 * that covers the requested address @addr. If @parity is not %NULL it 378 * is assigned the 64-bit ECC word for the read data. 379 */ 380 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) 381 { 382 int i; 383 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg; 384 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg; 385 386 if (is_t4(adap)) { 387 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx); 388 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx); 389 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx); 390 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN, 391 idx); 392 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA, 393 idx); 394 } else { 395 /* 396 * These macro are missing in t4_regs.h file. 397 * Added temporarily for testing. 398 */ 399 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) 400 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) 401 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx); 402 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx); 403 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx); 404 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN, 405 idx); 406 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA, 407 idx); 408 #undef EDC_REG_T5 409 #undef EDC_STRIDE_T5 410 } 411 412 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST) 413 return -EBUSY; 414 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU); 415 t4_write_reg(adap, edc_bist_cmd_len_reg, 64); 416 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc); 417 t4_write_reg(adap, edc_bist_cmd_reg, 418 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST); 419 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1); 420 if (i) 421 return i; 422 423 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i) 424 425 for (i = 15; i >= 0; i--) 426 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i))); 427 if (ecc) 428 *ecc = t4_read_reg64(adap, EDC_DATA(16)); 429 #undef EDC_DATA 430 return 0; 431 } 432 433 /** 434 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer 435 * @adap: the adapter 436 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC 437 * @addr: address within indicated memory type 438 * @len: amount of memory to read 439 * @buf: host memory buffer 440 * 441 * Reads an [almost] arbitrary memory region in the firmware: the 442 * firmware memory address, length and host buffer must be aligned on 443 * 32-bit boudaries. The memory is returned as a raw byte sequence from 444 * the firmware's memory. If this memory contains data structures which 445 * contain multi-byte integers, it's the callers responsibility to 446 * perform appropriate byte order conversions. 447 */ 448 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len, 449 __be32 *buf) 450 { 451 u32 pos, start, end, offset; 452 int ret; 453 454 /* 455 * Argument sanity checks ... 456 */ 457 if ((addr & 0x3) || (len & 0x3)) 458 return -EINVAL; 459 460 /* 461 * The underlaying EDC/MC read routines read 64 bytes at a time so we 462 * need to round down the start and round up the end. We'll start 463 * copying out of the first line at (addr - start) a word at a time. 464 */ 465 start = addr & ~(64-1); 466 end = (addr + len + 64-1) & ~(64-1); 467 offset = (addr - start)/sizeof(__be32); 468 469 for (pos = start; pos < end; pos += 64, offset = 0) { 470 __be32 data[16]; 471 472 /* 473 * Read the chip's memory block and bail if there's an error. 474 */ 475 if ((mtype == MEM_MC) || (mtype == MEM_MC1)) 476 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL); 477 else 478 ret = t4_edc_read(adap, mtype, pos, data, NULL); 479 if (ret) 480 return ret; 481 482 /* 483 * Copy the data into the caller's memory buffer. 484 */ 485 while (offset < 16 && len > 0) { 486 *buf++ = data[offset++]; 487 len -= sizeof(__be32); 488 } 489 } 490 491 return 0; 492 } 493 494 /* 495 * Partial EEPROM Vital Product Data structure. Includes only the ID and 496 * VPD-R header. 497 */ 498 struct t4_vpd_hdr { 499 u8 id_tag; 500 u8 id_len[2]; 501 u8 id_data[ID_LEN]; 502 u8 vpdr_tag; 503 u8 vpdr_len[2]; 504 }; 505 506 /* 507 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 508 */ 509 #define EEPROM_MAX_RD_POLL 40 510 #define EEPROM_MAX_WR_POLL 6 511 #define EEPROM_STAT_ADDR 0x7bfc 512 #define VPD_BASE 0x400 513 #define VPD_BASE_OLD 0 514 #define VPD_LEN 1024 515 #define VPD_INFO_FLD_HDR_SIZE 3 516 #define CHELSIO_VPD_UNIQUE_ID 0x82 517 518 /** 519 * t4_seeprom_read - read a serial EEPROM location 520 * @adapter: adapter to read 521 * @addr: EEPROM virtual address 522 * @data: where to store the read data 523 * 524 * Read a 32-bit word from a location in serial EEPROM using the card's PCI 525 * VPD capability. Note that this function must be called with a virtual 526 * address. 527 */ 528 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data) 529 { 530 u16 val; 531 int attempts = EEPROM_MAX_RD_POLL; 532 unsigned int base = adapter->params.pci.vpd_cap_addr; 533 534 if (addr >= EEPROMVSIZE || (addr & 3)) 535 return -EINVAL; 536 537 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr); 538 do { 539 udelay(10); 540 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val); 541 } while (!(val & PCI_VPD_ADDR_F) && --attempts); 542 543 if (!(val & PCI_VPD_ADDR_F)) { 544 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr); 545 return -EIO; 546 } 547 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data); 548 *data = le32_to_cpu(*data); 549 return 0; 550 } 551 552 /** 553 * t4_seeprom_write - write a serial EEPROM location 554 * @adapter: adapter to write 555 * @addr: virtual EEPROM address 556 * @data: value to write 557 * 558 * Write a 32-bit word to a location in serial EEPROM using the card's PCI 559 * VPD capability. Note that this function must be called with a virtual 560 * address. 561 */ 562 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data) 563 { 564 u16 val; 565 int attempts = EEPROM_MAX_WR_POLL; 566 unsigned int base = adapter->params.pci.vpd_cap_addr; 567 568 if (addr >= EEPROMVSIZE || (addr & 3)) 569 return -EINVAL; 570 571 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 572 cpu_to_le32(data)); 573 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, 574 (u16)addr | PCI_VPD_ADDR_F); 575 do { 576 msleep(1); 577 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val); 578 } while ((val & PCI_VPD_ADDR_F) && --attempts); 579 580 if (val & PCI_VPD_ADDR_F) { 581 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr); 582 return -EIO; 583 } 584 return 0; 585 } 586 587 /** 588 * t4_eeprom_ptov - translate a physical EEPROM address to virtual 589 * @phys_addr: the physical EEPROM address 590 * @fn: the PCI function number 591 * @sz: size of function-specific area 592 * 593 * Translate a physical EEPROM address to virtual. The first 1K is 594 * accessed through virtual addresses starting at 31K, the rest is 595 * accessed through virtual addresses starting at 0. 596 * 597 * The mapping is as follows: 598 * [0..1K) -> [31K..32K) 599 * [1K..1K+A) -> [ES-A..ES) 600 * [1K+A..ES) -> [0..ES-A-1K) 601 * 602 * where A = @fn * @sz, and ES = EEPROM size. 603 */ 604 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) 605 { 606 fn *= sz; 607 if (phys_addr < 1024) 608 return phys_addr + (31 << 10); 609 if (phys_addr < 1024 + fn) 610 return EEPROMSIZE - fn + phys_addr - 1024; 611 if (phys_addr < EEPROMSIZE) 612 return phys_addr - 1024 - fn; 613 return -EINVAL; 614 } 615 616 /** 617 * t4_seeprom_wp - enable/disable EEPROM write protection 618 * @adapter: the adapter 619 * @enable: whether to enable or disable write protection 620 * 621 * Enables or disables write protection on the serial EEPROM. 622 */ 623 int t4_seeprom_wp(struct adapter *adapter, int enable) 624 { 625 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 626 } 627 628 /** 629 * get_vpd_keyword_val - Locates an information field keyword in the VPD 630 * @v: Pointer to buffered vpd data structure 631 * @kw: The keyword to search for 632 * 633 * Returns the value of the information field keyword or 634 * -ENOENT otherwise. 635 */ 636 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw) 637 { 638 int i; 639 unsigned int offset , len; 640 const u8 *buf = &v->id_tag; 641 const u8 *vpdr_len = &v->vpdr_tag; 642 offset = sizeof(struct t4_vpd_hdr); 643 len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8); 644 645 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) { 646 return -ENOENT; 647 } 648 649 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) { 650 if(memcmp(buf + i , kw , 2) == 0){ 651 i += VPD_INFO_FLD_HDR_SIZE; 652 return i; 653 } 654 655 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2]; 656 } 657 658 return -ENOENT; 659 } 660 661 662 /** 663 * get_vpd_params - read VPD parameters from VPD EEPROM 664 * @adapter: adapter to read 665 * @p: where to store the parameters 666 * 667 * Reads card parameters stored in VPD EEPROM. 668 */ 669 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) 670 { 671 int i, ret, addr; 672 int ec, sn, pn, na; 673 u8 vpd[VPD_LEN], csum; 674 const struct t4_vpd_hdr *v; 675 676 /* 677 * Card information normally starts at VPD_BASE but early cards had 678 * it at 0. 679 */ 680 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd)); 681 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD; 682 683 for (i = 0; i < sizeof(vpd); i += 4) { 684 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i)); 685 if (ret) 686 return ret; 687 } 688 v = (const struct t4_vpd_hdr *)vpd; 689 690 #define FIND_VPD_KW(var,name) do { \ 691 var = get_vpd_keyword_val(v , name); \ 692 if (var < 0) { \ 693 CH_ERR(adapter, "missing VPD keyword " name "\n"); \ 694 return -EINVAL; \ 695 } \ 696 } while (0) 697 698 FIND_VPD_KW(i, "RV"); 699 for (csum = 0; i >= 0; i--) 700 csum += vpd[i]; 701 702 if (csum) { 703 CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum); 704 return -EINVAL; 705 } 706 FIND_VPD_KW(ec, "EC"); 707 FIND_VPD_KW(sn, "SN"); 708 FIND_VPD_KW(pn, "PN"); 709 FIND_VPD_KW(na, "NA"); 710 #undef FIND_VPD_KW 711 712 memcpy(p->id, v->id_data, ID_LEN); 713 strstrip(p->id); 714 memcpy(p->ec, vpd + ec, EC_LEN); 715 strstrip(p->ec); 716 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; 717 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 718 strstrip(p->sn); 719 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2]; 720 memcpy(p->pn, vpd + pn, min(i, PN_LEN)); 721 strstrip((char *)p->pn); 722 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2]; 723 memcpy(p->na, vpd + na, min(i, MACADDR_LEN)); 724 strstrip((char *)p->na); 725 726 return 0; 727 } 728 729 /* serial flash and firmware constants and flash config file constants */ 730 enum { 731 SF_ATTEMPTS = 10, /* max retries for SF operations */ 732 733 /* flash command opcodes */ 734 SF_PROG_PAGE = 2, /* program page */ 735 SF_WR_DISABLE = 4, /* disable writes */ 736 SF_RD_STATUS = 5, /* read status register */ 737 SF_WR_ENABLE = 6, /* enable writes */ 738 SF_RD_DATA_FAST = 0xb, /* read flash */ 739 SF_RD_ID = 0x9f, /* read ID */ 740 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 741 }; 742 743 /** 744 * sf1_read - read data from the serial flash 745 * @adapter: the adapter 746 * @byte_cnt: number of bytes to read 747 * @cont: whether another operation will be chained 748 * @lock: whether to lock SF for PL access only 749 * @valp: where to store the read data 750 * 751 * Reads up to 4 bytes of data from the serial flash. The location of 752 * the read needs to be specified prior to calling this by issuing the 753 * appropriate commands to the serial flash. 754 */ 755 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, 756 int lock, u32 *valp) 757 { 758 int ret; 759 760 if (!byte_cnt || byte_cnt > 4) 761 return -EINVAL; 762 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) 763 return -EBUSY; 764 t4_write_reg(adapter, A_SF_OP, 765 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); 766 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); 767 if (!ret) 768 *valp = t4_read_reg(adapter, A_SF_DATA); 769 return ret; 770 } 771 772 /** 773 * sf1_write - write data to the serial flash 774 * @adapter: the adapter 775 * @byte_cnt: number of bytes to write 776 * @cont: whether another operation will be chained 777 * @lock: whether to lock SF for PL access only 778 * @val: value to write 779 * 780 * Writes up to 4 bytes of data to the serial flash. The location of 781 * the write needs to be specified prior to calling this by issuing the 782 * appropriate commands to the serial flash. 783 */ 784 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, 785 int lock, u32 val) 786 { 787 if (!byte_cnt || byte_cnt > 4) 788 return -EINVAL; 789 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) 790 return -EBUSY; 791 t4_write_reg(adapter, A_SF_DATA, val); 792 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) | 793 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); 794 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); 795 } 796 797 /** 798 * flash_wait_op - wait for a flash operation to complete 799 * @adapter: the adapter 800 * @attempts: max number of polls of the status register 801 * @delay: delay between polls in ms 802 * 803 * Wait for a flash operation to complete by polling the status register. 804 */ 805 static int flash_wait_op(struct adapter *adapter, int attempts, int delay) 806 { 807 int ret; 808 u32 status; 809 810 while (1) { 811 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || 812 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) 813 return ret; 814 if (!(status & 1)) 815 return 0; 816 if (--attempts == 0) 817 return -EAGAIN; 818 if (delay) 819 msleep(delay); 820 } 821 } 822 823 /** 824 * t4_read_flash - read words from serial flash 825 * @adapter: the adapter 826 * @addr: the start address for the read 827 * @nwords: how many 32-bit words to read 828 * @data: where to store the read data 829 * @byte_oriented: whether to store data as bytes or as words 830 * 831 * Read the specified number of 32-bit words from the serial flash. 832 * If @byte_oriented is set the read data is stored as a byte array 833 * (i.e., big-endian), otherwise as 32-bit words in the platform's 834 * natural endianess. 835 */ 836 int t4_read_flash(struct adapter *adapter, unsigned int addr, 837 unsigned int nwords, u32 *data, int byte_oriented) 838 { 839 int ret; 840 841 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) 842 return -EINVAL; 843 844 addr = swab32(addr) | SF_RD_DATA_FAST; 845 846 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || 847 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) 848 return ret; 849 850 for ( ; nwords; nwords--, data++) { 851 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); 852 if (nwords == 1) 853 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 854 if (ret) 855 return ret; 856 if (byte_oriented) 857 *data = htonl(*data); 858 } 859 return 0; 860 } 861 862 /** 863 * t4_write_flash - write up to a page of data to the serial flash 864 * @adapter: the adapter 865 * @addr: the start address to write 866 * @n: length of data to write in bytes 867 * @data: the data to write 868 * @byte_oriented: whether to store data as bytes or as words 869 * 870 * Writes up to a page of data (256 bytes) to the serial flash starting 871 * at the given address. All the data must be written to the same page. 872 * If @byte_oriented is set the write data is stored as byte stream 873 * (i.e. matches what on disk), otherwise in big-endian. 874 */ 875 static int t4_write_flash(struct adapter *adapter, unsigned int addr, 876 unsigned int n, const u8 *data, int byte_oriented) 877 { 878 int ret; 879 u32 buf[SF_PAGE_SIZE / 4]; 880 unsigned int i, c, left, val, offset = addr & 0xff; 881 882 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) 883 return -EINVAL; 884 885 val = swab32(addr) | SF_PROG_PAGE; 886 887 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 888 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) 889 goto unlock; 890 891 for (left = n; left; left -= c) { 892 c = min(left, 4U); 893 for (val = 0, i = 0; i < c; ++i) 894 val = (val << 8) + *data++; 895 896 if (!byte_oriented) 897 val = htonl(val); 898 899 ret = sf1_write(adapter, c, c != left, 1, val); 900 if (ret) 901 goto unlock; 902 } 903 ret = flash_wait_op(adapter, 8, 1); 904 if (ret) 905 goto unlock; 906 907 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 908 909 /* Read the page to verify the write succeeded */ 910 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 911 byte_oriented); 912 if (ret) 913 return ret; 914 915 if (memcmp(data - n, (u8 *)buf + offset, n)) { 916 CH_ERR(adapter, "failed to correctly write the flash page " 917 "at %#x\n", addr); 918 return -EIO; 919 } 920 return 0; 921 922 unlock: 923 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 924 return ret; 925 } 926 927 /** 928 * t4_get_fw_version - read the firmware version 929 * @adapter: the adapter 930 * @vers: where to place the version 931 * 932 * Reads the FW version from flash. 933 */ 934 int t4_get_fw_version(struct adapter *adapter, u32 *vers) 935 { 936 return t4_read_flash(adapter, 937 FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1, 938 vers, 0); 939 } 940 941 /** 942 * t4_get_tp_version - read the TP microcode version 943 * @adapter: the adapter 944 * @vers: where to place the version 945 * 946 * Reads the TP microcode version from flash. 947 */ 948 int t4_get_tp_version(struct adapter *adapter, u32 *vers) 949 { 950 return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr, 951 tp_microcode_ver), 952 1, vers, 0); 953 } 954 955 /** 956 * t4_check_fw_version - check if the FW is compatible with this driver 957 * @adapter: the adapter 958 * 959 * Checks if an adapter's FW is compatible with the driver. Returns 0 960 * if there's exact match, a negative error if the version could not be 961 * read or there's a major version mismatch, and a positive value if the 962 * expected major version is found but there's a minor version mismatch. 963 */ 964 int t4_check_fw_version(struct adapter *adapter) 965 { 966 int ret, major, minor, micro; 967 int exp_major, exp_minor, exp_micro; 968 969 ret = t4_get_fw_version(adapter, &adapter->params.fw_vers); 970 if (!ret) 971 ret = t4_get_tp_version(adapter, &adapter->params.tp_vers); 972 if (ret) 973 return ret; 974 975 major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers); 976 minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers); 977 micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers); 978 979 switch (chip_id(adapter)) { 980 case CHELSIO_T4: 981 exp_major = T4FW_VERSION_MAJOR; 982 exp_minor = T4FW_VERSION_MINOR; 983 exp_micro = T4FW_VERSION_MICRO; 984 break; 985 case CHELSIO_T5: 986 exp_major = T5FW_VERSION_MAJOR; 987 exp_minor = T5FW_VERSION_MINOR; 988 exp_micro = T5FW_VERSION_MICRO; 989 break; 990 default: 991 CH_ERR(adapter, "Unsupported chip type, %x\n", 992 chip_id(adapter)); 993 return -EINVAL; 994 } 995 996 if (major != exp_major) { /* major mismatch - fail */ 997 CH_ERR(adapter, "card FW has major version %u, driver wants " 998 "%u\n", major, exp_major); 999 return -EINVAL; 1000 } 1001 1002 if (minor == exp_minor && micro == exp_micro) 1003 return 0; /* perfect match */ 1004 1005 /* Minor/micro version mismatch. Report it but often it's OK. */ 1006 return 1; 1007 } 1008 1009 /** 1010 * t4_flash_erase_sectors - erase a range of flash sectors 1011 * @adapter: the adapter 1012 * @start: the first sector to erase 1013 * @end: the last sector to erase 1014 * 1015 * Erases the sectors in the given inclusive range. 1016 */ 1017 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) 1018 { 1019 int ret = 0; 1020 1021 while (start <= end) { 1022 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 1023 (ret = sf1_write(adapter, 4, 0, 1, 1024 SF_ERASE_SECTOR | (start << 8))) != 0 || 1025 (ret = flash_wait_op(adapter, 14, 500)) != 0) { 1026 CH_ERR(adapter, "erase of flash sector %d failed, " 1027 "error %d\n", start, ret); 1028 break; 1029 } 1030 start++; 1031 } 1032 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 1033 return ret; 1034 } 1035 1036 /** 1037 * t4_flash_cfg_addr - return the address of the flash configuration file 1038 * @adapter: the adapter 1039 * 1040 * Return the address within the flash where the Firmware Configuration 1041 * File is stored, or an error if the device FLASH is too small to contain 1042 * a Firmware Configuration File. 1043 */ 1044 int t4_flash_cfg_addr(struct adapter *adapter) 1045 { 1046 /* 1047 * If the device FLASH isn't large enough to hold a Firmware 1048 * Configuration File, return an error. 1049 */ 1050 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE) 1051 return -ENOSPC; 1052 1053 return FLASH_CFG_START; 1054 } 1055 1056 /** 1057 * t4_load_cfg - download config file 1058 * @adap: the adapter 1059 * @cfg_data: the cfg text file to write 1060 * @size: text file size 1061 * 1062 * Write the supplied config text file to the card's serial flash. 1063 */ 1064 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) 1065 { 1066 int ret, i, n, cfg_addr; 1067 unsigned int addr; 1068 unsigned int flash_cfg_start_sec; 1069 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 1070 1071 cfg_addr = t4_flash_cfg_addr(adap); 1072 if (cfg_addr < 0) 1073 return cfg_addr; 1074 1075 addr = cfg_addr; 1076 flash_cfg_start_sec = addr / SF_SEC_SIZE; 1077 1078 if (size > FLASH_CFG_MAX_SIZE) { 1079 CH_ERR(adap, "cfg file too large, max is %u bytes\n", 1080 FLASH_CFG_MAX_SIZE); 1081 return -EFBIG; 1082 } 1083 1084 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */ 1085 sf_sec_size); 1086 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, 1087 flash_cfg_start_sec + i - 1); 1088 /* 1089 * If size == 0 then we're simply erasing the FLASH sectors associated 1090 * with the on-adapter Firmware Configuration File. 1091 */ 1092 if (ret || size == 0) 1093 goto out; 1094 1095 /* this will write to the flash up to SF_PAGE_SIZE at a time */ 1096 for (i = 0; i< size; i+= SF_PAGE_SIZE) { 1097 if ( (size - i) < SF_PAGE_SIZE) 1098 n = size - i; 1099 else 1100 n = SF_PAGE_SIZE; 1101 ret = t4_write_flash(adap, addr, n, cfg_data, 1); 1102 if (ret) 1103 goto out; 1104 1105 addr += SF_PAGE_SIZE; 1106 cfg_data += SF_PAGE_SIZE; 1107 } 1108 1109 out: 1110 if (ret) 1111 CH_ERR(adap, "config file %s failed %d\n", 1112 (size == 0 ? "clear" : "download"), ret); 1113 return ret; 1114 } 1115 1116 1117 /** 1118 * t4_load_fw - download firmware 1119 * @adap: the adapter 1120 * @fw_data: the firmware image to write 1121 * @size: image size 1122 * 1123 * Write the supplied firmware image to the card's serial flash. 1124 */ 1125 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) 1126 { 1127 u32 csum; 1128 int ret, addr; 1129 unsigned int i; 1130 u8 first_page[SF_PAGE_SIZE]; 1131 const u32 *p = (const u32 *)fw_data; 1132 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; 1133 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 1134 unsigned int fw_start_sec; 1135 unsigned int fw_start; 1136 unsigned int fw_size; 1137 1138 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) { 1139 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC; 1140 fw_start = FLASH_FWBOOTSTRAP_START; 1141 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE; 1142 } else { 1143 fw_start_sec = FLASH_FW_START_SEC; 1144 fw_start = FLASH_FW_START; 1145 fw_size = FLASH_FW_MAX_SIZE; 1146 } 1147 if (!size) { 1148 CH_ERR(adap, "FW image has no data\n"); 1149 return -EINVAL; 1150 } 1151 if (size & 511) { 1152 CH_ERR(adap, "FW image size not multiple of 512 bytes\n"); 1153 return -EINVAL; 1154 } 1155 if (ntohs(hdr->len512) * 512 != size) { 1156 CH_ERR(adap, "FW image size differs from size in FW header\n"); 1157 return -EINVAL; 1158 } 1159 if (size > fw_size) { 1160 CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size); 1161 return -EFBIG; 1162 } 1163 if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) || 1164 (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) { 1165 CH_ERR(adap, 1166 "FW image (%d) is not suitable for this adapter (%d)\n", 1167 hdr->chip, chip_id(adap)); 1168 return -EINVAL; 1169 } 1170 1171 for (csum = 0, i = 0; i < size / sizeof(csum); i++) 1172 csum += ntohl(p[i]); 1173 1174 if (csum != 0xffffffff) { 1175 CH_ERR(adap, "corrupted firmware image, checksum %#x\n", 1176 csum); 1177 return -EINVAL; 1178 } 1179 1180 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ 1181 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); 1182 if (ret) 1183 goto out; 1184 1185 /* 1186 * We write the correct version at the end so the driver can see a bad 1187 * version if the FW write fails. Start by writing a copy of the 1188 * first page with a bad version. 1189 */ 1190 memcpy(first_page, fw_data, SF_PAGE_SIZE); 1191 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); 1192 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1); 1193 if (ret) 1194 goto out; 1195 1196 addr = fw_start; 1197 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 1198 addr += SF_PAGE_SIZE; 1199 fw_data += SF_PAGE_SIZE; 1200 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1); 1201 if (ret) 1202 goto out; 1203 } 1204 1205 ret = t4_write_flash(adap, 1206 fw_start + offsetof(struct fw_hdr, fw_ver), 1207 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1); 1208 out: 1209 if (ret) 1210 CH_ERR(adap, "firmware download failed, error %d\n", ret); 1211 return ret; 1212 } 1213 1214 /* BIOS boot headers */ 1215 typedef struct pci_expansion_rom_header { 1216 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */ 1217 u8 reserved[22]; /* Reserved per processor Architecture data */ 1218 u8 pcir_offset[2]; /* Offset to PCI Data Structure */ 1219 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */ 1220 1221 /* Legacy PCI Expansion ROM Header */ 1222 typedef struct legacy_pci_expansion_rom_header { 1223 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */ 1224 u8 size512; /* Current Image Size in units of 512 bytes */ 1225 u8 initentry_point[4]; 1226 u8 cksum; /* Checksum computed on the entire Image */ 1227 u8 reserved[16]; /* Reserved */ 1228 u8 pcir_offset[2]; /* Offset to PCI Data Struture */ 1229 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */ 1230 1231 /* EFI PCI Expansion ROM Header */ 1232 typedef struct efi_pci_expansion_rom_header { 1233 u8 signature[2]; // ROM signature. The value 0xaa55 1234 u8 initialization_size[2]; /* Units 512. Includes this header */ 1235 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */ 1236 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */ 1237 u8 efi_machine_type[2]; /* Machine type from EFI image header */ 1238 u8 compression_type[2]; /* Compression type. */ 1239 /* 1240 * Compression type definition 1241 * 0x0: uncompressed 1242 * 0x1: Compressed 1243 * 0x2-0xFFFF: Reserved 1244 */ 1245 u8 reserved[8]; /* Reserved */ 1246 u8 efi_image_header_offset[2]; /* Offset to EFI Image */ 1247 u8 pcir_offset[2]; /* Offset to PCI Data Structure */ 1248 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */ 1249 1250 /* PCI Data Structure Format */ 1251 typedef struct pcir_data_structure { /* PCI Data Structure */ 1252 u8 signature[4]; /* Signature. The string "PCIR" */ 1253 u8 vendor_id[2]; /* Vendor Identification */ 1254 u8 device_id[2]; /* Device Identification */ 1255 u8 vital_product[2]; /* Pointer to Vital Product Data */ 1256 u8 length[2]; /* PCIR Data Structure Length */ 1257 u8 revision; /* PCIR Data Structure Revision */ 1258 u8 class_code[3]; /* Class Code */ 1259 u8 image_length[2]; /* Image Length. Multiple of 512B */ 1260 u8 code_revision[2]; /* Revision Level of Code/Data */ 1261 u8 code_type; /* Code Type. */ 1262 /* 1263 * PCI Expansion ROM Code Types 1264 * 0x00: Intel IA-32, PC-AT compatible. Legacy 1265 * 0x01: Open Firmware standard for PCI. FCODE 1266 * 0x02: Hewlett-Packard PA RISC. HP reserved 1267 * 0x03: EFI Image. EFI 1268 * 0x04-0xFF: Reserved. 1269 */ 1270 u8 indicator; /* Indicator. Identifies the last image in the ROM */ 1271 u8 reserved[2]; /* Reserved */ 1272 } pcir_data_t; /* PCI__DATA_STRUCTURE */ 1273 1274 /* BOOT constants */ 1275 enum { 1276 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */ 1277 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */ 1278 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */ 1279 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */ 1280 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */ 1281 VENDOR_ID = 0x1425, /* Vendor ID */ 1282 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */ 1283 }; 1284 1285 /* 1286 * modify_device_id - Modifies the device ID of the Boot BIOS image 1287 * @adatper: the device ID to write. 1288 * @boot_data: the boot image to modify. 1289 * 1290 * Write the supplied device ID to the boot BIOS image. 1291 */ 1292 static void modify_device_id(int device_id, u8 *boot_data) 1293 { 1294 legacy_pci_exp_rom_header_t *header; 1295 pcir_data_t *pcir_header; 1296 u32 cur_header = 0; 1297 1298 /* 1299 * Loop through all chained images and change the device ID's 1300 */ 1301 while (1) { 1302 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header]; 1303 pcir_header = (pcir_data_t *) &boot_data[cur_header + 1304 le16_to_cpu(*(u16*)header->pcir_offset)]; 1305 1306 /* 1307 * Only modify the Device ID if code type is Legacy or HP. 1308 * 0x00: Okay to modify 1309 * 0x01: FCODE. Do not be modify 1310 * 0x03: Okay to modify 1311 * 0x04-0xFF: Do not modify 1312 */ 1313 if (pcir_header->code_type == 0x00) { 1314 u8 csum = 0; 1315 int i; 1316 1317 /* 1318 * Modify Device ID to match current adatper 1319 */ 1320 *(u16*) pcir_header->device_id = device_id; 1321 1322 /* 1323 * Set checksum temporarily to 0. 1324 * We will recalculate it later. 1325 */ 1326 header->cksum = 0x0; 1327 1328 /* 1329 * Calculate and update checksum 1330 */ 1331 for (i = 0; i < (header->size512 * 512); i++) 1332 csum += (u8)boot_data[cur_header + i]; 1333 1334 /* 1335 * Invert summed value to create the checksum 1336 * Writing new checksum value directly to the boot data 1337 */ 1338 boot_data[cur_header + 7] = -csum; 1339 1340 } else if (pcir_header->code_type == 0x03) { 1341 1342 /* 1343 * Modify Device ID to match current adatper 1344 */ 1345 *(u16*) pcir_header->device_id = device_id; 1346 1347 } 1348 1349 1350 /* 1351 * Check indicator element to identify if this is the last 1352 * image in the ROM. 1353 */ 1354 if (pcir_header->indicator & 0x80) 1355 break; 1356 1357 /* 1358 * Move header pointer up to the next image in the ROM. 1359 */ 1360 cur_header += header->size512 * 512; 1361 } 1362 } 1363 1364 /* 1365 * t4_load_boot - download boot flash 1366 * @adapter: the adapter 1367 * @boot_data: the boot image to write 1368 * @boot_addr: offset in flash to write boot_data 1369 * @size: image size 1370 * 1371 * Write the supplied boot image to the card's serial flash. 1372 * The boot image has the following sections: a 28-byte header and the 1373 * boot image. 1374 */ 1375 int t4_load_boot(struct adapter *adap, u8 *boot_data, 1376 unsigned int boot_addr, unsigned int size) 1377 { 1378 pci_exp_rom_header_t *header; 1379 int pcir_offset ; 1380 pcir_data_t *pcir_header; 1381 int ret, addr; 1382 uint16_t device_id; 1383 unsigned int i; 1384 unsigned int boot_sector = boot_addr * 1024; 1385 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 1386 1387 /* 1388 * Make sure the boot image does not encroach on the firmware region 1389 */ 1390 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) { 1391 CH_ERR(adap, "boot image encroaching on firmware region\n"); 1392 return -EFBIG; 1393 } 1394 1395 /* 1396 * Number of sectors spanned 1397 */ 1398 i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE, 1399 sf_sec_size); 1400 ret = t4_flash_erase_sectors(adap, boot_sector >> 16, 1401 (boot_sector >> 16) + i - 1); 1402 1403 /* 1404 * If size == 0 then we're simply erasing the FLASH sectors associated 1405 * with the on-adapter option ROM file 1406 */ 1407 if (ret || (size == 0)) 1408 goto out; 1409 1410 /* Get boot header */ 1411 header = (pci_exp_rom_header_t *)boot_data; 1412 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset); 1413 /* PCIR Data Structure */ 1414 pcir_header = (pcir_data_t *) &boot_data[pcir_offset]; 1415 1416 /* 1417 * Perform some primitive sanity testing to avoid accidentally 1418 * writing garbage over the boot sectors. We ought to check for 1419 * more but it's not worth it for now ... 1420 */ 1421 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) { 1422 CH_ERR(adap, "boot image too small/large\n"); 1423 return -EFBIG; 1424 } 1425 1426 /* 1427 * Check BOOT ROM header signature 1428 */ 1429 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) { 1430 CH_ERR(adap, "Boot image missing signature\n"); 1431 return -EINVAL; 1432 } 1433 1434 /* 1435 * Check PCI header signature 1436 */ 1437 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) { 1438 CH_ERR(adap, "PCI header missing signature\n"); 1439 return -EINVAL; 1440 } 1441 1442 /* 1443 * Check Vendor ID matches Chelsio ID 1444 */ 1445 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) { 1446 CH_ERR(adap, "Vendor ID missing signature\n"); 1447 return -EINVAL; 1448 } 1449 1450 /* 1451 * Retrieve adapter's device ID 1452 */ 1453 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id); 1454 /* Want to deal with PF 0 so I strip off PF 4 indicator */ 1455 device_id = (device_id & 0xff) | 0x4000; 1456 1457 /* 1458 * Check PCIE Device ID 1459 */ 1460 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) { 1461 /* 1462 * Change the device ID in the Boot BIOS image to match 1463 * the Device ID of the current adapter. 1464 */ 1465 modify_device_id(device_id, boot_data); 1466 } 1467 1468 /* 1469 * Skip over the first SF_PAGE_SIZE worth of data and write it after 1470 * we finish copying the rest of the boot image. This will ensure 1471 * that the BIOS boot header will only be written if the boot image 1472 * was written in full. 1473 */ 1474 addr = boot_sector; 1475 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 1476 addr += SF_PAGE_SIZE; 1477 boot_data += SF_PAGE_SIZE; 1478 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0); 1479 if (ret) 1480 goto out; 1481 } 1482 1483 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0); 1484 1485 out: 1486 if (ret) 1487 CH_ERR(adap, "boot image download failed, error %d\n", ret); 1488 return ret; 1489 } 1490 1491 /** 1492 * t4_read_cimq_cfg - read CIM queue configuration 1493 * @adap: the adapter 1494 * @base: holds the queue base addresses in bytes 1495 * @size: holds the queue sizes in bytes 1496 * @thres: holds the queue full thresholds in bytes 1497 * 1498 * Returns the current configuration of the CIM queues, starting with 1499 * the IBQs, then the OBQs. 1500 */ 1501 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres) 1502 { 1503 unsigned int i, v; 1504 int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5; 1505 1506 for (i = 0; i < CIM_NUM_IBQ; i++) { 1507 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT | 1508 V_QUENUMSELECT(i)); 1509 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); 1510 *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */ 1511 *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */ 1512 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */ 1513 } 1514 for (i = 0; i < cim_num_obq; i++) { 1515 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT | 1516 V_QUENUMSELECT(i)); 1517 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); 1518 *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */ 1519 *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */ 1520 } 1521 } 1522 1523 /** 1524 * t4_read_cim_ibq - read the contents of a CIM inbound queue 1525 * @adap: the adapter 1526 * @qid: the queue index 1527 * @data: where to store the queue contents 1528 * @n: capacity of @data in 32-bit words 1529 * 1530 * Reads the contents of the selected CIM queue starting at address 0 up 1531 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on 1532 * error and the number of 32-bit words actually read on success. 1533 */ 1534 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) 1535 { 1536 int i, err; 1537 unsigned int addr; 1538 const unsigned int nwords = CIM_IBQ_SIZE * 4; 1539 1540 if (qid > 5 || (n & 3)) 1541 return -EINVAL; 1542 1543 addr = qid * nwords; 1544 if (n > nwords) 1545 n = nwords; 1546 1547 for (i = 0; i < n; i++, addr++) { 1548 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) | 1549 F_IBQDBGEN); 1550 /* 1551 * It might take 3-10ms before the IBQ debug read access is 1552 * allowed. Wait for 1 Sec with a delay of 1 usec. 1553 */ 1554 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0, 1555 1000000, 1); 1556 if (err) 1557 return err; 1558 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA); 1559 } 1560 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0); 1561 return i; 1562 } 1563 1564 /** 1565 * t4_read_cim_obq - read the contents of a CIM outbound queue 1566 * @adap: the adapter 1567 * @qid: the queue index 1568 * @data: where to store the queue contents 1569 * @n: capacity of @data in 32-bit words 1570 * 1571 * Reads the contents of the selected CIM queue starting at address 0 up 1572 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on 1573 * error and the number of 32-bit words actually read on success. 1574 */ 1575 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) 1576 { 1577 int i, err; 1578 unsigned int addr, v, nwords; 1579 int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5; 1580 1581 if (qid >= cim_num_obq || (n & 3)) 1582 return -EINVAL; 1583 1584 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT | 1585 V_QUENUMSELECT(qid)); 1586 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); 1587 1588 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */ 1589 nwords = G_CIMQSIZE(v) * 64; /* same */ 1590 if (n > nwords) 1591 n = nwords; 1592 1593 for (i = 0; i < n; i++, addr++) { 1594 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) | 1595 F_OBQDBGEN); 1596 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 1597 2, 1); 1598 if (err) 1599 return err; 1600 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA); 1601 } 1602 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0); 1603 return i; 1604 } 1605 1606 enum { 1607 CIM_QCTL_BASE = 0, 1608 CIM_CTL_BASE = 0x2000, 1609 CIM_PBT_ADDR_BASE = 0x2800, 1610 CIM_PBT_LRF_BASE = 0x3000, 1611 CIM_PBT_DATA_BASE = 0x3800 1612 }; 1613 1614 /** 1615 * t4_cim_read - read a block from CIM internal address space 1616 * @adap: the adapter 1617 * @addr: the start address within the CIM address space 1618 * @n: number of words to read 1619 * @valp: where to store the result 1620 * 1621 * Reads a block of 4-byte words from the CIM intenal address space. 1622 */ 1623 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, 1624 unsigned int *valp) 1625 { 1626 int ret = 0; 1627 1628 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) 1629 return -EBUSY; 1630 1631 for ( ; !ret && n--; addr += 4) { 1632 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr); 1633 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 1634 0, 5, 2); 1635 if (!ret) 1636 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA); 1637 } 1638 return ret; 1639 } 1640 1641 /** 1642 * t4_cim_write - write a block into CIM internal address space 1643 * @adap: the adapter 1644 * @addr: the start address within the CIM address space 1645 * @n: number of words to write 1646 * @valp: set of values to write 1647 * 1648 * Writes a block of 4-byte words into the CIM intenal address space. 1649 */ 1650 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, 1651 const unsigned int *valp) 1652 { 1653 int ret = 0; 1654 1655 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) 1656 return -EBUSY; 1657 1658 for ( ; !ret && n--; addr += 4) { 1659 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++); 1660 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE); 1661 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 1662 0, 5, 2); 1663 } 1664 return ret; 1665 } 1666 1667 static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val) 1668 { 1669 return t4_cim_write(adap, addr, 1, &val); 1670 } 1671 1672 /** 1673 * t4_cim_ctl_read - read a block from CIM control region 1674 * @adap: the adapter 1675 * @addr: the start address within the CIM control region 1676 * @n: number of words to read 1677 * @valp: where to store the result 1678 * 1679 * Reads a block of 4-byte words from the CIM control region. 1680 */ 1681 int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n, 1682 unsigned int *valp) 1683 { 1684 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp); 1685 } 1686 1687 /** 1688 * t4_cim_read_la - read CIM LA capture buffer 1689 * @adap: the adapter 1690 * @la_buf: where to store the LA data 1691 * @wrptr: the HW write pointer within the capture buffer 1692 * 1693 * Reads the contents of the CIM LA buffer with the most recent entry at 1694 * the end of the returned data and with the entry at @wrptr first. 1695 * We try to leave the LA in the running state we find it in. 1696 */ 1697 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) 1698 { 1699 int i, ret; 1700 unsigned int cfg, val, idx; 1701 1702 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg); 1703 if (ret) 1704 return ret; 1705 1706 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */ 1707 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0); 1708 if (ret) 1709 return ret; 1710 } 1711 1712 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val); 1713 if (ret) 1714 goto restart; 1715 1716 idx = G_UPDBGLAWRPTR(val); 1717 if (wrptr) 1718 *wrptr = idx; 1719 1720 for (i = 0; i < adap->params.cim_la_size; i++) { 1721 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 1722 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN); 1723 if (ret) 1724 break; 1725 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val); 1726 if (ret) 1727 break; 1728 if (val & F_UPDBGLARDEN) { 1729 ret = -ETIMEDOUT; 1730 break; 1731 } 1732 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]); 1733 if (ret) 1734 break; 1735 idx = (idx + 1) & M_UPDBGLARDPTR; 1736 } 1737 restart: 1738 if (cfg & F_UPDBGLAEN) { 1739 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 1740 cfg & ~F_UPDBGLARDEN); 1741 if (!ret) 1742 ret = r; 1743 } 1744 return ret; 1745 } 1746 1747 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, 1748 unsigned int *pif_req_wrptr, 1749 unsigned int *pif_rsp_wrptr) 1750 { 1751 int i, j; 1752 u32 cfg, val, req, rsp; 1753 1754 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG); 1755 if (cfg & F_LADBGEN) 1756 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); 1757 1758 val = t4_read_reg(adap, A_CIM_DEBUGSTS); 1759 req = G_POLADBGWRPTR(val); 1760 rsp = G_PILADBGWRPTR(val); 1761 if (pif_req_wrptr) 1762 *pif_req_wrptr = req; 1763 if (pif_rsp_wrptr) 1764 *pif_rsp_wrptr = rsp; 1765 1766 for (i = 0; i < CIM_PIFLA_SIZE; i++) { 1767 for (j = 0; j < 6; j++) { 1768 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) | 1769 V_PILADBGRDPTR(rsp)); 1770 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA); 1771 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA); 1772 req++; 1773 rsp++; 1774 } 1775 req = (req + 2) & M_POLADBGRDPTR; 1776 rsp = (rsp + 2) & M_PILADBGRDPTR; 1777 } 1778 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); 1779 } 1780 1781 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp) 1782 { 1783 u32 cfg; 1784 int i, j, idx; 1785 1786 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG); 1787 if (cfg & F_LADBGEN) 1788 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); 1789 1790 for (i = 0; i < CIM_MALA_SIZE; i++) { 1791 for (j = 0; j < 5; j++) { 1792 idx = 8 * i + j; 1793 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) | 1794 V_PILADBGRDPTR(idx)); 1795 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA); 1796 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA); 1797 } 1798 } 1799 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); 1800 } 1801 1802 /** 1803 * t4_tp_read_la - read TP LA capture buffer 1804 * @adap: the adapter 1805 * @la_buf: where to store the LA data 1806 * @wrptr: the HW write pointer within the capture buffer 1807 * 1808 * Reads the contents of the TP LA buffer with the most recent entry at 1809 * the end of the returned data and with the entry at @wrptr first. 1810 * We leave the LA in the running state we find it in. 1811 */ 1812 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr) 1813 { 1814 bool last_incomplete; 1815 unsigned int i, cfg, val, idx; 1816 1817 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff; 1818 if (cfg & F_DBGLAENABLE) /* freeze LA */ 1819 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, 1820 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE)); 1821 1822 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG); 1823 idx = G_DBGLAWPTR(val); 1824 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0; 1825 if (last_incomplete) 1826 idx = (idx + 1) & M_DBGLARPTR; 1827 if (wrptr) 1828 *wrptr = idx; 1829 1830 val &= 0xffff; 1831 val &= ~V_DBGLARPTR(M_DBGLARPTR); 1832 val |= adap->params.tp.la_mask; 1833 1834 for (i = 0; i < TPLA_SIZE; i++) { 1835 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val); 1836 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL); 1837 idx = (idx + 1) & M_DBGLARPTR; 1838 } 1839 1840 /* Wipe out last entry if it isn't valid */ 1841 if (last_incomplete) 1842 la_buf[TPLA_SIZE - 1] = ~0ULL; 1843 1844 if (cfg & F_DBGLAENABLE) /* restore running state */ 1845 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, 1846 cfg | adap->params.tp.la_mask); 1847 } 1848 1849 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) 1850 { 1851 unsigned int i, j; 1852 1853 for (i = 0; i < 8; i++) { 1854 u32 *p = la_buf + i; 1855 1856 t4_write_reg(adap, A_ULP_RX_LA_CTL, i); 1857 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR); 1858 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j); 1859 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8) 1860 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA); 1861 } 1862 } 1863 1864 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ 1865 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ 1866 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) 1867 1868 /** 1869 * t4_link_start - apply link configuration to MAC/PHY 1870 * @phy: the PHY to setup 1871 * @mac: the MAC to setup 1872 * @lc: the requested link configuration 1873 * 1874 * Set up a port's MAC and PHY according to a desired link configuration. 1875 * - If the PHY can auto-negotiate first decide what to advertise, then 1876 * enable/disable auto-negotiation as desired, and reset. 1877 * - If the PHY does not auto-negotiate just reset it. 1878 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, 1879 * otherwise do it later based on the outcome of auto-negotiation. 1880 */ 1881 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, 1882 struct link_config *lc) 1883 { 1884 struct fw_port_cmd c; 1885 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO); 1886 1887 lc->link_ok = 0; 1888 if (lc->requested_fc & PAUSE_RX) 1889 fc |= FW_PORT_CAP_FC_RX; 1890 if (lc->requested_fc & PAUSE_TX) 1891 fc |= FW_PORT_CAP_FC_TX; 1892 1893 memset(&c, 0, sizeof(c)); 1894 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST | 1895 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port)); 1896 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | 1897 FW_LEN16(c)); 1898 1899 if (!(lc->supported & FW_PORT_CAP_ANEG)) { 1900 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc); 1901 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); 1902 } else if (lc->autoneg == AUTONEG_DISABLE) { 1903 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi); 1904 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); 1905 } else 1906 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi); 1907 1908 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 1909 } 1910 1911 /** 1912 * t4_restart_aneg - restart autonegotiation 1913 * @adap: the adapter 1914 * @mbox: mbox to use for the FW command 1915 * @port: the port id 1916 * 1917 * Restarts autonegotiation for the selected port. 1918 */ 1919 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) 1920 { 1921 struct fw_port_cmd c; 1922 1923 memset(&c, 0, sizeof(c)); 1924 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST | 1925 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port)); 1926 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | 1927 FW_LEN16(c)); 1928 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); 1929 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 1930 } 1931 1932 struct intr_info { 1933 unsigned int mask; /* bits to check in interrupt status */ 1934 const char *msg; /* message to print or NULL */ 1935 short stat_idx; /* stat counter to increment or -1 */ 1936 unsigned short fatal; /* whether the condition reported is fatal */ 1937 }; 1938 1939 /** 1940 * t4_handle_intr_status - table driven interrupt handler 1941 * @adapter: the adapter that generated the interrupt 1942 * @reg: the interrupt status register to process 1943 * @acts: table of interrupt actions 1944 * 1945 * A table driven interrupt handler that applies a set of masks to an 1946 * interrupt status word and performs the corresponding actions if the 1947 * interrupts described by the mask have occured. The actions include 1948 * optionally emitting a warning or alert message. The table is terminated 1949 * by an entry specifying mask 0. Returns the number of fatal interrupt 1950 * conditions. 1951 */ 1952 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, 1953 const struct intr_info *acts) 1954 { 1955 int fatal = 0; 1956 unsigned int mask = 0; 1957 unsigned int status = t4_read_reg(adapter, reg); 1958 1959 for ( ; acts->mask; ++acts) { 1960 if (!(status & acts->mask)) 1961 continue; 1962 if (acts->fatal) { 1963 fatal++; 1964 CH_ALERT(adapter, "%s (0x%x)\n", 1965 acts->msg, status & acts->mask); 1966 } else if (acts->msg) 1967 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", 1968 acts->msg, status & acts->mask); 1969 mask |= acts->mask; 1970 } 1971 status &= mask; 1972 if (status) /* clear processed interrupts */ 1973 t4_write_reg(adapter, reg, status); 1974 return fatal; 1975 } 1976 1977 /* 1978 * Interrupt handler for the PCIE module. 1979 */ 1980 static void pcie_intr_handler(struct adapter *adapter) 1981 { 1982 static struct intr_info sysbus_intr_info[] = { 1983 { F_RNPP, "RXNP array parity error", -1, 1 }, 1984 { F_RPCP, "RXPC array parity error", -1, 1 }, 1985 { F_RCIP, "RXCIF array parity error", -1, 1 }, 1986 { F_RCCP, "Rx completions control array parity error", -1, 1 }, 1987 { F_RFTP, "RXFT array parity error", -1, 1 }, 1988 { 0 } 1989 }; 1990 static struct intr_info pcie_port_intr_info[] = { 1991 { F_TPCP, "TXPC array parity error", -1, 1 }, 1992 { F_TNPP, "TXNP array parity error", -1, 1 }, 1993 { F_TFTP, "TXFT array parity error", -1, 1 }, 1994 { F_TCAP, "TXCA array parity error", -1, 1 }, 1995 { F_TCIP, "TXCIF array parity error", -1, 1 }, 1996 { F_RCAP, "RXCA array parity error", -1, 1 }, 1997 { F_OTDD, "outbound request TLP discarded", -1, 1 }, 1998 { F_RDPE, "Rx data parity error", -1, 1 }, 1999 { F_TDUE, "Tx uncorrectable data error", -1, 1 }, 2000 { 0 } 2001 }; 2002 static struct intr_info pcie_intr_info[] = { 2003 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, 2004 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, 2005 { F_MSIDATAPERR, "MSI data parity error", -1, 1 }, 2006 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 2007 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 2008 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 2009 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 2010 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, 2011 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, 2012 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 2013 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, 2014 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 2015 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 2016 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, 2017 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 2018 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 2019 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, 2020 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 2021 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 2022 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 2023 { F_FIDPERR, "PCI FID parity error", -1, 1 }, 2024 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, 2025 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 }, 2026 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 2027 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, 2028 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 }, 2029 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 }, 2030 { F_PCIESINT, "PCI core secondary fault", -1, 1 }, 2031 { F_PCIEPINT, "PCI core primary fault", -1, 1 }, 2032 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1, 2033 0 }, 2034 { 0 } 2035 }; 2036 2037 static struct intr_info t5_pcie_intr_info[] = { 2038 { F_MSTGRPPERR, "Master Response Read Queue parity error", 2039 -1, 1 }, 2040 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 }, 2041 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 }, 2042 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 2043 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 2044 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 2045 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 2046 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error", 2047 -1, 1 }, 2048 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error", 2049 -1, 1 }, 2050 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 2051 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 }, 2052 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 2053 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 2054 { F_DREQWRPERR, "PCI DMA channel write request parity error", 2055 -1, 1 }, 2056 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 2057 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 2058 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 }, 2059 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 2060 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 2061 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 2062 { F_FIDPERR, "PCI FID parity error", -1, 1 }, 2063 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 }, 2064 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 }, 2065 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 2066 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error", 2067 -1, 1 }, 2068 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error", 2069 -1, 1 }, 2070 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 }, 2071 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 }, 2072 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 }, 2073 { F_READRSPERR, "Outbound read error", -1, 2074 0 }, 2075 { 0 } 2076 }; 2077 2078 int fat; 2079 2080 if (is_t4(adapter)) 2081 fat = t4_handle_intr_status(adapter, 2082 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 2083 sysbus_intr_info) + 2084 t4_handle_intr_status(adapter, 2085 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 2086 pcie_port_intr_info) + 2087 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, 2088 pcie_intr_info); 2089 else 2090 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, 2091 t5_pcie_intr_info); 2092 if (fat) 2093 t4_fatal_err(adapter); 2094 } 2095 2096 /* 2097 * TP interrupt handler. 2098 */ 2099 static void tp_intr_handler(struct adapter *adapter) 2100 { 2101 static struct intr_info tp_intr_info[] = { 2102 { 0x3fffffff, "TP parity error", -1, 1 }, 2103 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 2104 { 0 } 2105 }; 2106 2107 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info)) 2108 t4_fatal_err(adapter); 2109 } 2110 2111 /* 2112 * SGE interrupt handler. 2113 */ 2114 static void sge_intr_handler(struct adapter *adapter) 2115 { 2116 u64 v; 2117 u32 err; 2118 2119 static struct intr_info sge_intr_info[] = { 2120 { F_ERR_CPL_EXCEED_IQE_SIZE, 2121 "SGE received CPL exceeding IQE size", -1, 1 }, 2122 { F_ERR_INVALID_CIDX_INC, 2123 "SGE GTS CIDX increment too large", -1, 0 }, 2124 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 2125 { F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, 2126 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0, 2127 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 2128 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 2129 0 }, 2130 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 2131 0 }, 2132 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 2133 0 }, 2134 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 2135 0 }, 2136 { F_ERR_ING_CTXT_PRIO, 2137 "SGE too many priority ingress contexts", -1, 0 }, 2138 { F_ERR_EGR_CTXT_PRIO, 2139 "SGE too many priority egress contexts", -1, 0 }, 2140 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, 2141 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, 2142 { 0 } 2143 }; 2144 2145 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) | 2146 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32); 2147 if (v) { 2148 CH_ALERT(adapter, "SGE parity error (%#llx)\n", 2149 (unsigned long long)v); 2150 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v); 2151 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32); 2152 } 2153 2154 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info); 2155 2156 err = t4_read_reg(adapter, A_SGE_ERROR_STATS); 2157 if (err & F_ERROR_QID_VALID) { 2158 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err)); 2159 if (err & F_UNCAPTURED_ERROR) 2160 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n"); 2161 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID | 2162 F_UNCAPTURED_ERROR); 2163 } 2164 2165 if (v != 0) 2166 t4_fatal_err(adapter); 2167 } 2168 2169 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\ 2170 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR) 2171 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\ 2172 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR) 2173 2174 /* 2175 * CIM interrupt handler. 2176 */ 2177 static void cim_intr_handler(struct adapter *adapter) 2178 { 2179 static struct intr_info cim_intr_info[] = { 2180 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 2181 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 2182 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 2183 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, 2184 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, 2185 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, 2186 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 2187 { 0 } 2188 }; 2189 static struct intr_info cim_upintr_info[] = { 2190 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 2191 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 2192 { F_ILLWRINT, "CIM illegal write", -1, 1 }, 2193 { F_ILLRDINT, "CIM illegal read", -1, 1 }, 2194 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 }, 2195 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 }, 2196 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, 2197 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, 2198 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, 2199 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, 2200 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, 2201 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, 2202 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, 2203 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, 2204 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, 2205 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, 2206 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, 2207 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, 2208 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, 2209 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, 2210 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 }, 2211 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 }, 2212 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 }, 2213 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 }, 2214 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, 2215 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, 2216 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 }, 2217 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, 2218 { 0 } 2219 }; 2220 int fat; 2221 2222 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR) 2223 t4_report_fw_error(adapter); 2224 2225 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 2226 cim_intr_info) + 2227 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE, 2228 cim_upintr_info); 2229 if (fat) 2230 t4_fatal_err(adapter); 2231 } 2232 2233 /* 2234 * ULP RX interrupt handler. 2235 */ 2236 static void ulprx_intr_handler(struct adapter *adapter) 2237 { 2238 static struct intr_info ulprx_intr_info[] = { 2239 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 }, 2240 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 }, 2241 { 0x7fffff, "ULPRX parity error", -1, 1 }, 2242 { 0 } 2243 }; 2244 2245 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info)) 2246 t4_fatal_err(adapter); 2247 } 2248 2249 /* 2250 * ULP TX interrupt handler. 2251 */ 2252 static void ulptx_intr_handler(struct adapter *adapter) 2253 { 2254 static struct intr_info ulptx_intr_info[] = { 2255 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 2256 0 }, 2257 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 2258 0 }, 2259 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 2260 0 }, 2261 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 2262 0 }, 2263 { 0xfffffff, "ULPTX parity error", -1, 1 }, 2264 { 0 } 2265 }; 2266 2267 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info)) 2268 t4_fatal_err(adapter); 2269 } 2270 2271 /* 2272 * PM TX interrupt handler. 2273 */ 2274 static void pmtx_intr_handler(struct adapter *adapter) 2275 { 2276 static struct intr_info pmtx_intr_info[] = { 2277 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 2278 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 2279 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 2280 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 2281 { 0xffffff0, "PMTX framing error", -1, 1 }, 2282 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 2283 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 2284 1 }, 2285 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 2286 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 2287 { 0 } 2288 }; 2289 2290 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info)) 2291 t4_fatal_err(adapter); 2292 } 2293 2294 /* 2295 * PM RX interrupt handler. 2296 */ 2297 static void pmrx_intr_handler(struct adapter *adapter) 2298 { 2299 static struct intr_info pmrx_intr_info[] = { 2300 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 2301 { 0x3ffff0, "PMRX framing error", -1, 1 }, 2302 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 2303 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 2304 1 }, 2305 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 2306 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 2307 { 0 } 2308 }; 2309 2310 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info)) 2311 t4_fatal_err(adapter); 2312 } 2313 2314 /* 2315 * CPL switch interrupt handler. 2316 */ 2317 static void cplsw_intr_handler(struct adapter *adapter) 2318 { 2319 static struct intr_info cplsw_intr_info[] = { 2320 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 2321 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 2322 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 2323 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 2324 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 2325 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 2326 { 0 } 2327 }; 2328 2329 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info)) 2330 t4_fatal_err(adapter); 2331 } 2332 2333 /* 2334 * LE interrupt handler. 2335 */ 2336 static void le_intr_handler(struct adapter *adap) 2337 { 2338 static struct intr_info le_intr_info[] = { 2339 { F_LIPMISS, "LE LIP miss", -1, 0 }, 2340 { F_LIP0, "LE 0 LIP error", -1, 0 }, 2341 { F_PARITYERR, "LE parity error", -1, 1 }, 2342 { F_UNKNOWNCMD, "LE unknown command", -1, 1 }, 2343 { F_REQQPARERR, "LE request queue parity error", -1, 1 }, 2344 { 0 } 2345 }; 2346 2347 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info)) 2348 t4_fatal_err(adap); 2349 } 2350 2351 /* 2352 * MPS interrupt handler. 2353 */ 2354 static void mps_intr_handler(struct adapter *adapter) 2355 { 2356 static struct intr_info mps_rx_intr_info[] = { 2357 { 0xffffff, "MPS Rx parity error", -1, 1 }, 2358 { 0 } 2359 }; 2360 static struct intr_info mps_tx_intr_info[] = { 2361 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 }, 2362 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 2363 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error", 2364 -1, 1 }, 2365 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error", 2366 -1, 1 }, 2367 { F_BUBBLE, "MPS Tx underflow", -1, 1 }, 2368 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 2369 { F_FRMERR, "MPS Tx framing error", -1, 1 }, 2370 { 0 } 2371 }; 2372 static struct intr_info mps_trc_intr_info[] = { 2373 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 }, 2374 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1, 2375 1 }, 2376 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 }, 2377 { 0 } 2378 }; 2379 static struct intr_info mps_stat_sram_intr_info[] = { 2380 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 2381 { 0 } 2382 }; 2383 static struct intr_info mps_stat_tx_intr_info[] = { 2384 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 2385 { 0 } 2386 }; 2387 static struct intr_info mps_stat_rx_intr_info[] = { 2388 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 2389 { 0 } 2390 }; 2391 static struct intr_info mps_cls_intr_info[] = { 2392 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 2393 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 2394 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 2395 { 0 } 2396 }; 2397 2398 int fat; 2399 2400 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE, 2401 mps_rx_intr_info) + 2402 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE, 2403 mps_tx_intr_info) + 2404 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE, 2405 mps_trc_intr_info) + 2406 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM, 2407 mps_stat_sram_intr_info) + 2408 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 2409 mps_stat_tx_intr_info) + 2410 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 2411 mps_stat_rx_intr_info) + 2412 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE, 2413 mps_cls_intr_info); 2414 2415 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0); 2416 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */ 2417 if (fat) 2418 t4_fatal_err(adapter); 2419 } 2420 2421 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE) 2422 2423 /* 2424 * EDC/MC interrupt handler. 2425 */ 2426 static void mem_intr_handler(struct adapter *adapter, int idx) 2427 { 2428 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 2429 2430 unsigned int addr, cnt_addr, v; 2431 2432 if (idx <= MEM_EDC1) { 2433 addr = EDC_REG(A_EDC_INT_CAUSE, idx); 2434 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx); 2435 } else { 2436 if (is_t4(adapter)) { 2437 addr = A_MC_INT_CAUSE; 2438 cnt_addr = A_MC_ECC_STATUS; 2439 } else { 2440 addr = A_MC_P_INT_CAUSE; 2441 cnt_addr = A_MC_P_ECC_STATUS; 2442 } 2443 } 2444 2445 v = t4_read_reg(adapter, addr) & MEM_INT_MASK; 2446 if (v & F_PERR_INT_CAUSE) 2447 CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]); 2448 if (v & F_ECC_CE_INT_CAUSE) { 2449 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr)); 2450 2451 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT)); 2452 CH_WARN_RATELIMIT(adapter, 2453 "%u %s correctable ECC data error%s\n", 2454 cnt, name[idx], cnt > 1 ? "s" : ""); 2455 } 2456 if (v & F_ECC_UE_INT_CAUSE) 2457 CH_ALERT(adapter, "%s uncorrectable ECC data error\n", 2458 name[idx]); 2459 2460 t4_write_reg(adapter, addr, v); 2461 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE)) 2462 t4_fatal_err(adapter); 2463 } 2464 2465 /* 2466 * MA interrupt handler. 2467 */ 2468 static void ma_intr_handler(struct adapter *adapter) 2469 { 2470 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE); 2471 2472 if (status & F_MEM_PERR_INT_CAUSE) { 2473 CH_ALERT(adapter, "MA parity error, parity status %#x\n", 2474 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1)); 2475 if (is_t5(adapter)) 2476 CH_ALERT(adapter, 2477 "MA parity error, parity status %#x\n", 2478 t4_read_reg(adapter, 2479 A_MA_PARITY_ERROR_STATUS2)); 2480 } 2481 if (status & F_MEM_WRAP_INT_CAUSE) { 2482 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS); 2483 CH_ALERT(adapter, "MA address wrap-around error by client %u to" 2484 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v), 2485 G_MEM_WRAP_ADDRESS(v) << 4); 2486 } 2487 t4_write_reg(adapter, A_MA_INT_CAUSE, status); 2488 t4_fatal_err(adapter); 2489 } 2490 2491 /* 2492 * SMB interrupt handler. 2493 */ 2494 static void smb_intr_handler(struct adapter *adap) 2495 { 2496 static struct intr_info smb_intr_info[] = { 2497 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 2498 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 2499 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 2500 { 0 } 2501 }; 2502 2503 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info)) 2504 t4_fatal_err(adap); 2505 } 2506 2507 /* 2508 * NC-SI interrupt handler. 2509 */ 2510 static void ncsi_intr_handler(struct adapter *adap) 2511 { 2512 static struct intr_info ncsi_intr_info[] = { 2513 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 2514 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 2515 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 2516 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 2517 { 0 } 2518 }; 2519 2520 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info)) 2521 t4_fatal_err(adap); 2522 } 2523 2524 /* 2525 * XGMAC interrupt handler. 2526 */ 2527 static void xgmac_intr_handler(struct adapter *adap, int port) 2528 { 2529 u32 v, int_cause_reg; 2530 2531 if (is_t4(adap)) 2532 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE); 2533 else 2534 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE); 2535 2536 v = t4_read_reg(adap, int_cause_reg); 2537 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR); 2538 if (!v) 2539 return; 2540 2541 if (v & F_TXFIFO_PRTY_ERR) 2542 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port); 2543 if (v & F_RXFIFO_PRTY_ERR) 2544 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port); 2545 t4_write_reg(adap, int_cause_reg, v); 2546 t4_fatal_err(adap); 2547 } 2548 2549 /* 2550 * PL interrupt handler. 2551 */ 2552 static void pl_intr_handler(struct adapter *adap) 2553 { 2554 static struct intr_info pl_intr_info[] = { 2555 { F_FATALPERR, "Fatal parity error", -1, 1 }, 2556 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 2557 { 0 } 2558 }; 2559 2560 static struct intr_info t5_pl_intr_info[] = { 2561 { F_PL_BUSPERR, "PL bus parity error", -1, 1 }, 2562 { F_FATALPERR, "Fatal parity error", -1, 1 }, 2563 { 0 } 2564 }; 2565 2566 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, 2567 is_t4(adap) ? pl_intr_info : t5_pl_intr_info)) 2568 t4_fatal_err(adap); 2569 } 2570 2571 #define PF_INTR_MASK (F_PFSW | F_PFCIM) 2572 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \ 2573 F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \ 2574 F_CPL_SWITCH | F_SGE | F_ULP_TX) 2575 2576 /** 2577 * t4_slow_intr_handler - control path interrupt handler 2578 * @adapter: the adapter 2579 * 2580 * T4 interrupt handler for non-data global interrupt events, e.g., errors. 2581 * The designation 'slow' is because it involves register reads, while 2582 * data interrupts typically don't involve any MMIOs. 2583 */ 2584 int t4_slow_intr_handler(struct adapter *adapter) 2585 { 2586 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE); 2587 2588 if (!(cause & GLBL_INTR_MASK)) 2589 return 0; 2590 if (cause & F_CIM) 2591 cim_intr_handler(adapter); 2592 if (cause & F_MPS) 2593 mps_intr_handler(adapter); 2594 if (cause & F_NCSI) 2595 ncsi_intr_handler(adapter); 2596 if (cause & F_PL) 2597 pl_intr_handler(adapter); 2598 if (cause & F_SMB) 2599 smb_intr_handler(adapter); 2600 if (cause & F_XGMAC0) 2601 xgmac_intr_handler(adapter, 0); 2602 if (cause & F_XGMAC1) 2603 xgmac_intr_handler(adapter, 1); 2604 if (cause & F_XGMAC_KR0) 2605 xgmac_intr_handler(adapter, 2); 2606 if (cause & F_XGMAC_KR1) 2607 xgmac_intr_handler(adapter, 3); 2608 if (cause & F_PCIE) 2609 pcie_intr_handler(adapter); 2610 if (cause & F_MC) 2611 mem_intr_handler(adapter, MEM_MC); 2612 if (cause & F_EDC0) 2613 mem_intr_handler(adapter, MEM_EDC0); 2614 if (cause & F_EDC1) 2615 mem_intr_handler(adapter, MEM_EDC1); 2616 if (cause & F_LE) 2617 le_intr_handler(adapter); 2618 if (cause & F_TP) 2619 tp_intr_handler(adapter); 2620 if (cause & F_MA) 2621 ma_intr_handler(adapter); 2622 if (cause & F_PM_TX) 2623 pmtx_intr_handler(adapter); 2624 if (cause & F_PM_RX) 2625 pmrx_intr_handler(adapter); 2626 if (cause & F_ULP_RX) 2627 ulprx_intr_handler(adapter); 2628 if (cause & F_CPL_SWITCH) 2629 cplsw_intr_handler(adapter); 2630 if (cause & F_SGE) 2631 sge_intr_handler(adapter); 2632 if (cause & F_ULP_TX) 2633 ulptx_intr_handler(adapter); 2634 2635 /* Clear the interrupts just processed for which we are the master. */ 2636 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK); 2637 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */ 2638 return 1; 2639 } 2640 2641 /** 2642 * t4_intr_enable - enable interrupts 2643 * @adapter: the adapter whose interrupts should be enabled 2644 * 2645 * Enable PF-specific interrupts for the calling function and the top-level 2646 * interrupt concentrator for global interrupts. Interrupts are already 2647 * enabled at each module, here we just enable the roots of the interrupt 2648 * hierarchies. 2649 * 2650 * Note: this function should be called only when the driver manages 2651 * non PF-specific interrupts from the various HW modules. Only one PCI 2652 * function at a time should be doing this. 2653 */ 2654 void t4_intr_enable(struct adapter *adapter) 2655 { 2656 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); 2657 2658 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE | 2659 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | 2660 F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 | 2661 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 | 2662 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 | 2663 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | 2664 F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR | 2665 F_EGRESS_SIZE_ERR); 2666 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK); 2667 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf); 2668 } 2669 2670 /** 2671 * t4_intr_disable - disable interrupts 2672 * @adapter: the adapter whose interrupts should be disabled 2673 * 2674 * Disable interrupts. We only disable the top-level interrupt 2675 * concentrators. The caller must be a PCI function managing global 2676 * interrupts. 2677 */ 2678 void t4_intr_disable(struct adapter *adapter) 2679 { 2680 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); 2681 2682 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0); 2683 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0); 2684 } 2685 2686 /** 2687 * t4_intr_clear - clear all interrupts 2688 * @adapter: the adapter whose interrupts should be cleared 2689 * 2690 * Clears all interrupts. The caller must be a PCI function managing 2691 * global interrupts. 2692 */ 2693 void t4_intr_clear(struct adapter *adapter) 2694 { 2695 static const unsigned int cause_reg[] = { 2696 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3, 2697 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE, 2698 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE, 2699 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1), 2700 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE, 2701 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE), 2702 A_TP_INT_CAUSE, 2703 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE, 2704 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE, 2705 A_MPS_RX_PERR_INT_CAUSE, 2706 A_CPL_INTR_CAUSE, 2707 MYPF_REG(A_PL_PF_INT_CAUSE), 2708 A_PL_PL_INT_CAUSE, 2709 A_LE_DB_INT_CAUSE, 2710 }; 2711 2712 unsigned int i; 2713 2714 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i) 2715 t4_write_reg(adapter, cause_reg[i], 0xffffffff); 2716 2717 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE : 2718 A_MC_P_INT_CAUSE, 0xffffffff); 2719 2720 if (is_t4(adapter)) { 2721 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 2722 0xffffffff); 2723 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 2724 0xffffffff); 2725 } else 2726 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff); 2727 2728 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK); 2729 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */ 2730 } 2731 2732 /** 2733 * hash_mac_addr - return the hash value of a MAC address 2734 * @addr: the 48-bit Ethernet MAC address 2735 * 2736 * Hashes a MAC address according to the hash function used by HW inexact 2737 * (hash) address matching. 2738 */ 2739 static int hash_mac_addr(const u8 *addr) 2740 { 2741 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; 2742 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; 2743 a ^= b; 2744 a ^= (a >> 12); 2745 a ^= (a >> 6); 2746 return a & 0x3f; 2747 } 2748 2749 /** 2750 * t4_config_rss_range - configure a portion of the RSS mapping table 2751 * @adapter: the adapter 2752 * @mbox: mbox to use for the FW command 2753 * @viid: virtual interface whose RSS subtable is to be written 2754 * @start: start entry in the table to write 2755 * @n: how many table entries to write 2756 * @rspq: values for the "response queue" (Ingress Queue) lookup table 2757 * @nrspq: number of values in @rspq 2758 * 2759 * Programs the selected part of the VI's RSS mapping table with the 2760 * provided values. If @nrspq < @n the supplied values are used repeatedly 2761 * until the full table range is populated. 2762 * 2763 * The caller must ensure the values in @rspq are in the range allowed for 2764 * @viid. 2765 */ 2766 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 2767 int start, int n, const u16 *rspq, unsigned int nrspq) 2768 { 2769 int ret; 2770 const u16 *rsp = rspq; 2771 const u16 *rsp_end = rspq + nrspq; 2772 struct fw_rss_ind_tbl_cmd cmd; 2773 2774 memset(&cmd, 0, sizeof(cmd)); 2775 cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) | 2776 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 2777 V_FW_RSS_IND_TBL_CMD_VIID(viid)); 2778 cmd.retval_len16 = htonl(FW_LEN16(cmd)); 2779 2780 2781 /* 2782 * Each firmware RSS command can accommodate up to 32 RSS Ingress 2783 * Queue Identifiers. These Ingress Queue IDs are packed three to 2784 * a 32-bit word as 10-bit values with the upper remaining 2 bits 2785 * reserved. 2786 */ 2787 while (n > 0) { 2788 int nq = min(n, 32); 2789 int nq_packed = 0; 2790 __be32 *qp = &cmd.iq0_to_iq2; 2791 2792 /* 2793 * Set up the firmware RSS command header to send the next 2794 * "nq" Ingress Queue IDs to the firmware. 2795 */ 2796 cmd.niqid = htons(nq); 2797 cmd.startidx = htons(start); 2798 2799 /* 2800 * "nq" more done for the start of the next loop. 2801 */ 2802 start += nq; 2803 n -= nq; 2804 2805 /* 2806 * While there are still Ingress Queue IDs to stuff into the 2807 * current firmware RSS command, retrieve them from the 2808 * Ingress Queue ID array and insert them into the command. 2809 */ 2810 while (nq > 0) { 2811 /* 2812 * Grab up to the next 3 Ingress Queue IDs (wrapping 2813 * around the Ingress Queue ID array if necessary) and 2814 * insert them into the firmware RSS command at the 2815 * current 3-tuple position within the commad. 2816 */ 2817 u16 qbuf[3]; 2818 u16 *qbp = qbuf; 2819 int nqbuf = min(3, nq); 2820 2821 nq -= nqbuf; 2822 qbuf[0] = qbuf[1] = qbuf[2] = 0; 2823 while (nqbuf && nq_packed < 32) { 2824 nqbuf--; 2825 nq_packed++; 2826 *qbp++ = *rsp++; 2827 if (rsp >= rsp_end) 2828 rsp = rspq; 2829 } 2830 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) | 2831 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) | 2832 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2])); 2833 } 2834 2835 /* 2836 * Send this portion of the RRS table update to the firmware; 2837 * bail out on any errors. 2838 */ 2839 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); 2840 if (ret) 2841 return ret; 2842 } 2843 2844 return 0; 2845 } 2846 2847 /** 2848 * t4_config_glbl_rss - configure the global RSS mode 2849 * @adapter: the adapter 2850 * @mbox: mbox to use for the FW command 2851 * @mode: global RSS mode 2852 * @flags: mode-specific flags 2853 * 2854 * Sets the global RSS mode. 2855 */ 2856 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 2857 unsigned int flags) 2858 { 2859 struct fw_rss_glb_config_cmd c; 2860 2861 memset(&c, 0, sizeof(c)); 2862 c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | 2863 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 2864 c.retval_len16 = htonl(FW_LEN16(c)); 2865 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { 2866 c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode)); 2867 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { 2868 c.u.basicvirtual.mode_pkd = 2869 htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode)); 2870 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); 2871 } else 2872 return -EINVAL; 2873 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2874 } 2875 2876 /** 2877 * t4_config_vi_rss - configure per VI RSS settings 2878 * @adapter: the adapter 2879 * @mbox: mbox to use for the FW command 2880 * @viid: the VI id 2881 * @flags: RSS flags 2882 * @defq: id of the default RSS queue for the VI. 2883 * 2884 * Configures VI-specific RSS properties. 2885 */ 2886 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, 2887 unsigned int flags, unsigned int defq) 2888 { 2889 struct fw_rss_vi_config_cmd c; 2890 2891 memset(&c, 0, sizeof(c)); 2892 c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | 2893 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 2894 V_FW_RSS_VI_CONFIG_CMD_VIID(viid)); 2895 c.retval_len16 = htonl(FW_LEN16(c)); 2896 c.u.basicvirtual.defaultq_to_udpen = htonl(flags | 2897 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq)); 2898 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2899 } 2900 2901 /* Read an RSS table row */ 2902 static int rd_rss_row(struct adapter *adap, int row, u32 *val) 2903 { 2904 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row); 2905 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1, 2906 5, 0, val); 2907 } 2908 2909 /** 2910 * t4_read_rss - read the contents of the RSS mapping table 2911 * @adapter: the adapter 2912 * @map: holds the contents of the RSS mapping table 2913 * 2914 * Reads the contents of the RSS hash->queue mapping table. 2915 */ 2916 int t4_read_rss(struct adapter *adapter, u16 *map) 2917 { 2918 u32 val; 2919 int i, ret; 2920 2921 for (i = 0; i < RSS_NENTRIES / 2; ++i) { 2922 ret = rd_rss_row(adapter, i, &val); 2923 if (ret) 2924 return ret; 2925 *map++ = G_LKPTBLQUEUE0(val); 2926 *map++ = G_LKPTBLQUEUE1(val); 2927 } 2928 return 0; 2929 } 2930 2931 /** 2932 * t4_read_rss_key - read the global RSS key 2933 * @adap: the adapter 2934 * @key: 10-entry array holding the 320-bit RSS key 2935 * 2936 * Reads the global 320-bit RSS key. 2937 */ 2938 void t4_read_rss_key(struct adapter *adap, u32 *key) 2939 { 2940 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10, 2941 A_TP_RSS_SECRET_KEY0); 2942 } 2943 2944 /** 2945 * t4_write_rss_key - program one of the RSS keys 2946 * @adap: the adapter 2947 * @key: 10-entry array holding the 320-bit RSS key 2948 * @idx: which RSS key to write 2949 * 2950 * Writes one of the RSS keys with the given 320-bit value. If @idx is 2951 * 0..15 the corresponding entry in the RSS key table is written, 2952 * otherwise the global RSS key is written. 2953 */ 2954 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) 2955 { 2956 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10, 2957 A_TP_RSS_SECRET_KEY0); 2958 if (idx >= 0 && idx < 16) 2959 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, 2960 V_KEYWRADDR(idx) | F_KEYWREN); 2961 } 2962 2963 /** 2964 * t4_read_rss_pf_config - read PF RSS Configuration Table 2965 * @adapter: the adapter 2966 * @index: the entry in the PF RSS table to read 2967 * @valp: where to store the returned value 2968 * 2969 * Reads the PF RSS Configuration Table at the specified index and returns 2970 * the value found there. 2971 */ 2972 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp) 2973 { 2974 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2975 valp, 1, A_TP_RSS_PF0_CONFIG + index); 2976 } 2977 2978 /** 2979 * t4_write_rss_pf_config - write PF RSS Configuration Table 2980 * @adapter: the adapter 2981 * @index: the entry in the VF RSS table to read 2982 * @val: the value to store 2983 * 2984 * Writes the PF RSS Configuration Table at the specified index with the 2985 * specified value. 2986 */ 2987 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val) 2988 { 2989 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2990 &val, 1, A_TP_RSS_PF0_CONFIG + index); 2991 } 2992 2993 /** 2994 * t4_read_rss_vf_config - read VF RSS Configuration Table 2995 * @adapter: the adapter 2996 * @index: the entry in the VF RSS table to read 2997 * @vfl: where to store the returned VFL 2998 * @vfh: where to store the returned VFH 2999 * 3000 * Reads the VF RSS Configuration Table at the specified index and returns 3001 * the (VFL, VFH) values found there. 3002 */ 3003 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, 3004 u32 *vfl, u32 *vfh) 3005 { 3006 u32 vrt; 3007 3008 /* 3009 * Request that the index'th VF Table values be read into VFL/VFH. 3010 */ 3011 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); 3012 vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN); 3013 vrt |= V_VFWRADDR(index) | F_VFRDEN; 3014 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); 3015 3016 /* 3017 * Grab the VFL/VFH values ... 3018 */ 3019 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3020 vfl, 1, A_TP_RSS_VFL_CONFIG); 3021 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3022 vfh, 1, A_TP_RSS_VFH_CONFIG); 3023 } 3024 3025 /** 3026 * t4_write_rss_vf_config - write VF RSS Configuration Table 3027 * 3028 * @adapter: the adapter 3029 * @index: the entry in the VF RSS table to write 3030 * @vfl: the VFL to store 3031 * @vfh: the VFH to store 3032 * 3033 * Writes the VF RSS Configuration Table at the specified index with the 3034 * specified (VFL, VFH) values. 3035 */ 3036 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, 3037 u32 vfl, u32 vfh) 3038 { 3039 u32 vrt; 3040 3041 /* 3042 * Load up VFL/VFH with the values to be written ... 3043 */ 3044 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3045 &vfl, 1, A_TP_RSS_VFL_CONFIG); 3046 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3047 &vfh, 1, A_TP_RSS_VFH_CONFIG); 3048 3049 /* 3050 * Write the VFL/VFH into the VF Table at index'th location. 3051 */ 3052 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); 3053 vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN); 3054 vrt |= V_VFWRADDR(index) | F_VFWREN; 3055 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); 3056 } 3057 3058 /** 3059 * t4_read_rss_pf_map - read PF RSS Map 3060 * @adapter: the adapter 3061 * 3062 * Reads the PF RSS Map register and returns its value. 3063 */ 3064 u32 t4_read_rss_pf_map(struct adapter *adapter) 3065 { 3066 u32 pfmap; 3067 3068 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3069 &pfmap, 1, A_TP_RSS_PF_MAP); 3070 return pfmap; 3071 } 3072 3073 /** 3074 * t4_write_rss_pf_map - write PF RSS Map 3075 * @adapter: the adapter 3076 * @pfmap: PF RSS Map value 3077 * 3078 * Writes the specified value to the PF RSS Map register. 3079 */ 3080 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap) 3081 { 3082 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3083 &pfmap, 1, A_TP_RSS_PF_MAP); 3084 } 3085 3086 /** 3087 * t4_read_rss_pf_mask - read PF RSS Mask 3088 * @adapter: the adapter 3089 * 3090 * Reads the PF RSS Mask register and returns its value. 3091 */ 3092 u32 t4_read_rss_pf_mask(struct adapter *adapter) 3093 { 3094 u32 pfmask; 3095 3096 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3097 &pfmask, 1, A_TP_RSS_PF_MSK); 3098 return pfmask; 3099 } 3100 3101 /** 3102 * t4_write_rss_pf_mask - write PF RSS Mask 3103 * @adapter: the adapter 3104 * @pfmask: PF RSS Mask value 3105 * 3106 * Writes the specified value to the PF RSS Mask register. 3107 */ 3108 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask) 3109 { 3110 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 3111 &pfmask, 1, A_TP_RSS_PF_MSK); 3112 } 3113 3114 /** 3115 * t4_set_filter_mode - configure the optional components of filter tuples 3116 * @adap: the adapter 3117 * @mode_map: a bitmap selcting which optional filter components to enable 3118 * 3119 * Sets the filter mode by selecting the optional components to enable 3120 * in filter tuples. Returns 0 on success and a negative error if the 3121 * requested mode needs more bits than are available for optional 3122 * components. 3123 */ 3124 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map) 3125 { 3126 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 }; 3127 3128 int i, nbits = 0; 3129 3130 for (i = S_FCOE; i <= S_FRAGMENTATION; i++) 3131 if (mode_map & (1 << i)) 3132 nbits += width[i]; 3133 if (nbits > FILTER_OPT_LEN) 3134 return -EINVAL; 3135 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1, 3136 A_TP_VLAN_PRI_MAP); 3137 return 0; 3138 } 3139 3140 /** 3141 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 3142 * @adap: the adapter 3143 * @v4: holds the TCP/IP counter values 3144 * @v6: holds the TCP/IPv6 counter values 3145 * 3146 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. 3147 * Either @v4 or @v6 may be %NULL to skip the corresponding stats. 3148 */ 3149 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 3150 struct tp_tcp_stats *v6) 3151 { 3152 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1]; 3153 3154 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST) 3155 #define STAT(x) val[STAT_IDX(x)] 3156 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) 3157 3158 if (v4) { 3159 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 3160 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST); 3161 v4->tcpOutRsts = STAT(OUT_RST); 3162 v4->tcpInSegs = STAT64(IN_SEG); 3163 v4->tcpOutSegs = STAT64(OUT_SEG); 3164 v4->tcpRetransSegs = STAT64(RXT_SEG); 3165 } 3166 if (v6) { 3167 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 3168 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST); 3169 v6->tcpOutRsts = STAT(OUT_RST); 3170 v6->tcpInSegs = STAT64(IN_SEG); 3171 v6->tcpOutSegs = STAT64(OUT_SEG); 3172 v6->tcpRetransSegs = STAT64(RXT_SEG); 3173 } 3174 #undef STAT64 3175 #undef STAT 3176 #undef STAT_IDX 3177 } 3178 3179 /** 3180 * t4_tp_get_err_stats - read TP's error MIB counters 3181 * @adap: the adapter 3182 * @st: holds the counter values 3183 * 3184 * Returns the values of TP's error counters. 3185 */ 3186 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) 3187 { 3188 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs, 3189 12, A_TP_MIB_MAC_IN_ERR_0); 3190 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops, 3191 8, A_TP_MIB_TNL_CNG_DROP_0); 3192 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops, 3193 4, A_TP_MIB_TNL_DROP_0); 3194 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops, 3195 4, A_TP_MIB_OFD_VLN_DROP_0); 3196 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs, 3197 4, A_TP_MIB_TCP_V6IN_ERR_0); 3198 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh, 3199 2, A_TP_MIB_OFD_ARP_DROP); 3200 } 3201 3202 /** 3203 * t4_tp_get_proxy_stats - read TP's proxy MIB counters 3204 * @adap: the adapter 3205 * @st: holds the counter values 3206 * 3207 * Returns the values of TP's proxy counters. 3208 */ 3209 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st) 3210 { 3211 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy, 3212 4, A_TP_MIB_TNL_LPBK_0); 3213 } 3214 3215 /** 3216 * t4_tp_get_cpl_stats - read TP's CPL MIB counters 3217 * @adap: the adapter 3218 * @st: holds the counter values 3219 * 3220 * Returns the values of TP's CPL counters. 3221 */ 3222 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st) 3223 { 3224 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req, 3225 8, A_TP_MIB_CPL_IN_REQ_0); 3226 } 3227 3228 /** 3229 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters 3230 * @adap: the adapter 3231 * @st: holds the counter values 3232 * 3233 * Returns the values of TP's RDMA counters. 3234 */ 3235 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st) 3236 { 3237 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod, 3238 2, A_TP_MIB_RQE_DFR_MOD); 3239 } 3240 3241 /** 3242 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port 3243 * @adap: the adapter 3244 * @idx: the port index 3245 * @st: holds the counter values 3246 * 3247 * Returns the values of TP's FCoE counters for the selected port. 3248 */ 3249 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, 3250 struct tp_fcoe_stats *st) 3251 { 3252 u32 val[2]; 3253 3254 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP, 3255 1, A_TP_MIB_FCOE_DDP_0 + idx); 3256 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop, 3257 1, A_TP_MIB_FCOE_DROP_0 + idx); 3258 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 3259 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx); 3260 st->octetsDDP = ((u64)val[0] << 32) | val[1]; 3261 } 3262 3263 /** 3264 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters 3265 * @adap: the adapter 3266 * @st: holds the counter values 3267 * 3268 * Returns the values of TP's counters for non-TCP directly-placed packets. 3269 */ 3270 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st) 3271 { 3272 u32 val[4]; 3273 3274 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4, 3275 A_TP_MIB_USM_PKTS); 3276 st->frames = val[0]; 3277 st->drops = val[1]; 3278 st->octets = ((u64)val[2] << 32) | val[3]; 3279 } 3280 3281 /** 3282 * t4_read_mtu_tbl - returns the values in the HW path MTU table 3283 * @adap: the adapter 3284 * @mtus: where to store the MTU values 3285 * @mtu_log: where to store the MTU base-2 log (may be %NULL) 3286 * 3287 * Reads the HW path MTU table. 3288 */ 3289 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) 3290 { 3291 u32 v; 3292 int i; 3293 3294 for (i = 0; i < NMTUS; ++i) { 3295 t4_write_reg(adap, A_TP_MTU_TABLE, 3296 V_MTUINDEX(0xff) | V_MTUVALUE(i)); 3297 v = t4_read_reg(adap, A_TP_MTU_TABLE); 3298 mtus[i] = G_MTUVALUE(v); 3299 if (mtu_log) 3300 mtu_log[i] = G_MTUWIDTH(v); 3301 } 3302 } 3303 3304 /** 3305 * t4_read_cong_tbl - reads the congestion control table 3306 * @adap: the adapter 3307 * @incr: where to store the alpha values 3308 * 3309 * Reads the additive increments programmed into the HW congestion 3310 * control table. 3311 */ 3312 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]) 3313 { 3314 unsigned int mtu, w; 3315 3316 for (mtu = 0; mtu < NMTUS; ++mtu) 3317 for (w = 0; w < NCCTRL_WIN; ++w) { 3318 t4_write_reg(adap, A_TP_CCTRL_TABLE, 3319 V_ROWINDEX(0xffff) | (mtu << 5) | w); 3320 incr[mtu][w] = (u16)t4_read_reg(adap, 3321 A_TP_CCTRL_TABLE) & 0x1fff; 3322 } 3323 } 3324 3325 /** 3326 * t4_read_pace_tbl - read the pace table 3327 * @adap: the adapter 3328 * @pace_vals: holds the returned values 3329 * 3330 * Returns the values of TP's pace table in microseconds. 3331 */ 3332 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]) 3333 { 3334 unsigned int i, v; 3335 3336 for (i = 0; i < NTX_SCHED; i++) { 3337 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i); 3338 v = t4_read_reg(adap, A_TP_PACE_TABLE); 3339 pace_vals[i] = dack_ticks_to_usec(adap, v); 3340 } 3341 } 3342 3343 /** 3344 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register 3345 * @adap: the adapter 3346 * @addr: the indirect TP register address 3347 * @mask: specifies the field within the register to modify 3348 * @val: new value for the field 3349 * 3350 * Sets a field of an indirect TP register to the given value. 3351 */ 3352 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 3353 unsigned int mask, unsigned int val) 3354 { 3355 t4_write_reg(adap, A_TP_PIO_ADDR, addr); 3356 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; 3357 t4_write_reg(adap, A_TP_PIO_DATA, val); 3358 } 3359 3360 /** 3361 * init_cong_ctrl - initialize congestion control parameters 3362 * @a: the alpha values for congestion control 3363 * @b: the beta values for congestion control 3364 * 3365 * Initialize the congestion control parameters. 3366 */ 3367 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) 3368 { 3369 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 3370 a[9] = 2; 3371 a[10] = 3; 3372 a[11] = 4; 3373 a[12] = 5; 3374 a[13] = 6; 3375 a[14] = 7; 3376 a[15] = 8; 3377 a[16] = 9; 3378 a[17] = 10; 3379 a[18] = 14; 3380 a[19] = 17; 3381 a[20] = 21; 3382 a[21] = 25; 3383 a[22] = 30; 3384 a[23] = 35; 3385 a[24] = 45; 3386 a[25] = 60; 3387 a[26] = 80; 3388 a[27] = 100; 3389 a[28] = 200; 3390 a[29] = 300; 3391 a[30] = 400; 3392 a[31] = 500; 3393 3394 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 3395 b[9] = b[10] = 1; 3396 b[11] = b[12] = 2; 3397 b[13] = b[14] = b[15] = b[16] = 3; 3398 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 3399 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 3400 b[28] = b[29] = 6; 3401 b[30] = b[31] = 7; 3402 } 3403 3404 /* The minimum additive increment value for the congestion control table */ 3405 #define CC_MIN_INCR 2U 3406 3407 /** 3408 * t4_load_mtus - write the MTU and congestion control HW tables 3409 * @adap: the adapter 3410 * @mtus: the values for the MTU table 3411 * @alpha: the values for the congestion control alpha parameter 3412 * @beta: the values for the congestion control beta parameter 3413 * 3414 * Write the HW MTU table with the supplied MTUs and the high-speed 3415 * congestion control table with the supplied alpha, beta, and MTUs. 3416 * We write the two tables together because the additive increments 3417 * depend on the MTUs. 3418 */ 3419 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 3420 const unsigned short *alpha, const unsigned short *beta) 3421 { 3422 static const unsigned int avg_pkts[NCCTRL_WIN] = { 3423 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 3424 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 3425 28672, 40960, 57344, 81920, 114688, 163840, 229376 3426 }; 3427 3428 unsigned int i, w; 3429 3430 for (i = 0; i < NMTUS; ++i) { 3431 unsigned int mtu = mtus[i]; 3432 unsigned int log2 = fls(mtu); 3433 3434 if (!(mtu & ((1 << log2) >> 2))) /* round */ 3435 log2--; 3436 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) | 3437 V_MTUWIDTH(log2) | V_MTUVALUE(mtu)); 3438 3439 for (w = 0; w < NCCTRL_WIN; ++w) { 3440 unsigned int inc; 3441 3442 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 3443 CC_MIN_INCR); 3444 3445 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | 3446 (w << 16) | (beta[w] << 13) | inc); 3447 } 3448 } 3449 } 3450 3451 /** 3452 * t4_set_pace_tbl - set the pace table 3453 * @adap: the adapter 3454 * @pace_vals: the pace values in microseconds 3455 * @start: index of the first entry in the HW pace table to set 3456 * @n: how many entries to set 3457 * 3458 * Sets (a subset of the) HW pace table. 3459 */ 3460 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals, 3461 unsigned int start, unsigned int n) 3462 { 3463 unsigned int vals[NTX_SCHED], i; 3464 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000); 3465 3466 if (n > NTX_SCHED) 3467 return -ERANGE; 3468 3469 /* convert values from us to dack ticks, rounding to closest value */ 3470 for (i = 0; i < n; i++, pace_vals++) { 3471 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns; 3472 if (vals[i] > 0x7ff) 3473 return -ERANGE; 3474 if (*pace_vals && vals[i] == 0) 3475 return -ERANGE; 3476 } 3477 for (i = 0; i < n; i++, start++) 3478 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]); 3479 return 0; 3480 } 3481 3482 /** 3483 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler 3484 * @adap: the adapter 3485 * @kbps: target rate in Kbps 3486 * @sched: the scheduler index 3487 * 3488 * Configure a Tx HW scheduler for the target rate. 3489 */ 3490 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps) 3491 { 3492 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; 3493 unsigned int clk = adap->params.vpd.cclk * 1000; 3494 unsigned int selected_cpt = 0, selected_bpt = 0; 3495 3496 if (kbps > 0) { 3497 kbps *= 125; /* -> bytes */ 3498 for (cpt = 1; cpt <= 255; cpt++) { 3499 tps = clk / cpt; 3500 bpt = (kbps + tps / 2) / tps; 3501 if (bpt > 0 && bpt <= 255) { 3502 v = bpt * tps; 3503 delta = v >= kbps ? v - kbps : kbps - v; 3504 if (delta < mindelta) { 3505 mindelta = delta; 3506 selected_cpt = cpt; 3507 selected_bpt = bpt; 3508 } 3509 } else if (selected_cpt) 3510 break; 3511 } 3512 if (!selected_cpt) 3513 return -EINVAL; 3514 } 3515 t4_write_reg(adap, A_TP_TM_PIO_ADDR, 3516 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); 3517 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3518 if (sched & 1) 3519 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24); 3520 else 3521 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8); 3522 t4_write_reg(adap, A_TP_TM_PIO_DATA, v); 3523 return 0; 3524 } 3525 3526 /** 3527 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler 3528 * @adap: the adapter 3529 * @sched: the scheduler index 3530 * @ipg: the interpacket delay in tenths of nanoseconds 3531 * 3532 * Set the interpacket delay for a HW packet rate scheduler. 3533 */ 3534 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg) 3535 { 3536 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; 3537 3538 /* convert ipg to nearest number of core clocks */ 3539 ipg *= core_ticks_per_usec(adap); 3540 ipg = (ipg + 5000) / 10000; 3541 if (ipg > M_TXTIMERSEPQ0) 3542 return -EINVAL; 3543 3544 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 3545 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3546 if (sched & 1) 3547 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg); 3548 else 3549 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg); 3550 t4_write_reg(adap, A_TP_TM_PIO_DATA, v); 3551 t4_read_reg(adap, A_TP_TM_PIO_DATA); 3552 return 0; 3553 } 3554 3555 /** 3556 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler 3557 * @adap: the adapter 3558 * @sched: the scheduler index 3559 * @kbps: the byte rate in Kbps 3560 * @ipg: the interpacket delay in tenths of nanoseconds 3561 * 3562 * Return the current configuration of a HW Tx scheduler. 3563 */ 3564 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, 3565 unsigned int *ipg) 3566 { 3567 unsigned int v, addr, bpt, cpt; 3568 3569 if (kbps) { 3570 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; 3571 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 3572 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3573 if (sched & 1) 3574 v >>= 16; 3575 bpt = (v >> 8) & 0xff; 3576 cpt = v & 0xff; 3577 if (!cpt) 3578 *kbps = 0; /* scheduler disabled */ 3579 else { 3580 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */ 3581 *kbps = (v * bpt) / 125; 3582 } 3583 } 3584 if (ipg) { 3585 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; 3586 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 3587 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3588 if (sched & 1) 3589 v >>= 16; 3590 v &= 0xffff; 3591 *ipg = (10000 * v) / core_ticks_per_usec(adap); 3592 } 3593 } 3594 3595 /* 3596 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core 3597 * clocks. The formula is 3598 * 3599 * bytes/s = bytes256 * 256 * ClkFreq / 4096 3600 * 3601 * which is equivalent to 3602 * 3603 * bytes/s = 62.5 * bytes256 * ClkFreq_ms 3604 */ 3605 static u64 chan_rate(struct adapter *adap, unsigned int bytes256) 3606 { 3607 u64 v = bytes256 * adap->params.vpd.cclk; 3608 3609 return v * 62 + v / 2; 3610 } 3611 3612 /** 3613 * t4_get_chan_txrate - get the current per channel Tx rates 3614 * @adap: the adapter 3615 * @nic_rate: rates for NIC traffic 3616 * @ofld_rate: rates for offloaded traffic 3617 * 3618 * Return the current Tx rates in bytes/s for NIC and offloaded traffic 3619 * for each channel. 3620 */ 3621 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) 3622 { 3623 u32 v; 3624 3625 v = t4_read_reg(adap, A_TP_TX_TRATE); 3626 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v)); 3627 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v)); 3628 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v)); 3629 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v)); 3630 3631 v = t4_read_reg(adap, A_TP_TX_ORATE); 3632 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v)); 3633 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v)); 3634 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v)); 3635 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v)); 3636 } 3637 3638 /** 3639 * t4_set_trace_filter - configure one of the tracing filters 3640 * @adap: the adapter 3641 * @tp: the desired trace filter parameters 3642 * @idx: which filter to configure 3643 * @enable: whether to enable or disable the filter 3644 * 3645 * Configures one of the tracing filters available in HW. If @tp is %NULL 3646 * it indicates that the filter is already written in the register and it 3647 * just needs to be enabled or disabled. 3648 */ 3649 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, 3650 int idx, int enable) 3651 { 3652 int i, ofst = idx * 4; 3653 u32 data_reg, mask_reg, cfg; 3654 u32 multitrc = F_TRCMULTIFILTER; 3655 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN; 3656 3657 if (idx < 0 || idx >= NTRACE) 3658 return -EINVAL; 3659 3660 if (tp == NULL || !enable) { 3661 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 3662 enable ? en : 0); 3663 return 0; 3664 } 3665 3666 /* 3667 * TODO - After T4 data book is updated, specify the exact 3668 * section below. 3669 * 3670 * See T4 data book - MPS section for a complete description 3671 * of the below if..else handling of A_MPS_TRC_CFG register 3672 * value. 3673 */ 3674 cfg = t4_read_reg(adap, A_MPS_TRC_CFG); 3675 if (cfg & F_TRCMULTIFILTER) { 3676 /* 3677 * If multiple tracers are enabled, then maximum 3678 * capture size is 2.5KB (FIFO size of a single channel) 3679 * minus 2 flits for CPL_TRACE_PKT header. 3680 */ 3681 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8))) 3682 return -EINVAL; 3683 } else { 3684 /* 3685 * If multiple tracers are disabled, to avoid deadlocks 3686 * maximum packet capture size of 9600 bytes is recommended. 3687 * Also in this mode, only trace0 can be enabled and running. 3688 */ 3689 multitrc = 0; 3690 if (tp->snap_len > 9600 || idx) 3691 return -EINVAL; 3692 } 3693 3694 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 || 3695 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET || 3696 tp->min_len > M_TFMINPKTSIZE) 3697 return -EINVAL; 3698 3699 /* stop the tracer we'll be changing */ 3700 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0); 3701 3702 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH); 3703 data_reg = A_MPS_TRC_FILTER0_MATCH + idx; 3704 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx; 3705 3706 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 3707 t4_write_reg(adap, data_reg, tp->data[i]); 3708 t4_write_reg(adap, mask_reg, ~tp->mask[i]); 3709 } 3710 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst, 3711 V_TFCAPTUREMAX(tp->snap_len) | 3712 V_TFMINPKTSIZE(tp->min_len)); 3713 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 3714 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en | 3715 (is_t4(adap) ? 3716 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) : 3717 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert))); 3718 3719 return 0; 3720 } 3721 3722 /** 3723 * t4_get_trace_filter - query one of the tracing filters 3724 * @adap: the adapter 3725 * @tp: the current trace filter parameters 3726 * @idx: which trace filter to query 3727 * @enabled: non-zero if the filter is enabled 3728 * 3729 * Returns the current settings of one of the HW tracing filters. 3730 */ 3731 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, 3732 int *enabled) 3733 { 3734 u32 ctla, ctlb; 3735 int i, ofst = idx * 4; 3736 u32 data_reg, mask_reg; 3737 3738 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst); 3739 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst); 3740 3741 if (is_t4(adap)) { 3742 *enabled = !!(ctla & F_TFEN); 3743 tp->port = G_TFPORT(ctla); 3744 tp->invert = !!(ctla & F_TFINVERTMATCH); 3745 } else { 3746 *enabled = !!(ctla & F_T5_TFEN); 3747 tp->port = G_T5_TFPORT(ctla); 3748 tp->invert = !!(ctla & F_T5_TFINVERTMATCH); 3749 } 3750 tp->snap_len = G_TFCAPTUREMAX(ctlb); 3751 tp->min_len = G_TFMINPKTSIZE(ctlb); 3752 tp->skip_ofst = G_TFOFFSET(ctla); 3753 tp->skip_len = G_TFLENGTH(ctla); 3754 3755 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx; 3756 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst; 3757 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst; 3758 3759 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 3760 tp->mask[i] = ~t4_read_reg(adap, mask_reg); 3761 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; 3762 } 3763 } 3764 3765 /** 3766 * t4_pmtx_get_stats - returns the HW stats from PMTX 3767 * @adap: the adapter 3768 * @cnt: where to store the count statistics 3769 * @cycles: where to store the cycle statistics 3770 * 3771 * Returns performance statistics from PMTX. 3772 */ 3773 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 3774 { 3775 int i; 3776 u32 data[2]; 3777 3778 for (i = 0; i < PM_NSTATS; i++) { 3779 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1); 3780 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT); 3781 if (is_t4(adap)) 3782 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB); 3783 else { 3784 t4_read_indirect(adap, A_PM_TX_DBG_CTRL, 3785 A_PM_TX_DBG_DATA, data, 2, 3786 A_PM_TX_DBG_STAT_MSB); 3787 cycles[i] = (((u64)data[0] << 32) | data[1]); 3788 } 3789 } 3790 } 3791 3792 /** 3793 * t4_pmrx_get_stats - returns the HW stats from PMRX 3794 * @adap: the adapter 3795 * @cnt: where to store the count statistics 3796 * @cycles: where to store the cycle statistics 3797 * 3798 * Returns performance statistics from PMRX. 3799 */ 3800 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 3801 { 3802 int i; 3803 u32 data[2]; 3804 3805 for (i = 0; i < PM_NSTATS; i++) { 3806 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1); 3807 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT); 3808 if (is_t4(adap)) 3809 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB); 3810 else { 3811 t4_read_indirect(adap, A_PM_RX_DBG_CTRL, 3812 A_PM_RX_DBG_DATA, data, 2, 3813 A_PM_RX_DBG_STAT_MSB); 3814 cycles[i] = (((u64)data[0] << 32) | data[1]); 3815 } 3816 } 3817 } 3818 3819 /** 3820 * get_mps_bg_map - return the buffer groups associated with a port 3821 * @adap: the adapter 3822 * @idx: the port index 3823 * 3824 * Returns a bitmap indicating which MPS buffer groups are associated 3825 * with the given port. Bit i is set if buffer group i is used by the 3826 * port. 3827 */ 3828 static unsigned int get_mps_bg_map(struct adapter *adap, int idx) 3829 { 3830 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL)); 3831 3832 if (n == 0) 3833 return idx == 0 ? 0xf : 0; 3834 if (n == 1) 3835 return idx < 2 ? (3 << (2 * idx)) : 0; 3836 return 1 << idx; 3837 } 3838 3839 /** 3840 * t4_get_port_stats_offset - collect port stats relative to a previous 3841 * snapshot 3842 * @adap: The adapter 3843 * @idx: The port 3844 * @stats: Current stats to fill 3845 * @offset: Previous stats snapshot 3846 */ 3847 void t4_get_port_stats_offset(struct adapter *adap, int idx, 3848 struct port_stats *stats, 3849 struct port_stats *offset) 3850 { 3851 u64 *s, *o; 3852 int i; 3853 3854 t4_get_port_stats(adap, idx, stats); 3855 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ; 3856 i < (sizeof(struct port_stats)/sizeof(u64)) ; 3857 i++, s++, o++) 3858 *s -= *o; 3859 } 3860 3861 /** 3862 * t4_get_port_stats - collect port statistics 3863 * @adap: the adapter 3864 * @idx: the port index 3865 * @p: the stats structure to fill 3866 * 3867 * Collect statistics related to the given port from HW. 3868 */ 3869 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) 3870 { 3871 u32 bgmap = get_mps_bg_map(adap, idx); 3872 3873 #define GET_STAT(name) \ 3874 t4_read_reg64(adap, \ 3875 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \ 3876 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))) 3877 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 3878 3879 p->tx_pause = GET_STAT(TX_PORT_PAUSE); 3880 p->tx_octets = GET_STAT(TX_PORT_BYTES); 3881 p->tx_frames = GET_STAT(TX_PORT_FRAMES); 3882 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); 3883 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); 3884 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); 3885 p->tx_error_frames = GET_STAT(TX_PORT_ERROR); 3886 p->tx_frames_64 = GET_STAT(TX_PORT_64B); 3887 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); 3888 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); 3889 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); 3890 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); 3891 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); 3892 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); 3893 p->tx_drop = GET_STAT(TX_PORT_DROP); 3894 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); 3895 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); 3896 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); 3897 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); 3898 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); 3899 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); 3900 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); 3901 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); 3902 3903 p->rx_pause = GET_STAT(RX_PORT_PAUSE); 3904 p->rx_octets = GET_STAT(RX_PORT_BYTES); 3905 p->rx_frames = GET_STAT(RX_PORT_FRAMES); 3906 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); 3907 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); 3908 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); 3909 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); 3910 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); 3911 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); 3912 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); 3913 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); 3914 p->rx_runt = GET_STAT(RX_PORT_LESS_64B); 3915 p->rx_frames_64 = GET_STAT(RX_PORT_64B); 3916 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); 3917 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); 3918 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); 3919 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); 3920 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); 3921 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); 3922 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); 3923 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); 3924 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); 3925 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); 3926 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); 3927 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); 3928 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); 3929 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); 3930 3931 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; 3932 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; 3933 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; 3934 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; 3935 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; 3936 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; 3937 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; 3938 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; 3939 3940 #undef GET_STAT 3941 #undef GET_STAT_COM 3942 } 3943 3944 /** 3945 * t4_clr_port_stats - clear port statistics 3946 * @adap: the adapter 3947 * @idx: the port index 3948 * 3949 * Clear HW statistics for the given port. 3950 */ 3951 void t4_clr_port_stats(struct adapter *adap, int idx) 3952 { 3953 unsigned int i; 3954 u32 bgmap = get_mps_bg_map(adap, idx); 3955 u32 port_base_addr; 3956 3957 if (is_t4(adap)) 3958 port_base_addr = PORT_BASE(idx); 3959 else 3960 port_base_addr = T5_PORT_BASE(idx); 3961 3962 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; 3963 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) 3964 t4_write_reg(adap, port_base_addr + i, 0); 3965 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; 3966 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) 3967 t4_write_reg(adap, port_base_addr + i, 0); 3968 for (i = 0; i < 4; i++) 3969 if (bgmap & (1 << i)) { 3970 t4_write_reg(adap, 3971 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0); 3972 t4_write_reg(adap, 3973 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0); 3974 } 3975 } 3976 3977 /** 3978 * t4_get_lb_stats - collect loopback port statistics 3979 * @adap: the adapter 3980 * @idx: the loopback port index 3981 * @p: the stats structure to fill 3982 * 3983 * Return HW statistics for the given loopback port. 3984 */ 3985 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) 3986 { 3987 u32 bgmap = get_mps_bg_map(adap, idx); 3988 3989 #define GET_STAT(name) \ 3990 t4_read_reg64(adap, \ 3991 (is_t4(adap) ? \ 3992 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \ 3993 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))) 3994 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 3995 3996 p->octets = GET_STAT(BYTES); 3997 p->frames = GET_STAT(FRAMES); 3998 p->bcast_frames = GET_STAT(BCAST); 3999 p->mcast_frames = GET_STAT(MCAST); 4000 p->ucast_frames = GET_STAT(UCAST); 4001 p->error_frames = GET_STAT(ERROR); 4002 4003 p->frames_64 = GET_STAT(64B); 4004 p->frames_65_127 = GET_STAT(65B_127B); 4005 p->frames_128_255 = GET_STAT(128B_255B); 4006 p->frames_256_511 = GET_STAT(256B_511B); 4007 p->frames_512_1023 = GET_STAT(512B_1023B); 4008 p->frames_1024_1518 = GET_STAT(1024B_1518B); 4009 p->frames_1519_max = GET_STAT(1519B_MAX); 4010 p->drop = GET_STAT(DROP_FRAMES); 4011 4012 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; 4013 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; 4014 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; 4015 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; 4016 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; 4017 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; 4018 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; 4019 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; 4020 4021 #undef GET_STAT 4022 #undef GET_STAT_COM 4023 } 4024 4025 /** 4026 * t4_wol_magic_enable - enable/disable magic packet WoL 4027 * @adap: the adapter 4028 * @port: the physical port index 4029 * @addr: MAC address expected in magic packets, %NULL to disable 4030 * 4031 * Enables/disables magic packet wake-on-LAN for the selected port. 4032 */ 4033 void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 4034 const u8 *addr) 4035 { 4036 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; 4037 4038 if (is_t4(adap)) { 4039 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO); 4040 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI); 4041 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2); 4042 } else { 4043 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO); 4044 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI); 4045 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2); 4046 } 4047 4048 if (addr) { 4049 t4_write_reg(adap, mag_id_reg_l, 4050 (addr[2] << 24) | (addr[3] << 16) | 4051 (addr[4] << 8) | addr[5]); 4052 t4_write_reg(adap, mag_id_reg_h, 4053 (addr[0] << 8) | addr[1]); 4054 } 4055 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN, 4056 V_MAGICEN(addr != NULL)); 4057 } 4058 4059 /** 4060 * t4_wol_pat_enable - enable/disable pattern-based WoL 4061 * @adap: the adapter 4062 * @port: the physical port index 4063 * @map: bitmap of which HW pattern filters to set 4064 * @mask0: byte mask for bytes 0-63 of a packet 4065 * @mask1: byte mask for bytes 64-127 of a packet 4066 * @crc: Ethernet CRC for selected bytes 4067 * @enable: enable/disable switch 4068 * 4069 * Sets the pattern filters indicated in @map to mask out the bytes 4070 * specified in @mask0/@mask1 in received packets and compare the CRC of 4071 * the resulting packet against @crc. If @enable is %true pattern-based 4072 * WoL is enabled, otherwise disabled. 4073 */ 4074 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, 4075 u64 mask0, u64 mask1, unsigned int crc, bool enable) 4076 { 4077 int i; 4078 u32 port_cfg_reg; 4079 4080 if (is_t4(adap)) 4081 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2); 4082 else 4083 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2); 4084 4085 if (!enable) { 4086 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0); 4087 return 0; 4088 } 4089 if (map > 0xff) 4090 return -EINVAL; 4091 4092 #define EPIO_REG(name) \ 4093 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \ 4094 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name)) 4095 4096 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 4097 t4_write_reg(adap, EPIO_REG(DATA2), mask1); 4098 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); 4099 4100 for (i = 0; i < NWOL_PAT; i++, map >>= 1) { 4101 if (!(map & 1)) 4102 continue; 4103 4104 /* write byte masks */ 4105 t4_write_reg(adap, EPIO_REG(DATA0), mask0); 4106 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR); 4107 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 4108 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY) 4109 return -ETIMEDOUT; 4110 4111 /* write CRC */ 4112 t4_write_reg(adap, EPIO_REG(DATA0), crc); 4113 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR); 4114 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 4115 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY) 4116 return -ETIMEDOUT; 4117 } 4118 #undef EPIO_REG 4119 4120 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN); 4121 return 0; 4122 } 4123 4124 /** 4125 * t4_mk_filtdelwr - create a delete filter WR 4126 * @ftid: the filter ID 4127 * @wr: the filter work request to populate 4128 * @qid: ingress queue to receive the delete notification 4129 * 4130 * Creates a filter work request to delete the supplied filter. If @qid is 4131 * negative the delete notification is suppressed. 4132 */ 4133 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) 4134 { 4135 memset(wr, 0, sizeof(*wr)); 4136 wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR)); 4137 wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16)); 4138 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) | 4139 V_FW_FILTER_WR_NOREPLY(qid < 0)); 4140 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER); 4141 if (qid >= 0) 4142 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid)); 4143 } 4144 4145 #define INIT_CMD(var, cmd, rd_wr) do { \ 4146 (var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \ 4147 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \ 4148 (var).retval_len16 = htonl(FW_LEN16(var)); \ 4149 } while (0) 4150 4151 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val) 4152 { 4153 struct fw_ldst_cmd c; 4154 4155 memset(&c, 0, sizeof(c)); 4156 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4157 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); 4158 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4159 c.u.addrval.addr = htonl(addr); 4160 c.u.addrval.val = htonl(val); 4161 4162 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4163 } 4164 4165 /** 4166 * t4_mdio_rd - read a PHY register through MDIO 4167 * @adap: the adapter 4168 * @mbox: mailbox to use for the FW command 4169 * @phy_addr: the PHY address 4170 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 4171 * @reg: the register to read 4172 * @valp: where to store the value 4173 * 4174 * Issues a FW command through the given mailbox to read a PHY register. 4175 */ 4176 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 4177 unsigned int mmd, unsigned int reg, unsigned int *valp) 4178 { 4179 int ret; 4180 struct fw_ldst_cmd c; 4181 4182 memset(&c, 0, sizeof(c)); 4183 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4184 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); 4185 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4186 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) | 4187 V_FW_LDST_CMD_MMD(mmd)); 4188 c.u.mdio.raddr = htons(reg); 4189 4190 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4191 if (ret == 0) 4192 *valp = ntohs(c.u.mdio.rval); 4193 return ret; 4194 } 4195 4196 /** 4197 * t4_mdio_wr - write a PHY register through MDIO 4198 * @adap: the adapter 4199 * @mbox: mailbox to use for the FW command 4200 * @phy_addr: the PHY address 4201 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 4202 * @reg: the register to write 4203 * @valp: value to write 4204 * 4205 * Issues a FW command through the given mailbox to write a PHY register. 4206 */ 4207 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 4208 unsigned int mmd, unsigned int reg, unsigned int val) 4209 { 4210 struct fw_ldst_cmd c; 4211 4212 memset(&c, 0, sizeof(c)); 4213 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4214 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); 4215 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4216 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) | 4217 V_FW_LDST_CMD_MMD(mmd)); 4218 c.u.mdio.raddr = htons(reg); 4219 c.u.mdio.rval = htons(val); 4220 4221 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4222 } 4223 4224 /** 4225 * t4_i2c_rd - read I2C data from adapter 4226 * @adap: the adapter 4227 * @port: Port number if per-port device; <0 if not 4228 * @devid: per-port device ID or absolute device ID 4229 * @offset: byte offset into device I2C space 4230 * @len: byte length of I2C space data 4231 * @buf: buffer in which to return I2C data 4232 * 4233 * Reads the I2C data from the indicated device and location. 4234 */ 4235 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, 4236 int port, unsigned int devid, 4237 unsigned int offset, unsigned int len, 4238 u8 *buf) 4239 { 4240 struct fw_ldst_cmd ldst; 4241 int ret; 4242 4243 if (port >= 4 || 4244 devid >= 256 || 4245 offset >= 256 || 4246 len > sizeof ldst.u.i2c.data) 4247 return -EINVAL; 4248 4249 memset(&ldst, 0, sizeof ldst); 4250 ldst.op_to_addrspace = 4251 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 4252 F_FW_CMD_REQUEST | 4253 F_FW_CMD_READ | 4254 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C)); 4255 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); 4256 ldst.u.i2c.pid = (port < 0 ? 0xff : port); 4257 ldst.u.i2c.did = devid; 4258 ldst.u.i2c.boffset = offset; 4259 ldst.u.i2c.blen = len; 4260 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); 4261 if (!ret) 4262 memcpy(buf, ldst.u.i2c.data, len); 4263 return ret; 4264 } 4265 4266 /** 4267 * t4_i2c_wr - write I2C data to adapter 4268 * @adap: the adapter 4269 * @port: Port number if per-port device; <0 if not 4270 * @devid: per-port device ID or absolute device ID 4271 * @offset: byte offset into device I2C space 4272 * @len: byte length of I2C space data 4273 * @buf: buffer containing new I2C data 4274 * 4275 * Write the I2C data to the indicated device and location. 4276 */ 4277 int t4_i2c_wr(struct adapter *adap, unsigned int mbox, 4278 int port, unsigned int devid, 4279 unsigned int offset, unsigned int len, 4280 u8 *buf) 4281 { 4282 struct fw_ldst_cmd ldst; 4283 4284 if (port >= 4 || 4285 devid >= 256 || 4286 offset >= 256 || 4287 len > sizeof ldst.u.i2c.data) 4288 return -EINVAL; 4289 4290 memset(&ldst, 0, sizeof ldst); 4291 ldst.op_to_addrspace = 4292 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 4293 F_FW_CMD_REQUEST | 4294 F_FW_CMD_WRITE | 4295 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C)); 4296 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); 4297 ldst.u.i2c.pid = (port < 0 ? 0xff : port); 4298 ldst.u.i2c.did = devid; 4299 ldst.u.i2c.boffset = offset; 4300 ldst.u.i2c.blen = len; 4301 memcpy(ldst.u.i2c.data, buf, len); 4302 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); 4303 } 4304 4305 /** 4306 * t4_sge_ctxt_flush - flush the SGE context cache 4307 * @adap: the adapter 4308 * @mbox: mailbox to use for the FW command 4309 * 4310 * Issues a FW command through the given mailbox to flush the 4311 * SGE context cache. 4312 */ 4313 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox) 4314 { 4315 int ret; 4316 struct fw_ldst_cmd c; 4317 4318 memset(&c, 0, sizeof(c)); 4319 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4320 F_FW_CMD_READ | 4321 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC)); 4322 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4323 c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH); 4324 4325 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4326 return ret; 4327 } 4328 4329 /** 4330 * t4_sge_ctxt_rd - read an SGE context through FW 4331 * @adap: the adapter 4332 * @mbox: mailbox to use for the FW command 4333 * @cid: the context id 4334 * @ctype: the context type 4335 * @data: where to store the context data 4336 * 4337 * Issues a FW command through the given mailbox to read an SGE context. 4338 */ 4339 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, 4340 enum ctxt_type ctype, u32 *data) 4341 { 4342 int ret; 4343 struct fw_ldst_cmd c; 4344 4345 if (ctype == CTXT_EGRESS) 4346 ret = FW_LDST_ADDRSPC_SGE_EGRC; 4347 else if (ctype == CTXT_INGRESS) 4348 ret = FW_LDST_ADDRSPC_SGE_INGC; 4349 else if (ctype == CTXT_FLM) 4350 ret = FW_LDST_ADDRSPC_SGE_FLMC; 4351 else 4352 ret = FW_LDST_ADDRSPC_SGE_CONMC; 4353 4354 memset(&c, 0, sizeof(c)); 4355 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4356 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret)); 4357 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4358 c.u.idctxt.physid = htonl(cid); 4359 4360 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4361 if (ret == 0) { 4362 data[0] = ntohl(c.u.idctxt.ctxt_data0); 4363 data[1] = ntohl(c.u.idctxt.ctxt_data1); 4364 data[2] = ntohl(c.u.idctxt.ctxt_data2); 4365 data[3] = ntohl(c.u.idctxt.ctxt_data3); 4366 data[4] = ntohl(c.u.idctxt.ctxt_data4); 4367 data[5] = ntohl(c.u.idctxt.ctxt_data5); 4368 } 4369 return ret; 4370 } 4371 4372 /** 4373 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW 4374 * @adap: the adapter 4375 * @cid: the context id 4376 * @ctype: the context type 4377 * @data: where to store the context data 4378 * 4379 * Reads an SGE context directly, bypassing FW. This is only for 4380 * debugging when FW is unavailable. 4381 */ 4382 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, 4383 u32 *data) 4384 { 4385 int i, ret; 4386 4387 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype)); 4388 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1); 4389 if (!ret) 4390 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4) 4391 *data++ = t4_read_reg(adap, i); 4392 return ret; 4393 } 4394 4395 /** 4396 * t4_fw_hello - establish communication with FW 4397 * @adap: the adapter 4398 * @mbox: mailbox to use for the FW command 4399 * @evt_mbox: mailbox to receive async FW events 4400 * @master: specifies the caller's willingness to be the device master 4401 * @state: returns the current device state (if non-NULL) 4402 * 4403 * Issues a command to establish communication with FW. Returns either 4404 * an error (negative integer) or the mailbox of the Master PF. 4405 */ 4406 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 4407 enum dev_master master, enum dev_state *state) 4408 { 4409 int ret; 4410 struct fw_hello_cmd c; 4411 u32 v; 4412 unsigned int master_mbox; 4413 int retries = FW_CMD_HELLO_RETRIES; 4414 4415 retry: 4416 memset(&c, 0, sizeof(c)); 4417 INIT_CMD(c, HELLO, WRITE); 4418 c.err_to_clearinit = htonl( 4419 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | 4420 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | 4421 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 4422 M_FW_HELLO_CMD_MBMASTER) | 4423 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | 4424 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) | 4425 F_FW_HELLO_CMD_CLEARINIT); 4426 4427 /* 4428 * Issue the HELLO command to the firmware. If it's not successful 4429 * but indicates that we got a "busy" or "timeout" condition, retry 4430 * the HELLO until we exhaust our retry limit. If we do exceed our 4431 * retry limit, check to see if the firmware left us any error 4432 * information and report that if so ... 4433 */ 4434 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4435 if (ret != FW_SUCCESS) { 4436 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) 4437 goto retry; 4438 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR) 4439 t4_report_fw_error(adap); 4440 return ret; 4441 } 4442 4443 v = ntohl(c.err_to_clearinit); 4444 master_mbox = G_FW_HELLO_CMD_MBMASTER(v); 4445 if (state) { 4446 if (v & F_FW_HELLO_CMD_ERR) 4447 *state = DEV_STATE_ERR; 4448 else if (v & F_FW_HELLO_CMD_INIT) 4449 *state = DEV_STATE_INIT; 4450 else 4451 *state = DEV_STATE_UNINIT; 4452 } 4453 4454 /* 4455 * If we're not the Master PF then we need to wait around for the 4456 * Master PF Driver to finish setting up the adapter. 4457 * 4458 * Note that we also do this wait if we're a non-Master-capable PF and 4459 * there is no current Master PF; a Master PF may show up momentarily 4460 * and we wouldn't want to fail pointlessly. (This can happen when an 4461 * OS loads lots of different drivers rapidly at the same time). In 4462 * this case, the Master PF returned by the firmware will be 4463 * M_PCIE_FW_MASTER so the test below will work ... 4464 */ 4465 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 && 4466 master_mbox != mbox) { 4467 int waiting = FW_CMD_HELLO_TIMEOUT; 4468 4469 /* 4470 * Wait for the firmware to either indicate an error or 4471 * initialized state. If we see either of these we bail out 4472 * and report the issue to the caller. If we exhaust the 4473 * "hello timeout" and we haven't exhausted our retries, try 4474 * again. Otherwise bail with a timeout error. 4475 */ 4476 for (;;) { 4477 u32 pcie_fw; 4478 4479 msleep(50); 4480 waiting -= 50; 4481 4482 /* 4483 * If neither Error nor Initialialized are indicated 4484 * by the firmware keep waiting till we exhaust our 4485 * timeout ... and then retry if we haven't exhausted 4486 * our retries ... 4487 */ 4488 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 4489 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) { 4490 if (waiting <= 0) { 4491 if (retries-- > 0) 4492 goto retry; 4493 4494 return -ETIMEDOUT; 4495 } 4496 continue; 4497 } 4498 4499 /* 4500 * We either have an Error or Initialized condition 4501 * report errors preferentially. 4502 */ 4503 if (state) { 4504 if (pcie_fw & F_PCIE_FW_ERR) 4505 *state = DEV_STATE_ERR; 4506 else if (pcie_fw & F_PCIE_FW_INIT) 4507 *state = DEV_STATE_INIT; 4508 } 4509 4510 /* 4511 * If we arrived before a Master PF was selected and 4512 * there's not a valid Master PF, grab its identity 4513 * for our caller. 4514 */ 4515 if (master_mbox == M_PCIE_FW_MASTER && 4516 (pcie_fw & F_PCIE_FW_MASTER_VLD)) 4517 master_mbox = G_PCIE_FW_MASTER(pcie_fw); 4518 break; 4519 } 4520 } 4521 4522 return master_mbox; 4523 } 4524 4525 /** 4526 * t4_fw_bye - end communication with FW 4527 * @adap: the adapter 4528 * @mbox: mailbox to use for the FW command 4529 * 4530 * Issues a command to terminate communication with FW. 4531 */ 4532 int t4_fw_bye(struct adapter *adap, unsigned int mbox) 4533 { 4534 struct fw_bye_cmd c; 4535 4536 memset(&c, 0, sizeof(c)); 4537 INIT_CMD(c, BYE, WRITE); 4538 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4539 } 4540 4541 /** 4542 * t4_fw_reset - issue a reset to FW 4543 * @adap: the adapter 4544 * @mbox: mailbox to use for the FW command 4545 * @reset: specifies the type of reset to perform 4546 * 4547 * Issues a reset command of the specified type to FW. 4548 */ 4549 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) 4550 { 4551 struct fw_reset_cmd c; 4552 4553 memset(&c, 0, sizeof(c)); 4554 INIT_CMD(c, RESET, WRITE); 4555 c.val = htonl(reset); 4556 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4557 } 4558 4559 /** 4560 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET 4561 * @adap: the adapter 4562 * @mbox: mailbox to use for the FW RESET command (if desired) 4563 * @force: force uP into RESET even if FW RESET command fails 4564 * 4565 * Issues a RESET command to firmware (if desired) with a HALT indication 4566 * and then puts the microprocessor into RESET state. The RESET command 4567 * will only be issued if a legitimate mailbox is provided (mbox <= 4568 * M_PCIE_FW_MASTER). 4569 * 4570 * This is generally used in order for the host to safely manipulate the 4571 * adapter without fear of conflicting with whatever the firmware might 4572 * be doing. The only way out of this state is to RESTART the firmware 4573 * ... 4574 */ 4575 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) 4576 { 4577 int ret = 0; 4578 4579 /* 4580 * If a legitimate mailbox is provided, issue a RESET command 4581 * with a HALT indication. 4582 */ 4583 if (mbox <= M_PCIE_FW_MASTER) { 4584 struct fw_reset_cmd c; 4585 4586 memset(&c, 0, sizeof(c)); 4587 INIT_CMD(c, RESET, WRITE); 4588 c.val = htonl(F_PIORST | F_PIORSTMODE); 4589 c.halt_pkd = htonl(F_FW_RESET_CMD_HALT); 4590 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4591 } 4592 4593 /* 4594 * Normally we won't complete the operation if the firmware RESET 4595 * command fails but if our caller insists we'll go ahead and put the 4596 * uP into RESET. This can be useful if the firmware is hung or even 4597 * missing ... We'll have to take the risk of putting the uP into 4598 * RESET without the cooperation of firmware in that case. 4599 * 4600 * We also force the firmware's HALT flag to be on in case we bypassed 4601 * the firmware RESET command above or we're dealing with old firmware 4602 * which doesn't have the HALT capability. This will serve as a flag 4603 * for the incoming firmware to know that it's coming out of a HALT 4604 * rather than a RESET ... if it's new enough to understand that ... 4605 */ 4606 if (ret == 0 || force) { 4607 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST); 4608 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT); 4609 } 4610 4611 /* 4612 * And we always return the result of the firmware RESET command 4613 * even when we force the uP into RESET ... 4614 */ 4615 return ret; 4616 } 4617 4618 /** 4619 * t4_fw_restart - restart the firmware by taking the uP out of RESET 4620 * @adap: the adapter 4621 * @reset: if we want to do a RESET to restart things 4622 * 4623 * Restart firmware previously halted by t4_fw_halt(). On successful 4624 * return the previous PF Master remains as the new PF Master and there 4625 * is no need to issue a new HELLO command, etc. 4626 * 4627 * We do this in two ways: 4628 * 4629 * 1. If we're dealing with newer firmware we'll simply want to take 4630 * the chip's microprocessor out of RESET. This will cause the 4631 * firmware to start up from its start vector. And then we'll loop 4632 * until the firmware indicates it's started again (PCIE_FW.HALT 4633 * reset to 0) or we timeout. 4634 * 4635 * 2. If we're dealing with older firmware then we'll need to RESET 4636 * the chip since older firmware won't recognize the PCIE_FW.HALT 4637 * flag and automatically RESET itself on startup. 4638 */ 4639 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) 4640 { 4641 if (reset) { 4642 /* 4643 * Since we're directing the RESET instead of the firmware 4644 * doing it automatically, we need to clear the PCIE_FW.HALT 4645 * bit. 4646 */ 4647 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0); 4648 4649 /* 4650 * If we've been given a valid mailbox, first try to get the 4651 * firmware to do the RESET. If that works, great and we can 4652 * return success. Otherwise, if we haven't been given a 4653 * valid mailbox or the RESET command failed, fall back to 4654 * hitting the chip with a hammer. 4655 */ 4656 if (mbox <= M_PCIE_FW_MASTER) { 4657 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); 4658 msleep(100); 4659 if (t4_fw_reset(adap, mbox, 4660 F_PIORST | F_PIORSTMODE) == 0) 4661 return 0; 4662 } 4663 4664 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE); 4665 msleep(2000); 4666 } else { 4667 int ms; 4668 4669 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); 4670 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 4671 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT)) 4672 return FW_SUCCESS; 4673 msleep(100); 4674 ms += 100; 4675 } 4676 return -ETIMEDOUT; 4677 } 4678 return 0; 4679 } 4680 4681 /** 4682 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW 4683 * @adap: the adapter 4684 * @mbox: mailbox to use for the FW RESET command (if desired) 4685 * @fw_data: the firmware image to write 4686 * @size: image size 4687 * @force: force upgrade even if firmware doesn't cooperate 4688 * 4689 * Perform all of the steps necessary for upgrading an adapter's 4690 * firmware image. Normally this requires the cooperation of the 4691 * existing firmware in order to halt all existing activities 4692 * but if an invalid mailbox token is passed in we skip that step 4693 * (though we'll still put the adapter microprocessor into RESET in 4694 * that case). 4695 * 4696 * On successful return the new firmware will have been loaded and 4697 * the adapter will have been fully RESET losing all previous setup 4698 * state. On unsuccessful return the adapter may be completely hosed ... 4699 * positive errno indicates that the adapter is ~probably~ intact, a 4700 * negative errno indicates that things are looking bad ... 4701 */ 4702 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, 4703 const u8 *fw_data, unsigned int size, int force) 4704 { 4705 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 4706 unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP; 4707 int reset, ret; 4708 4709 if (!bootstrap) { 4710 ret = t4_fw_halt(adap, mbox, force); 4711 if (ret < 0 && !force) 4712 return ret; 4713 } 4714 4715 ret = t4_load_fw(adap, fw_data, size); 4716 if (ret < 0 || bootstrap) 4717 return ret; 4718 4719 /* 4720 * Older versions of the firmware don't understand the new 4721 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 4722 * restart. So for newly loaded older firmware we'll have to do the 4723 * RESET for it so it starts up on a clean slate. We can tell if 4724 * the newly loaded firmware will handle this right by checking 4725 * its header flags to see if it advertises the capability. 4726 */ 4727 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 4728 return t4_fw_restart(adap, mbox, reset); 4729 } 4730 4731 /** 4732 * t4_fw_initialize - ask FW to initialize the device 4733 * @adap: the adapter 4734 * @mbox: mailbox to use for the FW command 4735 * 4736 * Issues a command to FW to partially initialize the device. This 4737 * performs initialization that generally doesn't depend on user input. 4738 */ 4739 int t4_fw_initialize(struct adapter *adap, unsigned int mbox) 4740 { 4741 struct fw_initialize_cmd c; 4742 4743 memset(&c, 0, sizeof(c)); 4744 INIT_CMD(c, INITIALIZE, WRITE); 4745 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4746 } 4747 4748 /** 4749 * t4_query_params - query FW or device parameters 4750 * @adap: the adapter 4751 * @mbox: mailbox to use for the FW command 4752 * @pf: the PF 4753 * @vf: the VF 4754 * @nparams: the number of parameters 4755 * @params: the parameter names 4756 * @val: the parameter values 4757 * 4758 * Reads the value of FW or device parameters. Up to 7 parameters can be 4759 * queried at once. 4760 */ 4761 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 4762 unsigned int vf, unsigned int nparams, const u32 *params, 4763 u32 *val) 4764 { 4765 int i, ret; 4766 struct fw_params_cmd c; 4767 __be32 *p = &c.param[0].mnem; 4768 4769 if (nparams > 7) 4770 return -EINVAL; 4771 4772 memset(&c, 0, sizeof(c)); 4773 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | 4774 F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) | 4775 V_FW_PARAMS_CMD_VFN(vf)); 4776 c.retval_len16 = htonl(FW_LEN16(c)); 4777 4778 for (i = 0; i < nparams; i++, p += 2, params++) 4779 *p = htonl(*params); 4780 4781 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4782 if (ret == 0) 4783 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) 4784 *val++ = ntohl(*p); 4785 return ret; 4786 } 4787 4788 /** 4789 * t4_set_params - sets FW or device parameters 4790 * @adap: the adapter 4791 * @mbox: mailbox to use for the FW command 4792 * @pf: the PF 4793 * @vf: the VF 4794 * @nparams: the number of parameters 4795 * @params: the parameter names 4796 * @val: the parameter values 4797 * 4798 * Sets the value of FW or device parameters. Up to 7 parameters can be 4799 * specified at once. 4800 */ 4801 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 4802 unsigned int vf, unsigned int nparams, const u32 *params, 4803 const u32 *val) 4804 { 4805 struct fw_params_cmd c; 4806 __be32 *p = &c.param[0].mnem; 4807 4808 if (nparams > 7) 4809 return -EINVAL; 4810 4811 memset(&c, 0, sizeof(c)); 4812 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | 4813 F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) | 4814 V_FW_PARAMS_CMD_VFN(vf)); 4815 c.retval_len16 = htonl(FW_LEN16(c)); 4816 4817 while (nparams--) { 4818 *p++ = htonl(*params); 4819 params++; 4820 *p++ = htonl(*val); 4821 val++; 4822 } 4823 4824 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4825 } 4826 4827 /** 4828 * t4_cfg_pfvf - configure PF/VF resource limits 4829 * @adap: the adapter 4830 * @mbox: mailbox to use for the FW command 4831 * @pf: the PF being configured 4832 * @vf: the VF being configured 4833 * @txq: the max number of egress queues 4834 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 4835 * @rxqi: the max number of interrupt-capable ingress queues 4836 * @rxq: the max number of interruptless ingress queues 4837 * @tc: the PCI traffic class 4838 * @vi: the max number of virtual interfaces 4839 * @cmask: the channel access rights mask for the PF/VF 4840 * @pmask: the port access rights mask for the PF/VF 4841 * @nexact: the maximum number of exact MPS filters 4842 * @rcaps: read capabilities 4843 * @wxcaps: write/execute capabilities 4844 * 4845 * Configures resource limits and capabilities for a physical or virtual 4846 * function. 4847 */ 4848 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 4849 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 4850 unsigned int rxqi, unsigned int rxq, unsigned int tc, 4851 unsigned int vi, unsigned int cmask, unsigned int pmask, 4852 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 4853 { 4854 struct fw_pfvf_cmd c; 4855 4856 memset(&c, 0, sizeof(c)); 4857 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST | 4858 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | 4859 V_FW_PFVF_CMD_VFN(vf)); 4860 c.retval_len16 = htonl(FW_LEN16(c)); 4861 c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) | 4862 V_FW_PFVF_CMD_NIQ(rxq)); 4863 c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) | 4864 V_FW_PFVF_CMD_PMASK(pmask) | 4865 V_FW_PFVF_CMD_NEQ(txq)); 4866 c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) | 4867 V_FW_PFVF_CMD_NEXACTF(nexact)); 4868 c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) | 4869 V_FW_PFVF_CMD_WX_CAPS(wxcaps) | 4870 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); 4871 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4872 } 4873 4874 /** 4875 * t4_alloc_vi_func - allocate a virtual interface 4876 * @adap: the adapter 4877 * @mbox: mailbox to use for the FW command 4878 * @port: physical port associated with the VI 4879 * @pf: the PF owning the VI 4880 * @vf: the VF owning the VI 4881 * @nmac: number of MAC addresses needed (1 to 5) 4882 * @mac: the MAC addresses of the VI 4883 * @rss_size: size of RSS table slice associated with this VI 4884 * @portfunc: which Port Application Function MAC Address is desired 4885 * @idstype: Intrusion Detection Type 4886 * 4887 * Allocates a virtual interface for the given physical port. If @mac is 4888 * not %NULL it contains the MAC addresses of the VI as assigned by FW. 4889 * @mac should be large enough to hold @nmac Ethernet addresses, they are 4890 * stored consecutively so the space needed is @nmac * 6 bytes. 4891 * Returns a negative error number or the non-negative VI id. 4892 */ 4893 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, 4894 unsigned int port, unsigned int pf, unsigned int vf, 4895 unsigned int nmac, u8 *mac, u16 *rss_size, 4896 unsigned int portfunc, unsigned int idstype) 4897 { 4898 int ret; 4899 struct fw_vi_cmd c; 4900 4901 memset(&c, 0, sizeof(c)); 4902 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST | 4903 F_FW_CMD_WRITE | F_FW_CMD_EXEC | 4904 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf)); 4905 c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c)); 4906 c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) | 4907 V_FW_VI_CMD_FUNC(portfunc)); 4908 c.portid_pkd = V_FW_VI_CMD_PORTID(port); 4909 c.nmac = nmac - 1; 4910 4911 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4912 if (ret) 4913 return ret; 4914 4915 if (mac) { 4916 memcpy(mac, c.mac, sizeof(c.mac)); 4917 switch (nmac) { 4918 case 5: 4919 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); 4920 case 4: 4921 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); 4922 case 3: 4923 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); 4924 case 2: 4925 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); 4926 } 4927 } 4928 if (rss_size) 4929 *rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize)); 4930 return G_FW_VI_CMD_VIID(htons(c.type_to_viid)); 4931 } 4932 4933 /** 4934 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface 4935 * @adap: the adapter 4936 * @mbox: mailbox to use for the FW command 4937 * @port: physical port associated with the VI 4938 * @pf: the PF owning the VI 4939 * @vf: the VF owning the VI 4940 * @nmac: number of MAC addresses needed (1 to 5) 4941 * @mac: the MAC addresses of the VI 4942 * @rss_size: size of RSS table slice associated with this VI 4943 * 4944 * backwards compatible and convieniance routine to allocate a Virtual 4945 * Interface with a Ethernet Port Application Function and Intrustion 4946 * Detection System disabled. 4947 */ 4948 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 4949 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 4950 u16 *rss_size) 4951 { 4952 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size, 4953 FW_VI_FUNC_ETH, 0); 4954 } 4955 4956 /** 4957 * t4_free_vi - free a virtual interface 4958 * @adap: the adapter 4959 * @mbox: mailbox to use for the FW command 4960 * @pf: the PF owning the VI 4961 * @vf: the VF owning the VI 4962 * @viid: virtual interface identifiler 4963 * 4964 * Free a previously allocated virtual interface. 4965 */ 4966 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, 4967 unsigned int vf, unsigned int viid) 4968 { 4969 struct fw_vi_cmd c; 4970 4971 memset(&c, 0, sizeof(c)); 4972 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | 4973 F_FW_CMD_REQUEST | 4974 F_FW_CMD_EXEC | 4975 V_FW_VI_CMD_PFN(pf) | 4976 V_FW_VI_CMD_VFN(vf)); 4977 c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c)); 4978 c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid)); 4979 4980 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4981 } 4982 4983 /** 4984 * t4_set_rxmode - set Rx properties of a virtual interface 4985 * @adap: the adapter 4986 * @mbox: mailbox to use for the FW command 4987 * @viid: the VI id 4988 * @mtu: the new MTU or -1 4989 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 4990 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 4991 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 4992 * @vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change 4993 * @sleep_ok: if true we may sleep while awaiting command completion 4994 * 4995 * Sets Rx properties of a virtual interface. 4996 */ 4997 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 4998 int mtu, int promisc, int all_multi, int bcast, int vlanex, 4999 bool sleep_ok) 5000 { 5001 struct fw_vi_rxmode_cmd c; 5002 5003 /* convert to FW values */ 5004 if (mtu < 0) 5005 mtu = M_FW_VI_RXMODE_CMD_MTU; 5006 if (promisc < 0) 5007 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; 5008 if (all_multi < 0) 5009 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; 5010 if (bcast < 0) 5011 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; 5012 if (vlanex < 0) 5013 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; 5014 5015 memset(&c, 0, sizeof(c)); 5016 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST | 5017 F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid)); 5018 c.retval_len16 = htonl(FW_LEN16(c)); 5019 c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) | 5020 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 5021 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 5022 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | 5023 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); 5024 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 5025 } 5026 5027 /** 5028 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 5029 * @adap: the adapter 5030 * @mbox: mailbox to use for the FW command 5031 * @viid: the VI id 5032 * @free: if true any existing filters for this VI id are first removed 5033 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 5034 * @addr: the MAC address(es) 5035 * @idx: where to store the index of each allocated filter 5036 * @hash: pointer to hash address filter bitmap 5037 * @sleep_ok: call is allowed to sleep 5038 * 5039 * Allocates an exact-match filter for each of the supplied addresses and 5040 * sets it to the corresponding address. If @idx is not %NULL it should 5041 * have at least @naddr entries, each of which will be set to the index of 5042 * the filter allocated for the corresponding MAC address. If a filter 5043 * could not be allocated for an address its index is set to 0xffff. 5044 * If @hash is not %NULL addresses that fail to allocate an exact filter 5045 * are hashed and update the hash filter bitmap pointed at by @hash. 5046 * 5047 * Returns a negative error number or the number of filters allocated. 5048 */ 5049 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, 5050 unsigned int viid, bool free, unsigned int naddr, 5051 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) 5052 { 5053 int offset, ret = 0; 5054 struct fw_vi_mac_cmd c; 5055 unsigned int nfilters = 0; 5056 unsigned int max_naddr = is_t4(adap) ? 5057 NUM_MPS_CLS_SRAM_L_INSTANCES : 5058 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 5059 unsigned int rem = naddr; 5060 5061 if (naddr > max_naddr) 5062 return -EINVAL; 5063 5064 for (offset = 0; offset < naddr ; /**/) { 5065 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) 5066 ? rem 5067 : ARRAY_SIZE(c.u.exact)); 5068 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 5069 u.exact[fw_naddr]), 16); 5070 struct fw_vi_mac_exact *p; 5071 int i; 5072 5073 memset(&c, 0, sizeof(c)); 5074 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | 5075 F_FW_CMD_REQUEST | 5076 F_FW_CMD_WRITE | 5077 V_FW_CMD_EXEC(free) | 5078 V_FW_VI_MAC_CMD_VIID(viid)); 5079 c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) | 5080 V_FW_CMD_LEN16(len16)); 5081 5082 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 5083 p->valid_to_idx = htons( 5084 F_FW_VI_MAC_CMD_VALID | 5085 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); 5086 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); 5087 } 5088 5089 /* 5090 * It's okay if we run out of space in our MAC address arena. 5091 * Some of the addresses we submit may get stored so we need 5092 * to run through the reply to see what the results were ... 5093 */ 5094 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); 5095 if (ret && ret != -FW_ENOMEM) 5096 break; 5097 5098 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 5099 u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx)); 5100 5101 if (idx) 5102 idx[offset+i] = (index >= max_naddr 5103 ? 0xffff 5104 : index); 5105 if (index < max_naddr) 5106 nfilters++; 5107 else if (hash) 5108 *hash |= (1ULL << hash_mac_addr(addr[offset+i])); 5109 } 5110 5111 free = false; 5112 offset += fw_naddr; 5113 rem -= fw_naddr; 5114 } 5115 5116 if (ret == 0 || ret == -FW_ENOMEM) 5117 ret = nfilters; 5118 return ret; 5119 } 5120 5121 /** 5122 * t4_change_mac - modifies the exact-match filter for a MAC address 5123 * @adap: the adapter 5124 * @mbox: mailbox to use for the FW command 5125 * @viid: the VI id 5126 * @idx: index of existing filter for old value of MAC address, or -1 5127 * @addr: the new MAC address value 5128 * @persist: whether a new MAC allocation should be persistent 5129 * @add_smt: if true also add the address to the HW SMT 5130 * 5131 * Modifies an exact-match filter and sets it to the new MAC address if 5132 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 5133 * latter case the address is added persistently if @persist is %true. 5134 * 5135 * Note that in general it is not possible to modify the value of a given 5136 * filter so the generic way to modify an address filter is to free the one 5137 * being used by the old address value and allocate a new filter for the 5138 * new address value. 5139 * 5140 * Returns a negative error number or the index of the filter with the new 5141 * MAC value. Note that this index may differ from @idx. 5142 */ 5143 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 5144 int idx, const u8 *addr, bool persist, bool add_smt) 5145 { 5146 int ret, mode; 5147 struct fw_vi_mac_cmd c; 5148 struct fw_vi_mac_exact *p = c.u.exact; 5149 unsigned int max_mac_addr = is_t4(adap) ? 5150 NUM_MPS_CLS_SRAM_L_INSTANCES : 5151 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 5152 5153 if (idx < 0) /* new allocation */ 5154 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 5155 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; 5156 5157 memset(&c, 0, sizeof(c)); 5158 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST | 5159 F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid)); 5160 c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1)); 5161 p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID | 5162 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | 5163 V_FW_VI_MAC_CMD_IDX(idx)); 5164 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 5165 5166 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 5167 if (ret == 0) { 5168 ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx)); 5169 if (ret >= max_mac_addr) 5170 ret = -ENOMEM; 5171 } 5172 return ret; 5173 } 5174 5175 /** 5176 * t4_set_addr_hash - program the MAC inexact-match hash filter 5177 * @adap: the adapter 5178 * @mbox: mailbox to use for the FW command 5179 * @viid: the VI id 5180 * @ucast: whether the hash filter should also match unicast addresses 5181 * @vec: the value to be written to the hash filter 5182 * @sleep_ok: call is allowed to sleep 5183 * 5184 * Sets the 64-bit inexact-match hash filter for a virtual interface. 5185 */ 5186 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 5187 bool ucast, u64 vec, bool sleep_ok) 5188 { 5189 struct fw_vi_mac_cmd c; 5190 5191 memset(&c, 0, sizeof(c)); 5192 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST | 5193 F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid)); 5194 c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN | 5195 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | 5196 V_FW_CMD_LEN16(1)); 5197 c.u.hash.hashvec = cpu_to_be64(vec); 5198 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 5199 } 5200 5201 /** 5202 * t4_enable_vi - enable/disable a virtual interface 5203 * @adap: the adapter 5204 * @mbox: mailbox to use for the FW command 5205 * @viid: the VI id 5206 * @rx_en: 1=enable Rx, 0=disable Rx 5207 * @tx_en: 1=enable Tx, 0=disable Tx 5208 * 5209 * Enables/disables a virtual interface. 5210 */ 5211 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 5212 bool rx_en, bool tx_en) 5213 { 5214 struct fw_vi_enable_cmd c; 5215 5216 memset(&c, 0, sizeof(c)); 5217 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST | 5218 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid)); 5219 c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) | 5220 V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); 5221 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5222 } 5223 5224 /** 5225 * t4_identify_port - identify a VI's port by blinking its LED 5226 * @adap: the adapter 5227 * @mbox: mailbox to use for the FW command 5228 * @viid: the VI id 5229 * @nblinks: how many times to blink LED at 2.5 Hz 5230 * 5231 * Identifies a VI's port by blinking its LED. 5232 */ 5233 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, 5234 unsigned int nblinks) 5235 { 5236 struct fw_vi_enable_cmd c; 5237 5238 memset(&c, 0, sizeof(c)); 5239 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST | 5240 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid)); 5241 c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); 5242 c.blinkdur = htons(nblinks); 5243 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5244 } 5245 5246 /** 5247 * t4_iq_start_stop - enable/disable an ingress queue and its FLs 5248 * @adap: the adapter 5249 * @mbox: mailbox to use for the FW command 5250 * @start: %true to enable the queues, %false to disable them 5251 * @pf: the PF owning the queues 5252 * @vf: the VF owning the queues 5253 * @iqid: ingress queue id 5254 * @fl0id: FL0 queue id or 0xffff if no attached FL0 5255 * @fl1id: FL1 queue id or 0xffff if no attached FL1 5256 * 5257 * Starts or stops an ingress queue and its associated FLs, if any. 5258 */ 5259 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, 5260 unsigned int pf, unsigned int vf, unsigned int iqid, 5261 unsigned int fl0id, unsigned int fl1id) 5262 { 5263 struct fw_iq_cmd c; 5264 5265 memset(&c, 0, sizeof(c)); 5266 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 5267 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 5268 V_FW_IQ_CMD_VFN(vf)); 5269 c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) | 5270 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c)); 5271 c.iqid = htons(iqid); 5272 c.fl0id = htons(fl0id); 5273 c.fl1id = htons(fl1id); 5274 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5275 } 5276 5277 /** 5278 * t4_iq_free - free an ingress queue and its FLs 5279 * @adap: the adapter 5280 * @mbox: mailbox to use for the FW command 5281 * @pf: the PF owning the queues 5282 * @vf: the VF owning the queues 5283 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 5284 * @iqid: ingress queue id 5285 * @fl0id: FL0 queue id or 0xffff if no attached FL0 5286 * @fl1id: FL1 queue id or 0xffff if no attached FL1 5287 * 5288 * Frees an ingress queue and its associated FLs, if any. 5289 */ 5290 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 5291 unsigned int vf, unsigned int iqtype, unsigned int iqid, 5292 unsigned int fl0id, unsigned int fl1id) 5293 { 5294 struct fw_iq_cmd c; 5295 5296 memset(&c, 0, sizeof(c)); 5297 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 5298 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 5299 V_FW_IQ_CMD_VFN(vf)); 5300 c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c)); 5301 c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype)); 5302 c.iqid = htons(iqid); 5303 c.fl0id = htons(fl0id); 5304 c.fl1id = htons(fl1id); 5305 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5306 } 5307 5308 /** 5309 * t4_eth_eq_free - free an Ethernet egress queue 5310 * @adap: the adapter 5311 * @mbox: mailbox to use for the FW command 5312 * @pf: the PF owning the queue 5313 * @vf: the VF owning the queue 5314 * @eqid: egress queue id 5315 * 5316 * Frees an Ethernet egress queue. 5317 */ 5318 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 5319 unsigned int vf, unsigned int eqid) 5320 { 5321 struct fw_eq_eth_cmd c; 5322 5323 memset(&c, 0, sizeof(c)); 5324 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 5325 F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) | 5326 V_FW_EQ_ETH_CMD_VFN(vf)); 5327 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); 5328 c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid)); 5329 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5330 } 5331 5332 /** 5333 * t4_ctrl_eq_free - free a control egress queue 5334 * @adap: the adapter 5335 * @mbox: mailbox to use for the FW command 5336 * @pf: the PF owning the queue 5337 * @vf: the VF owning the queue 5338 * @eqid: egress queue id 5339 * 5340 * Frees a control egress queue. 5341 */ 5342 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 5343 unsigned int vf, unsigned int eqid) 5344 { 5345 struct fw_eq_ctrl_cmd c; 5346 5347 memset(&c, 0, sizeof(c)); 5348 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 5349 F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) | 5350 V_FW_EQ_CTRL_CMD_VFN(vf)); 5351 c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); 5352 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid)); 5353 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5354 } 5355 5356 /** 5357 * t4_ofld_eq_free - free an offload egress queue 5358 * @adap: the adapter 5359 * @mbox: mailbox to use for the FW command 5360 * @pf: the PF owning the queue 5361 * @vf: the VF owning the queue 5362 * @eqid: egress queue id 5363 * 5364 * Frees a control egress queue. 5365 */ 5366 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 5367 unsigned int vf, unsigned int eqid) 5368 { 5369 struct fw_eq_ofld_cmd c; 5370 5371 memset(&c, 0, sizeof(c)); 5372 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 5373 F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) | 5374 V_FW_EQ_OFLD_CMD_VFN(vf)); 5375 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); 5376 c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid)); 5377 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5378 } 5379 5380 /** 5381 * t4_handle_fw_rpl - process a FW reply message 5382 * @adap: the adapter 5383 * @rpl: start of the FW message 5384 * 5385 * Processes a FW message, such as link state change messages. 5386 */ 5387 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) 5388 { 5389 u8 opcode = *(const u8 *)rpl; 5390 const struct fw_port_cmd *p = (const void *)rpl; 5391 unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16)); 5392 5393 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) { 5394 /* link/module state change message */ 5395 int speed = 0, fc = 0, i; 5396 int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid)); 5397 struct port_info *pi = NULL; 5398 struct link_config *lc; 5399 u32 stat = ntohl(p->u.info.lstatus_to_modtype); 5400 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0; 5401 u32 mod = G_FW_PORT_CMD_MODTYPE(stat); 5402 5403 if (stat & F_FW_PORT_CMD_RXPAUSE) 5404 fc |= PAUSE_RX; 5405 if (stat & F_FW_PORT_CMD_TXPAUSE) 5406 fc |= PAUSE_TX; 5407 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) 5408 speed = SPEED_100; 5409 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) 5410 speed = SPEED_1000; 5411 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) 5412 speed = SPEED_10000; 5413 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) 5414 speed = SPEED_40000; 5415 5416 for_each_port(adap, i) { 5417 pi = adap2pinfo(adap, i); 5418 if (pi->tx_chan == chan) 5419 break; 5420 } 5421 lc = &pi->link_cfg; 5422 5423 if (mod != pi->mod_type) { 5424 pi->mod_type = mod; 5425 t4_os_portmod_changed(adap, i); 5426 } 5427 if (link_ok != lc->link_ok || speed != lc->speed || 5428 fc != lc->fc) { /* something changed */ 5429 int reason; 5430 5431 if (!link_ok && lc->link_ok) 5432 reason = G_FW_PORT_CMD_LINKDNRC(stat); 5433 else 5434 reason = -1; 5435 5436 lc->link_ok = link_ok; 5437 lc->speed = speed; 5438 lc->fc = fc; 5439 lc->supported = ntohs(p->u.info.pcap); 5440 t4_os_link_changed(adap, i, link_ok, reason); 5441 } 5442 } else { 5443 CH_WARN_RATELIMIT(adap, 5444 "Unknown firmware reply 0x%x (0x%x)\n", opcode, action); 5445 return -EINVAL; 5446 } 5447 return 0; 5448 } 5449 5450 /** 5451 * get_pci_mode - determine a card's PCI mode 5452 * @adapter: the adapter 5453 * @p: where to store the PCI settings 5454 * 5455 * Determines a card's PCI mode and associated parameters, such as speed 5456 * and width. 5457 */ 5458 static void __devinit get_pci_mode(struct adapter *adapter, 5459 struct pci_params *p) 5460 { 5461 u16 val; 5462 u32 pcie_cap; 5463 5464 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 5465 if (pcie_cap) { 5466 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val); 5467 p->speed = val & PCI_EXP_LNKSTA_CLS; 5468 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; 5469 } 5470 } 5471 5472 /** 5473 * init_link_config - initialize a link's SW state 5474 * @lc: structure holding the link state 5475 * @caps: link capabilities 5476 * 5477 * Initializes the SW state maintained for each link, including the link's 5478 * capabilities and default speed/flow-control/autonegotiation settings. 5479 */ 5480 static void __devinit init_link_config(struct link_config *lc, 5481 unsigned int caps) 5482 { 5483 lc->supported = caps; 5484 lc->requested_speed = 0; 5485 lc->speed = 0; 5486 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; 5487 if (lc->supported & FW_PORT_CAP_ANEG) { 5488 lc->advertising = lc->supported & ADVERT_MASK; 5489 lc->autoneg = AUTONEG_ENABLE; 5490 lc->requested_fc |= PAUSE_AUTONEG; 5491 } else { 5492 lc->advertising = 0; 5493 lc->autoneg = AUTONEG_DISABLE; 5494 } 5495 } 5496 5497 static int __devinit get_flash_params(struct adapter *adapter) 5498 { 5499 int ret; 5500 u32 info = 0; 5501 5502 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID); 5503 if (!ret) 5504 ret = sf1_read(adapter, 3, 0, 1, &info); 5505 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 5506 if (ret < 0) 5507 return ret; 5508 5509 if ((info & 0xff) != 0x20) /* not a Numonix flash */ 5510 return -EINVAL; 5511 info >>= 16; /* log2 of size */ 5512 if (info >= 0x14 && info < 0x18) 5513 adapter->params.sf_nsec = 1 << (info - 16); 5514 else if (info == 0x18) 5515 adapter->params.sf_nsec = 64; 5516 else 5517 return -EINVAL; 5518 adapter->params.sf_size = 1 << info; 5519 return 0; 5520 } 5521 5522 static void __devinit set_pcie_completion_timeout(struct adapter *adapter, 5523 u8 range) 5524 { 5525 u16 val; 5526 u32 pcie_cap; 5527 5528 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 5529 if (pcie_cap) { 5530 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val); 5531 val &= 0xfff0; 5532 val |= range ; 5533 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val); 5534 } 5535 } 5536 5537 /** 5538 * t4_prep_adapter - prepare SW and HW for operation 5539 * @adapter: the adapter 5540 * @reset: if true perform a HW reset 5541 * 5542 * Initialize adapter SW state for the various HW modules, set initial 5543 * values for some adapter tunables, take PHYs out of reset, and 5544 * initialize the MDIO interface. 5545 */ 5546 int __devinit t4_prep_adapter(struct adapter *adapter) 5547 { 5548 int ret; 5549 uint16_t device_id; 5550 uint32_t pl_rev; 5551 5552 get_pci_mode(adapter, &adapter->params.pci); 5553 5554 pl_rev = t4_read_reg(adapter, A_PL_REV); 5555 adapter->params.chipid = G_CHIPID(pl_rev); 5556 adapter->params.rev = G_REV(pl_rev); 5557 if (adapter->params.chipid == 0) { 5558 /* T4 did not have chipid in PL_REV (T5 onwards do) */ 5559 adapter->params.chipid = CHELSIO_T4; 5560 5561 /* T4A1 chip is not supported */ 5562 if (adapter->params.rev == 1) { 5563 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n"); 5564 return -EINVAL; 5565 } 5566 } 5567 adapter->params.pci.vpd_cap_addr = 5568 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD); 5569 5570 ret = get_flash_params(adapter); 5571 if (ret < 0) 5572 return ret; 5573 5574 ret = get_vpd_params(adapter, &adapter->params.vpd); 5575 if (ret < 0) 5576 return ret; 5577 5578 /* Cards with real ASICs have the chipid in the PCIe device id */ 5579 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id); 5580 if (device_id >> 12 == adapter->params.chipid) 5581 adapter->params.cim_la_size = CIMLA_SIZE; 5582 else { 5583 /* FPGA */ 5584 adapter->params.fpga = 1; 5585 adapter->params.cim_la_size = 2 * CIMLA_SIZE; 5586 } 5587 5588 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 5589 5590 /* 5591 * Default port and clock for debugging in case we can't reach FW. 5592 */ 5593 adapter->params.nports = 1; 5594 adapter->params.portvec = 1; 5595 adapter->params.vpd.cclk = 50000; 5596 5597 /* Set pci completion timeout value to 4 seconds. */ 5598 set_pcie_completion_timeout(adapter, 0xd); 5599 return 0; 5600 } 5601 5602 /** 5603 * t4_init_tp_params - initialize adap->params.tp 5604 * @adap: the adapter 5605 * 5606 * Initialize various fields of the adapter's TP Parameters structure. 5607 */ 5608 int __devinit t4_init_tp_params(struct adapter *adap) 5609 { 5610 int chan; 5611 u32 v; 5612 5613 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION); 5614 adap->params.tp.tre = G_TIMERRESOLUTION(v); 5615 adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v); 5616 5617 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 5618 for (chan = 0; chan < NCHAN; chan++) 5619 adap->params.tp.tx_modq[chan] = chan; 5620 5621 /* 5622 * Cache the adapter's Compressed Filter Mode and global Incress 5623 * Configuration. 5624 */ 5625 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, 5626 &adap->params.tp.vlan_pri_map, 1, 5627 A_TP_VLAN_PRI_MAP); 5628 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, 5629 &adap->params.tp.ingress_config, 1, 5630 A_TP_INGRESS_CONFIG); 5631 5632 /* 5633 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field 5634 * shift positions of several elements of the Compressed Filter Tuple 5635 * for this adapter which we need frequently ... 5636 */ 5637 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN); 5638 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); 5639 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); 5640 adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL); 5641 5642 /* 5643 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID 5644 * represents the presense of an Outer VLAN instead of a VNIC ID. 5645 */ 5646 if ((adap->params.tp.ingress_config & F_VNIC) == 0) 5647 adap->params.tp.vnic_shift = -1; 5648 5649 return 0; 5650 } 5651 5652 /** 5653 * t4_filter_field_shift - calculate filter field shift 5654 * @adap: the adapter 5655 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) 5656 * 5657 * Return the shift position of a filter field within the Compressed 5658 * Filter Tuple. The filter field is specified via its selection bit 5659 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN. 5660 */ 5661 int t4_filter_field_shift(const struct adapter *adap, int filter_sel) 5662 { 5663 unsigned int filter_mode = adap->params.tp.vlan_pri_map; 5664 unsigned int sel; 5665 int field_shift; 5666 5667 if ((filter_mode & filter_sel) == 0) 5668 return -1; 5669 5670 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { 5671 switch (filter_mode & sel) { 5672 case F_FCOE: field_shift += W_FT_FCOE; break; 5673 case F_PORT: field_shift += W_FT_PORT; break; 5674 case F_VNIC_ID: field_shift += W_FT_VNIC_ID; break; 5675 case F_VLAN: field_shift += W_FT_VLAN; break; 5676 case F_TOS: field_shift += W_FT_TOS; break; 5677 case F_PROTOCOL: field_shift += W_FT_PROTOCOL; break; 5678 case F_ETHERTYPE: field_shift += W_FT_ETHERTYPE; break; 5679 case F_MACMATCH: field_shift += W_FT_MACMATCH; break; 5680 case F_MPSHITTYPE: field_shift += W_FT_MPSHITTYPE; break; 5681 case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break; 5682 } 5683 } 5684 return field_shift; 5685 } 5686 5687 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf) 5688 { 5689 u8 addr[6]; 5690 int ret, i, j; 5691 struct fw_port_cmd c; 5692 u16 rss_size; 5693 adapter_t *adap = p->adapter; 5694 5695 memset(&c, 0, sizeof(c)); 5696 5697 for (i = 0, j = -1; i <= p->port_id; i++) { 5698 do { 5699 j++; 5700 } while ((adap->params.portvec & (1 << j)) == 0); 5701 } 5702 5703 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | 5704 F_FW_CMD_REQUEST | F_FW_CMD_READ | 5705 V_FW_PORT_CMD_PORTID(j)); 5706 c.action_to_len16 = htonl( 5707 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | 5708 FW_LEN16(c)); 5709 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 5710 if (ret) 5711 return ret; 5712 5713 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); 5714 if (ret < 0) 5715 return ret; 5716 5717 p->viid = ret; 5718 p->tx_chan = j; 5719 p->rx_chan_map = get_mps_bg_map(adap, j); 5720 p->lport = j; 5721 p->rss_size = rss_size; 5722 t4_os_set_hw_addr(adap, p->port_id, addr); 5723 5724 ret = ntohl(c.u.info.lstatus_to_modtype); 5725 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ? 5726 G_FW_PORT_CMD_MDIOADDR(ret) : -1; 5727 p->port_type = G_FW_PORT_CMD_PTYPE(ret); 5728 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret); 5729 5730 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); 5731 5732 return 0; 5733 } 5734 5735 int t4_sched_config(struct adapter *adapter, int type, int minmaxen, 5736 int sleep_ok) 5737 { 5738 struct fw_sched_cmd cmd; 5739 5740 memset(&cmd, 0, sizeof(cmd)); 5741 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 5742 F_FW_CMD_REQUEST | 5743 F_FW_CMD_WRITE); 5744 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 5745 5746 cmd.u.config.sc = FW_SCHED_SC_CONFIG; 5747 cmd.u.config.type = type; 5748 cmd.u.config.minmaxen = minmaxen; 5749 5750 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 5751 NULL, sleep_ok); 5752 } 5753 5754 int t4_sched_params(struct adapter *adapter, int type, int level, int mode, 5755 int rateunit, int ratemode, int channel, int cl, 5756 int minrate, int maxrate, int weight, int pktsize, 5757 int sleep_ok) 5758 { 5759 struct fw_sched_cmd cmd; 5760 5761 memset(&cmd, 0, sizeof(cmd)); 5762 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 5763 F_FW_CMD_REQUEST | 5764 F_FW_CMD_WRITE); 5765 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 5766 5767 cmd.u.params.sc = FW_SCHED_SC_PARAMS; 5768 cmd.u.params.type = type; 5769 cmd.u.params.level = level; 5770 cmd.u.params.mode = mode; 5771 cmd.u.params.ch = channel; 5772 cmd.u.params.cl = cl; 5773 cmd.u.params.unit = rateunit; 5774 cmd.u.params.rate = ratemode; 5775 cmd.u.params.min = cpu_to_be32(minrate); 5776 cmd.u.params.max = cpu_to_be32(maxrate); 5777 cmd.u.params.weight = cpu_to_be16(weight); 5778 cmd.u.params.pktsize = cpu_to_be16(pktsize); 5779 5780 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 5781 NULL, sleep_ok); 5782 } 5783