/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/eventhandler.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);	/* flush */
}
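/*
 * Example (editor's sketch, not from the original source): a typical
 * read-modify-write via t4_set_reg_field() followed by a bounded poll with
 * t4_wait_op_done().  The register and field names below are hypothetical
 * placeholders, not real T4 register definitions.
 *
 *	t4_set_reg_field(sc, A_EXAMPLE_CTRL, M_EXAMPLE_MODE,
 *	    V_EXAMPLE_MODE(1) | F_EXAMPLE_START);
 *	if (t4_wait_op_done(sc, A_EXAMPLE_CTRL, F_EXAMPLE_START, 0, 10, 5))
 *		CH_ERR(sc, "example operation timed out\n");
 */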
/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 *
 * N.B. This routine should only be used as a last resort: the firmware uses
 *      the backdoor registers on a regular basis and we can end up
 *      conflicting with its uses!
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
	u32 val;

	if (chip_id(adap) <= CHELSIO_T5)
		req |= F_ENABLE;
	else
		req |= F_T6_ENABLE;

	if (is_t4(adap))
		req |= F_LOCALCFG;

	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
	val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);

	/*
	 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * F_ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);

	return val;
}
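/*
 * Example (editor's sketch): reading the 32-bit ID word at Configuration
 * Space offset 0 through the backdoor, e.g. as a sanity check that config
 * accesses aren't being virtualized away.  The vendor ID is in bits 15:0
 * and the device ID in bits 31:16 of the result:
 *
 *	u32 id = t4_hw_pci_read_cfg4(adap, 0);
 *	u16 vendor = id & 0xffff, device = id >> 16;
 */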
/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command. If @rpl is not %NULL it is used to
 * store the FW's reply to the command. The command and its optional
 * reply are of the same length. Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure. A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error. In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg);

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			CH_DUMP_MBOX(adap, mbox, data_reg);

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
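/*
 * Example (editor's sketch): how the FW command wrappers later in this file
 * (e.g. t4_restart_aneg()) drive the mailbox.  t4_wr_mbox() is the usual
 * sleep-capable front end to t4_wr_mbox_meat():
 *
 *	struct fw_port_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
 *			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
 *	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
 *				  FW_LEN16(c));
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 */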
/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr. If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
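/*
 * Example (editor's sketch): pulling the 64-byte line that covers a given
 * MC address.  The buffer holds 16 big-endian words and the word covering
 * @addr sits at index (addr & 0x3f) / 4 within the line:
 *
 *	__be32 line[16];
 *	u64 ecc;
 *
 *	if (t4_mc_read(adap, 0, addr, line, &ecc) == 0)
 *		word = line[(addr & 0x3f) / 4];	-- still big-endian
 */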
/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr. If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
		/*
		 * These macros are missing from t4_regs.h.  Added temporarily
		 * for testing.
		 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 * @adap: the adapter
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to read
 * @buf: host memory buffer
 *
 * Reads an [almost] arbitrary memory region in the firmware: the
 * firmware memory address, length and host buffer must be aligned on
 * 32-bit boundaries. The memory is returned as a raw byte sequence from
 * the firmware's memory. If this memory contains data structures which
 * contain multi-byte integers, it's the caller's responsibility to
 * perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64 - 1);
	end = (addr + len + 64 - 1) & ~(64 - 1);
	offset = (addr - start) / sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
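/*
 * Example (editor's sketch): reading a small, 32-bit aligned region out of
 * EDC0 with t4_mem_read().  The address shown is hypothetical; byte order
 * conversion of the result is the caller's job:
 *
 *	__be32 buf[8];
 *
 *	ret = t4_mem_read(adap, MEM_EDC0, 0x1000, sizeof(buf), buf);
 *	if (ret == 0)
 *		first_word = ntohl(buf[0]);
 */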
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
#define CHELSIO_VPD_UNIQUE_ID	0x82

/**
 * t4_seeprom_read - read a serial EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 * t4_seeprom_write - write a serial EEPROM location
 * @adapter: adapter to write
 * @addr: virtual EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual. The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [ES-A..ES)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
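/*
 * Worked example (editor's note) of the mapping above, with A = @fn * @sz
 * and ES = EEPROM size:
 *
 *	t4_eeprom_ptov(0, fn, sz)        == 31K       -- first 1K window
 *	t4_eeprom_ptov(1024, fn, sz)     == ES - A    -- function-specific area
 *	t4_eeprom_ptov(1024 + A, fn, sz) == 0         -- start of shared area
 */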
/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 * get_vpd_keyword_val - locates an information field keyword in the VPD
 * @v: pointer to buffered vpd data structure
 * @kw: the keyword to search for
 *
 * Returns the offset of the keyword's value within the buffered VPD, or
 * -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -ENOENT;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,	/* program page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID        = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
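/*
 * Example (editor's sketch, not an operation this driver performs): the
 * sf1_write()/sf1_read() pair is used by chaining operations with @cont.
 * A hypothetical JEDEC ID query would issue the SF_RD_ID opcode and read
 * back three ID bytes in one locked sequence, then release the lock:
 *
 *	u32 id = 0;
 *
 *	if (sf1_write(adapter, 1, 1, 1, SF_RD_ID) == 0)
 *		sf1_read(adapter, 3, 0, 1, &id);
 *	t4_write_reg(adapter, A_SF_OP, 0);	-- unlock SF
 */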
/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 * If @byte_oriented is set the write data is stored as a byte stream
 * (i.e., it matches what's on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
			     1, vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
			     tp_microcode_ver), 1, vers, 0);
}

/**
 * t4_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major version mismatch, and a positive value if
 * the expected major version is found but there's a minor version
 * mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	switch (chip_id(adapter)) {
	case CHELSIO_T4:
		exp_major = T4FW_VERSION_MAJOR;
		exp_minor = T4FW_VERSION_MINOR;
		exp_micro = T4FW_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_VERSION_MAJOR;
		exp_minor = T5FW_VERSION_MINOR;
		exp_micro = T5FW_VERSION_MICRO;
		break;
	default:
		CH_ERR(adapter, "Unsupported chip type, %x\n",
		       chip_id(adapter));
		return -EINVAL;
	}

	if (major != exp_major) {	/* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, exp_major);
		return -EINVAL;
	}

	if (minor == exp_minor && micro == exp_micro)
		return 0;	/* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}
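/*
 * Worked example (editor's note): how callers below turn a byte range into
 * an inclusive sector range for t4_flash_erase_sectors().  Assuming a
 * hypothetical 4 MB part with 64 sectors, sf_sec_size is 64 KB, so a
 * 100 KB region starting at sector 7 spans DIV_ROUND_UP(100K, 64K) = 2
 * sectors:
 *
 *	t4_flash_erase_sectors(adapter, 7, 7 + 2 - 1);
 */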
/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored, or an error if the device FLASH is too small to contain
 * a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/**
 * t4_load_cfg - download config file
 * @adap: the adapter
 * @cfg_data: the cfg text file to write
 * @size: text file size
 *
 * Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}
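/*
 * Example (editor's sketch): a caller would typically hand t4_load_cfg() an
 * image obtained through the firmware(9) interface.  The image name below
 * is hypothetical:
 *
 *	const struct firmware *cfg = firmware_get("t4fw_cfg");
 *
 *	if (cfg != NULL) {
 *		ret = t4_load_cfg(adap, cfg->data, cfg->datasize);
 *		firmware_put(cfg, FIRMWARE_UNLOAD);
 *	}
 */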
/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
		return -EFBIG;
	}
	if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
		CH_ERR(adap,
		       "FW image (%d) is not suitable for this adapter (%d)\n",
		       hdr->chip, chip_id(adap));
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature.  Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;		/* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature.  Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];		/* ROM signature.  The value 0xaa55 */
	u8	initialization_size[2];	/* Units 512.  Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header.  0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8];		/* Reserved */
	u8	efi_image_header_offset[2];	/* Offset to EFI Image */
	u8	pcir_offset[2];		/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8	signature[4];	/* Signature.  The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification */
	u8	device_id[2];	/* Device Identification */
	u8	vital_product[2];	/* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2];	/* Image Length.  Multiple of 512B */
	u8	code_revision[2];	/* Revision Level of Code/Data */
	u8	code_type;	/* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible.  Legacy
		 * 0x01: Open Firmware standard for PCI.  FCODE
		 * 0x02: Hewlett-Packard PA RISC.  HP reserved
		 * 0x03: EFI Image.  EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator;	/* Indicator.  Identifies the last image in the ROM */
	u8	reserved[2];	/* Reserved */
} pcir_data_t;	/* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,	/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,	/* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,		/* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t),	/* basic header */
	BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC,	/* 1024 increments of 512B each */
	VENDOR_ID = 0x1425,		/* Vendor ID */
	PCIR_SIGNATURE = 0x52494350	/* PCIR signature */
};

/*
 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write
 * @boot_data: the boot image to modify
 *
 * Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device IDs.
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *)&boot_data[cur_header];
		pcir_header = (pcir_data_t *)&boot_data[cur_header +
			      le16_to_cpu(*(u16 *)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE.  Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16 *)pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum, writing
			 * the new checksum value directly into the boot data.
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16 *)pcir_header->device_id = device_id;

		}

		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}

/*
 * t4_load_boot - download boot flash
 * @adap: the adapter
 * @boot_data: the boot image to write
 * @boot_addr: offset in flash to write boot_data
 * @size: image size
 *
 * Write the supplied boot image to the card's serial flash.
 * The boot image has the following sections: a 28-byte header and the
 * boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = boot_addr * 1024;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * Number of sectors spanned
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *)&boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16 *)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32 *)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16 *)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = (device_id & 0xff) | 0x4000;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16 *)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image.  This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/**
 * t4_read_cimq_cfg - read CIM queue configuration
 * @adap: the adapter
 * @base: holds the queue base addresses in bytes
 * @size: holds the queue sizes in bytes
 * @thres: holds the queue full thresholds in bytes
 *
 * Returns the current configuration of the CIM queues, starting with
 * the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256;	/* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256;	/* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;	/* 8-byte unit */
	}
	for (i = 0; i < adap->chip_params->cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256;	/* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256;	/* value is in 256-byte units */
	}
}

/**
 * t4_read_cim_ibq - read the contents of a CIM inbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		/*
		 * It might take 3-10ms before the IBQ debug read access is
		 * allowed.  Wait for 1 Sec with a delay of 1 usec.
		 */
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      1000000, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}
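/*
 * Example (editor's sketch): dumping IBQ 0 into a local buffer.  Capacity
 * is given in 32-bit words and must be a multiple of 4; the return value
 * is the number of words actually read:
 *
 *	u32 ibq[CIM_IBQ_SIZE * 4];
 *	int n;
 *
 *	n = t4_read_cim_ibq(adap, 0, ibq, nitems(ibq));
 *	if (n < 0)
 *		CH_ERR(adap, "IBQ read failed: %d\n", n);
 */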
/**
 * t4_read_cim_obq - read the contents of a CIM outbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;

	if (qid >= adap->chip_params->cim_num_obq || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;	/* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;	/* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 * t4_cim_read - read a block from CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 * t4_cim_write - write a block into CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to write
 * @valp: set of values to write
 *
 * Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
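/*
 * Example (editor's sketch): t4_cim_read() combined with one of the region
 * bases above.  Reading four words from the start of the CIM control
 * region is what t4_cim_ctl_read() below does for its callers:
 *
 *	unsigned int vals[4];
 *
 *	ret = t4_cim_read(adap, CIM_CTL_BASE, nitems(vals), vals);
 */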
/**
 * t4_cim_ctl_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 * t4_cim_read_la - read CIM LA capture buffer
 * @adap: the adapter
 * @la_buf: where to store the LA data
 * @wrptr: the HW write pointer within the capture buffer
 *
 * Reads the contents of the CIM LA buffer with the most recent entry at
 * the end of the returned data and with the entry at @wrptr first.
 * We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;
		/* address can't exceed 0xfff (UpDbgLaRdPtr is 12 bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap))
			while ((idx & 0xf) > 9)
				idx = (idx + 1) % M_UPDBGLARDPTR;
	}
restart:
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
V_POLADBGRDPTR(idx) |
1834                          V_PILADBGRDPTR(idx));
1835             *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1836             *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1837         }
1838     }
1839     t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1840 }
1841 
1842 /**
1843  *	t4_tp_read_la - read TP LA capture buffer
1844  *	@adap: the adapter
1845  *	@la_buf: where to store the LA data
1846  *	@wrptr: the HW write pointer within the capture buffer
1847  *
1848  *	Reads the contents of the TP LA buffer with the most recent entry at
1849  *	the end of the returned data and with the entry at @wrptr first.
1850  *	We leave the LA in the running state we find it in.
1851  */
1852 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1853 {
1854     bool last_incomplete;
1855     unsigned int i, cfg, val, idx;
1856 
1857     cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1858     if (cfg & F_DBGLAENABLE)            /* freeze LA */
1859         t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1860                      adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1861 
1862     val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1863     idx = G_DBGLAWPTR(val);
1864     last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1865     if (last_incomplete)
1866         idx = (idx + 1) & M_DBGLARPTR;
1867     if (wrptr)
1868         *wrptr = idx;
1869 
1870     val &= 0xffff;
1871     val &= ~V_DBGLARPTR(M_DBGLARPTR);
1872     val |= adap->params.tp.la_mask;
1873 
1874     for (i = 0; i < TPLA_SIZE; i++) {
1875         t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1876         la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1877         idx = (idx + 1) & M_DBGLARPTR;
1878     }
1879 
1880     /* Wipe out last entry if it isn't valid */
1881     if (last_incomplete)
1882         la_buf[TPLA_SIZE - 1] = ~0ULL;
1883 
1884     if (cfg & F_DBGLAENABLE)            /* restore running state */
1885         t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1886                      cfg | adap->params.tp.la_mask);
1887 }
1888 
1889 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1890 {
1891     unsigned int i, j;
1892 
1893     for (i = 0; i < 8; i++) {
1894         u32 *p = la_buf + i;
1895 
1896         t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1897         j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1898         t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1899         for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1900             *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1901     }
1902 }
1903 
1904 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1905                      FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1906                      FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1907 
1908 /**
1909  *	t4_link_l1cfg - apply link configuration to MAC/PHY
1910  *	@adap: the adapter
1911  *	@mbox: mbox to use for the FW command
1912  *	@port: the port id
 *	@lc: the requested link configuration
1913  *
1914  *	Set up a port's MAC and PHY according to a desired link configuration.
1915  *	- If the PHY can auto-negotiate, first decide what to advertise, then
1916  *	  enable/disable auto-negotiation as desired, and reset.
1917  *	- If the PHY does not auto-negotiate just reset it.
1918  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1919  *	  otherwise do it later based on the outcome of auto-negotiation.
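 *
 *	For example (mirroring the code below, not new behaviour): with
 *	auto-negotiation disabled and @lc->requested_fc = PAUSE_RX | PAUSE_TX,
 *	the rcap word sent to the firmware is lc->requested_speed together
 *	with FW_PORT_CAP_FC_RX | FW_PORT_CAP_FC_TX and the automatic MDI
 *	setting.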
1920  */
1921 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
1922                   struct link_config *lc)
1923 {
1924     struct fw_port_cmd c;
1925     unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1926 
1927     lc->link_ok = 0;
1928     if (lc->requested_fc & PAUSE_RX)
1929         fc |= FW_PORT_CAP_FC_RX;
1930     if (lc->requested_fc & PAUSE_TX)
1931         fc |= FW_PORT_CAP_FC_TX;
1932 
1933     memset(&c, 0, sizeof(c));
1934     c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1935                            F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1936     c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1937                               FW_LEN16(c));
1938 
1939     if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1940         c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1941         lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1942     } else if (lc->autoneg == AUTONEG_DISABLE) {
1943         c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1944         lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1945     } else
1946         c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1947 
1948     return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1949 }
1950 
1951 /**
1952  *	t4_restart_aneg - restart autonegotiation
1953  *	@adap: the adapter
1954  *	@mbox: mbox to use for the FW command
1955  *	@port: the port id
1956  *
1957  *	Restarts autonegotiation for the selected port.
1958  */
1959 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1960 {
1961     struct fw_port_cmd c;
1962 
1963     memset(&c, 0, sizeof(c));
1964     c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1965                            F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1966     c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1967                               FW_LEN16(c));
1968     c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1969     return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1970 }
1971 
1972 struct intr_info {
1973     unsigned int mask;      /* bits to check in interrupt status */
1974     const char *msg;        /* message to print or NULL */
1975     short stat_idx;         /* stat counter to increment or -1 */
1976     unsigned short fatal;   /* whether the condition reported is fatal */
1977 };
1978 
1979 /**
1980  *	t4_handle_intr_status - table driven interrupt handler
1981  *	@adapter: the adapter that generated the interrupt
1982  *	@reg: the interrupt status register to process
1983  *	@acts: table of interrupt actions
1984  *
1985  *	A table driven interrupt handler that applies a set of masks to an
1986  *	interrupt status word and performs the corresponding actions if the
1987  *	interrupts described by the mask have occurred. The actions include
1988  *	optionally emitting a warning or alert message. The table is terminated
1989  *	by an entry specifying mask 0. Returns the number of fatal interrupt
1990  *	conditions.
1991  */
1992 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1993                                  const struct intr_info *acts)
1994 {
1995     int fatal = 0;
1996     unsigned int mask = 0;
1997     unsigned int status = t4_read_reg(adapter, reg);
1998 
1999     for ( ; acts->mask; ++acts) {
2000         if (!(status & acts->mask))
2001             continue;
2002         if (acts->fatal) {
2003             fatal++;
2004             CH_ALERT(adapter, "%s (0x%x)\n",
2005                      acts->msg, status & acts->mask);
2006         } else if (acts->msg)
2007             CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
2008                               acts->msg, status & acts->mask);
2009         mask |= acts->mask;
2010     }
2011     status &= mask;
2012     if (status)     /* clear processed interrupts */
2013         t4_write_reg(adapter, reg, status);
2014     return fatal;
2015 }
2016 
2017 /*
2018  * Interrupt handler for the PCIE module.
2019 */ 2020 static void pcie_intr_handler(struct adapter *adapter) 2021 { 2022 static struct intr_info sysbus_intr_info[] = { 2023 { F_RNPP, "RXNP array parity error", -1, 1 }, 2024 { F_RPCP, "RXPC array parity error", -1, 1 }, 2025 { F_RCIP, "RXCIF array parity error", -1, 1 }, 2026 { F_RCCP, "Rx completions control array parity error", -1, 1 }, 2027 { F_RFTP, "RXFT array parity error", -1, 1 }, 2028 { 0 } 2029 }; 2030 static struct intr_info pcie_port_intr_info[] = { 2031 { F_TPCP, "TXPC array parity error", -1, 1 }, 2032 { F_TNPP, "TXNP array parity error", -1, 1 }, 2033 { F_TFTP, "TXFT array parity error", -1, 1 }, 2034 { F_TCAP, "TXCA array parity error", -1, 1 }, 2035 { F_TCIP, "TXCIF array parity error", -1, 1 }, 2036 { F_RCAP, "RXCA array parity error", -1, 1 }, 2037 { F_OTDD, "outbound request TLP discarded", -1, 1 }, 2038 { F_RDPE, "Rx data parity error", -1, 1 }, 2039 { F_TDUE, "Tx uncorrectable data error", -1, 1 }, 2040 { 0 } 2041 }; 2042 static struct intr_info pcie_intr_info[] = { 2043 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, 2044 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, 2045 { F_MSIDATAPERR, "MSI data parity error", -1, 1 }, 2046 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 2047 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 2048 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 2049 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 2050 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, 2051 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, 2052 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 2053 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, 2054 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 2055 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 2056 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, 2057 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 2058 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 2059 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, 2060 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 2061 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 2062 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 2063 { F_FIDPERR, "PCI FID parity error", -1, 1 }, 2064 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, 2065 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 }, 2066 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 2067 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, 2068 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 }, 2069 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 }, 2070 { F_PCIESINT, "PCI core secondary fault", -1, 1 }, 2071 { F_PCIEPINT, "PCI core primary fault", -1, 1 }, 2072 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1, 2073 0 }, 2074 { 0 } 2075 }; 2076 2077 static struct intr_info t5_pcie_intr_info[] = { 2078 { F_MSTGRPPERR, "Master Response Read Queue parity error", 2079 -1, 1 }, 2080 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 }, 2081 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 }, 2082 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 2083 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 2084 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 2085 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 2086 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error", 2087 -1, 1 }, 2088 { F_PIOREQGRPPERR, "PCI PIO request Group 
FIFO parity error", 2089 -1, 1 }, 2090 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 2091 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 }, 2092 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 2093 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 2094 { F_DREQWRPERR, "PCI DMA channel write request parity error", 2095 -1, 1 }, 2096 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 2097 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 2098 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 }, 2099 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 2100 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 2101 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 2102 { F_FIDPERR, "PCI FID parity error", -1, 1 }, 2103 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 }, 2104 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 }, 2105 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 2106 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error", 2107 -1, 1 }, 2108 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error", 2109 -1, 1 }, 2110 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 }, 2111 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 }, 2112 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 }, 2113 { F_READRSPERR, "Outbound read error", -1, 2114 0 }, 2115 { 0 } 2116 }; 2117 2118 int fat; 2119 2120 if (is_t4(adapter)) 2121 fat = t4_handle_intr_status(adapter, 2122 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 2123 sysbus_intr_info) + 2124 t4_handle_intr_status(adapter, 2125 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 2126 pcie_port_intr_info) + 2127 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, 2128 pcie_intr_info); 2129 else 2130 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, 2131 t5_pcie_intr_info); 2132 if (fat) 2133 t4_fatal_err(adapter); 2134 } 2135 2136 /* 2137 * TP interrupt handler. 2138 */ 2139 static void tp_intr_handler(struct adapter *adapter) 2140 { 2141 static struct intr_info tp_intr_info[] = { 2142 { 0x3fffffff, "TP parity error", -1, 1 }, 2143 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 2144 { 0 } 2145 }; 2146 2147 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info)) 2148 t4_fatal_err(adapter); 2149 } 2150 2151 /* 2152 * SGE interrupt handler. 
2153 */ 2154 static void sge_intr_handler(struct adapter *adapter) 2155 { 2156 u64 v; 2157 u32 err; 2158 2159 static struct intr_info sge_intr_info[] = { 2160 { F_ERR_CPL_EXCEED_IQE_SIZE, 2161 "SGE received CPL exceeding IQE size", -1, 1 }, 2162 { F_ERR_INVALID_CIDX_INC, 2163 "SGE GTS CIDX increment too large", -1, 0 }, 2164 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 2165 { F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, 2166 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0, 2167 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 2168 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 2169 0 }, 2170 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 2171 0 }, 2172 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 2173 0 }, 2174 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 2175 0 }, 2176 { F_ERR_ING_CTXT_PRIO, 2177 "SGE too many priority ingress contexts", -1, 0 }, 2178 { F_ERR_EGR_CTXT_PRIO, 2179 "SGE too many priority egress contexts", -1, 0 }, 2180 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, 2181 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, 2182 { 0 } 2183 }; 2184 2185 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) | 2186 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32); 2187 if (v) { 2188 CH_ALERT(adapter, "SGE parity error (%#llx)\n", 2189 (unsigned long long)v); 2190 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v); 2191 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32); 2192 } 2193 2194 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info); 2195 2196 err = t4_read_reg(adapter, A_SGE_ERROR_STATS); 2197 if (err & F_ERROR_QID_VALID) { 2198 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err)); 2199 if (err & F_UNCAPTURED_ERROR) 2200 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n"); 2201 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID | 2202 F_UNCAPTURED_ERROR); 2203 } 2204 2205 if (v != 0) 2206 t4_fatal_err(adapter); 2207 } 2208 2209 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\ 2210 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR) 2211 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\ 2212 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR) 2213 2214 /* 2215 * CIM interrupt handler. 
2216 */ 2217 static void cim_intr_handler(struct adapter *adapter) 2218 { 2219 static struct intr_info cim_intr_info[] = { 2220 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 2221 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 2222 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 2223 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, 2224 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, 2225 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, 2226 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 2227 { 0 } 2228 }; 2229 static struct intr_info cim_upintr_info[] = { 2230 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 2231 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 2232 { F_ILLWRINT, "CIM illegal write", -1, 1 }, 2233 { F_ILLRDINT, "CIM illegal read", -1, 1 }, 2234 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 }, 2235 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 }, 2236 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, 2237 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, 2238 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, 2239 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, 2240 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, 2241 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, 2242 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, 2243 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, 2244 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, 2245 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, 2246 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, 2247 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, 2248 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, 2249 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, 2250 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 }, 2251 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 }, 2252 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 }, 2253 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 }, 2254 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, 2255 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, 2256 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 }, 2257 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, 2258 { 0 } 2259 }; 2260 int fat; 2261 2262 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR) 2263 t4_report_fw_error(adapter); 2264 2265 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 2266 cim_intr_info) + 2267 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE, 2268 cim_upintr_info); 2269 if (fat) 2270 t4_fatal_err(adapter); 2271 } 2272 2273 /* 2274 * ULP RX interrupt handler. 2275 */ 2276 static void ulprx_intr_handler(struct adapter *adapter) 2277 { 2278 static struct intr_info ulprx_intr_info[] = { 2279 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 }, 2280 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 }, 2281 { 0x7fffff, "ULPRX parity error", -1, 1 }, 2282 { 0 } 2283 }; 2284 2285 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info)) 2286 t4_fatal_err(adapter); 2287 } 2288 2289 /* 2290 * ULP TX interrupt handler. 
2291 */ 2292 static void ulptx_intr_handler(struct adapter *adapter) 2293 { 2294 static struct intr_info ulptx_intr_info[] = { 2295 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 2296 0 }, 2297 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 2298 0 }, 2299 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 2300 0 }, 2301 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 2302 0 }, 2303 { 0xfffffff, "ULPTX parity error", -1, 1 }, 2304 { 0 } 2305 }; 2306 2307 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info)) 2308 t4_fatal_err(adapter); 2309 } 2310 2311 /* 2312 * PM TX interrupt handler. 2313 */ 2314 static void pmtx_intr_handler(struct adapter *adapter) 2315 { 2316 static struct intr_info pmtx_intr_info[] = { 2317 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 2318 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 2319 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 2320 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 2321 { 0xffffff0, "PMTX framing error", -1, 1 }, 2322 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 2323 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 2324 1 }, 2325 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 2326 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 2327 { 0 } 2328 }; 2329 2330 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info)) 2331 t4_fatal_err(adapter); 2332 } 2333 2334 /* 2335 * PM RX interrupt handler. 2336 */ 2337 static void pmrx_intr_handler(struct adapter *adapter) 2338 { 2339 static struct intr_info pmrx_intr_info[] = { 2340 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 2341 { 0x3ffff0, "PMRX framing error", -1, 1 }, 2342 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 2343 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 2344 1 }, 2345 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 2346 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 2347 { 0 } 2348 }; 2349 2350 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info)) 2351 t4_fatal_err(adapter); 2352 } 2353 2354 /* 2355 * CPL switch interrupt handler. 2356 */ 2357 static void cplsw_intr_handler(struct adapter *adapter) 2358 { 2359 static struct intr_info cplsw_intr_info[] = { 2360 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 2361 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 2362 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 2363 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 2364 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 2365 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 2366 { 0 } 2367 }; 2368 2369 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info)) 2370 t4_fatal_err(adapter); 2371 } 2372 2373 /* 2374 * LE interrupt handler. 2375 */ 2376 static void le_intr_handler(struct adapter *adap) 2377 { 2378 static struct intr_info le_intr_info[] = { 2379 { F_LIPMISS, "LE LIP miss", -1, 0 }, 2380 { F_LIP0, "LE 0 LIP error", -1, 0 }, 2381 { F_PARITYERR, "LE parity error", -1, 1 }, 2382 { F_UNKNOWNCMD, "LE unknown command", -1, 1 }, 2383 { F_REQQPARERR, "LE request queue parity error", -1, 1 }, 2384 { 0 } 2385 }; 2386 2387 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info)) 2388 t4_fatal_err(adap); 2389 } 2390 2391 /* 2392 * MPS interrupt handler. 
2393 */ 2394 static void mps_intr_handler(struct adapter *adapter) 2395 { 2396 static struct intr_info mps_rx_intr_info[] = { 2397 { 0xffffff, "MPS Rx parity error", -1, 1 }, 2398 { 0 } 2399 }; 2400 static struct intr_info mps_tx_intr_info[] = { 2401 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 }, 2402 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 2403 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error", 2404 -1, 1 }, 2405 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error", 2406 -1, 1 }, 2407 { F_BUBBLE, "MPS Tx underflow", -1, 1 }, 2408 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 2409 { F_FRMERR, "MPS Tx framing error", -1, 1 }, 2410 { 0 } 2411 }; 2412 static struct intr_info mps_trc_intr_info[] = { 2413 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 }, 2414 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1, 2415 1 }, 2416 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 }, 2417 { 0 } 2418 }; 2419 static struct intr_info mps_stat_sram_intr_info[] = { 2420 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 2421 { 0 } 2422 }; 2423 static struct intr_info mps_stat_tx_intr_info[] = { 2424 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 2425 { 0 } 2426 }; 2427 static struct intr_info mps_stat_rx_intr_info[] = { 2428 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 2429 { 0 } 2430 }; 2431 static struct intr_info mps_cls_intr_info[] = { 2432 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 2433 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 2434 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 2435 { 0 } 2436 }; 2437 2438 int fat; 2439 2440 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE, 2441 mps_rx_intr_info) + 2442 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE, 2443 mps_tx_intr_info) + 2444 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE, 2445 mps_trc_intr_info) + 2446 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM, 2447 mps_stat_sram_intr_info) + 2448 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 2449 mps_stat_tx_intr_info) + 2450 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 2451 mps_stat_rx_intr_info) + 2452 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE, 2453 mps_cls_intr_info); 2454 2455 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0); 2456 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */ 2457 if (fat) 2458 t4_fatal_err(adapter); 2459 } 2460 2461 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE) 2462 2463 /* 2464 * EDC/MC interrupt handler. 2465 */ 2466 static void mem_intr_handler(struct adapter *adapter, int idx) 2467 { 2468 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 2469 2470 unsigned int addr, cnt_addr, v; 2471 2472 if (idx <= MEM_EDC1) { 2473 addr = EDC_REG(A_EDC_INT_CAUSE, idx); 2474 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx); 2475 } else { 2476 if (is_t4(adapter)) { 2477 addr = A_MC_INT_CAUSE; 2478 cnt_addr = A_MC_ECC_STATUS; 2479 } else { 2480 addr = A_MC_P_INT_CAUSE; 2481 cnt_addr = A_MC_P_ECC_STATUS; 2482 } 2483 } 2484 2485 v = t4_read_reg(adapter, addr) & MEM_INT_MASK; 2486 if (v & F_PERR_INT_CAUSE) 2487 CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]); 2488 if (v & F_ECC_CE_INT_CAUSE) { 2489 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr)); 2490 2491 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT)); 2492 CH_WARN_RATELIMIT(adapter, 2493 "%u %s correctable ECC data error%s\n", 2494 cnt, name[idx], cnt > 1 ? 
"s" : ""); 2495 } 2496 if (v & F_ECC_UE_INT_CAUSE) 2497 CH_ALERT(adapter, "%s uncorrectable ECC data error\n", 2498 name[idx]); 2499 2500 t4_write_reg(adapter, addr, v); 2501 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE)) 2502 t4_fatal_err(adapter); 2503 } 2504 2505 /* 2506 * MA interrupt handler. 2507 */ 2508 static void ma_intr_handler(struct adapter *adapter) 2509 { 2510 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE); 2511 2512 if (status & F_MEM_PERR_INT_CAUSE) { 2513 CH_ALERT(adapter, "MA parity error, parity status %#x\n", 2514 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1)); 2515 if (is_t5(adapter)) 2516 CH_ALERT(adapter, 2517 "MA parity error, parity status %#x\n", 2518 t4_read_reg(adapter, 2519 A_MA_PARITY_ERROR_STATUS2)); 2520 } 2521 if (status & F_MEM_WRAP_INT_CAUSE) { 2522 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS); 2523 CH_ALERT(adapter, "MA address wrap-around error by client %u to" 2524 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v), 2525 G_MEM_WRAP_ADDRESS(v) << 4); 2526 } 2527 t4_write_reg(adapter, A_MA_INT_CAUSE, status); 2528 t4_fatal_err(adapter); 2529 } 2530 2531 /* 2532 * SMB interrupt handler. 2533 */ 2534 static void smb_intr_handler(struct adapter *adap) 2535 { 2536 static struct intr_info smb_intr_info[] = { 2537 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 2538 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 2539 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 2540 { 0 } 2541 }; 2542 2543 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info)) 2544 t4_fatal_err(adap); 2545 } 2546 2547 /* 2548 * NC-SI interrupt handler. 2549 */ 2550 static void ncsi_intr_handler(struct adapter *adap) 2551 { 2552 static struct intr_info ncsi_intr_info[] = { 2553 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 2554 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 2555 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 2556 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 2557 { 0 } 2558 }; 2559 2560 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info)) 2561 t4_fatal_err(adap); 2562 } 2563 2564 /* 2565 * XGMAC interrupt handler. 2566 */ 2567 static void xgmac_intr_handler(struct adapter *adap, int port) 2568 { 2569 u32 v, int_cause_reg; 2570 2571 if (is_t4(adap)) 2572 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE); 2573 else 2574 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE); 2575 2576 v = t4_read_reg(adap, int_cause_reg); 2577 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR); 2578 if (!v) 2579 return; 2580 2581 if (v & F_TXFIFO_PRTY_ERR) 2582 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port); 2583 if (v & F_RXFIFO_PRTY_ERR) 2584 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port); 2585 t4_write_reg(adap, int_cause_reg, v); 2586 t4_fatal_err(adap); 2587 } 2588 2589 /* 2590 * PL interrupt handler. 2591 */ 2592 static void pl_intr_handler(struct adapter *adap) 2593 { 2594 static struct intr_info pl_intr_info[] = { 2595 { F_FATALPERR, "Fatal parity error", -1, 1 }, 2596 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 2597 { 0 } 2598 }; 2599 2600 static struct intr_info t5_pl_intr_info[] = { 2601 { F_PL_BUSPERR, "PL bus parity error", -1, 1 }, 2602 { F_FATALPERR, "Fatal parity error", -1, 1 }, 2603 { 0 } 2604 }; 2605 2606 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, 2607 is_t4(adap) ? 
pl_intr_info : t5_pl_intr_info)) 2608 t4_fatal_err(adap); 2609 } 2610 2611 #define PF_INTR_MASK (F_PFSW | F_PFCIM) 2612 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \ 2613 F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \ 2614 F_CPL_SWITCH | F_SGE | F_ULP_TX) 2615 2616 /** 2617 * t4_slow_intr_handler - control path interrupt handler 2618 * @adapter: the adapter 2619 * 2620 * T4 interrupt handler for non-data global interrupt events, e.g., errors. 2621 * The designation 'slow' is because it involves register reads, while 2622 * data interrupts typically don't involve any MMIOs. 2623 */ 2624 int t4_slow_intr_handler(struct adapter *adapter) 2625 { 2626 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE); 2627 2628 if (!(cause & GLBL_INTR_MASK)) 2629 return 0; 2630 if (cause & F_CIM) 2631 cim_intr_handler(adapter); 2632 if (cause & F_MPS) 2633 mps_intr_handler(adapter); 2634 if (cause & F_NCSI) 2635 ncsi_intr_handler(adapter); 2636 if (cause & F_PL) 2637 pl_intr_handler(adapter); 2638 if (cause & F_SMB) 2639 smb_intr_handler(adapter); 2640 if (cause & F_XGMAC0) 2641 xgmac_intr_handler(adapter, 0); 2642 if (cause & F_XGMAC1) 2643 xgmac_intr_handler(adapter, 1); 2644 if (cause & F_XGMAC_KR0) 2645 xgmac_intr_handler(adapter, 2); 2646 if (cause & F_XGMAC_KR1) 2647 xgmac_intr_handler(adapter, 3); 2648 if (cause & F_PCIE) 2649 pcie_intr_handler(adapter); 2650 if (cause & F_MC) 2651 mem_intr_handler(adapter, MEM_MC); 2652 if (cause & F_EDC0) 2653 mem_intr_handler(adapter, MEM_EDC0); 2654 if (cause & F_EDC1) 2655 mem_intr_handler(adapter, MEM_EDC1); 2656 if (cause & F_LE) 2657 le_intr_handler(adapter); 2658 if (cause & F_TP) 2659 tp_intr_handler(adapter); 2660 if (cause & F_MA) 2661 ma_intr_handler(adapter); 2662 if (cause & F_PM_TX) 2663 pmtx_intr_handler(adapter); 2664 if (cause & F_PM_RX) 2665 pmrx_intr_handler(adapter); 2666 if (cause & F_ULP_RX) 2667 ulprx_intr_handler(adapter); 2668 if (cause & F_CPL_SWITCH) 2669 cplsw_intr_handler(adapter); 2670 if (cause & F_SGE) 2671 sge_intr_handler(adapter); 2672 if (cause & F_ULP_TX) 2673 ulptx_intr_handler(adapter); 2674 2675 /* Clear the interrupts just processed for which we are the master. */ 2676 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK); 2677 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */ 2678 return 1; 2679 } 2680 2681 /** 2682 * t4_intr_enable - enable interrupts 2683 * @adapter: the adapter whose interrupts should be enabled 2684 * 2685 * Enable PF-specific interrupts for the calling function and the top-level 2686 * interrupt concentrator for global interrupts. Interrupts are already 2687 * enabled at each module, here we just enable the roots of the interrupt 2688 * hierarchies. 2689 * 2690 * Note: this function should be called only when the driver manages 2691 * non PF-specific interrupts from the various HW modules. Only one PCI 2692 * function at a time should be doing this. 
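 *
 *	A sketch of what this amounts to (assuming the one-bit-per-PF layout
 *	of PL_INT_MAP0 used below): for PF 4,
 *
 *		t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << 4);
 *
 *	sets bit 4, directing the global interrupt causes to that PCI
 *	function.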
2693 */ 2694 void t4_intr_enable(struct adapter *adapter) 2695 { 2696 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); 2697 2698 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE | 2699 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | 2700 F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 | 2701 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 | 2702 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 | 2703 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | 2704 F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR | 2705 F_EGRESS_SIZE_ERR); 2706 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK); 2707 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf); 2708 } 2709 2710 /** 2711 * t4_intr_disable - disable interrupts 2712 * @adapter: the adapter whose interrupts should be disabled 2713 * 2714 * Disable interrupts. We only disable the top-level interrupt 2715 * concentrators. The caller must be a PCI function managing global 2716 * interrupts. 2717 */ 2718 void t4_intr_disable(struct adapter *adapter) 2719 { 2720 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); 2721 2722 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0); 2723 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0); 2724 } 2725 2726 /** 2727 * t4_intr_clear - clear all interrupts 2728 * @adapter: the adapter whose interrupts should be cleared 2729 * 2730 * Clears all interrupts. The caller must be a PCI function managing 2731 * global interrupts. 2732 */ 2733 void t4_intr_clear(struct adapter *adapter) 2734 { 2735 static const unsigned int cause_reg[] = { 2736 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3, 2737 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE, 2738 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE, 2739 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1), 2740 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE, 2741 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE), 2742 A_TP_INT_CAUSE, 2743 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE, 2744 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE, 2745 A_MPS_RX_PERR_INT_CAUSE, 2746 A_CPL_INTR_CAUSE, 2747 MYPF_REG(A_PL_PF_INT_CAUSE), 2748 A_PL_PL_INT_CAUSE, 2749 A_LE_DB_INT_CAUSE, 2750 }; 2751 2752 unsigned int i; 2753 2754 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i) 2755 t4_write_reg(adapter, cause_reg[i], 0xffffffff); 2756 2757 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE : 2758 A_MC_P_INT_CAUSE, 0xffffffff); 2759 2760 if (is_t4(adapter)) { 2761 t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 2762 0xffffffff); 2763 t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 2764 0xffffffff); 2765 } else 2766 t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff); 2767 2768 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK); 2769 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */ 2770 } 2771 2772 /** 2773 * hash_mac_addr - return the hash value of a MAC address 2774 * @addr: the 48-bit Ethernet MAC address 2775 * 2776 * Hashes a MAC address according to the hash function used by HW inexact 2777 * (hash) address matching. 
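 *
 *	Worked example (arithmetic only): for the MAC address
 *	00:07:43:12:34:56 the two 24-bit halves are 0x000743 and 0x123456;
 *	XORing them gives 0x123315, folding by 12 gives 0x123236, and folding
 *	by 6 then masking with 0x3f yields hash bucket 0x3e.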
2778  */
2779 static int hash_mac_addr(const u8 *addr)
2780 {
2781     u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2782     u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2783     a ^= b;
2784     a ^= (a >> 12);
2785     a ^= (a >> 6);
2786     return a & 0x3f;
2787 }
2788 
2789 /**
2790  *	t4_config_rss_range - configure a portion of the RSS mapping table
2791  *	@adapter: the adapter
2792  *	@mbox: mbox to use for the FW command
2793  *	@viid: virtual interface whose RSS subtable is to be written
2794  *	@start: start entry in the table to write
2795  *	@n: how many table entries to write
2796  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2797  *	@nrspq: number of values in @rspq
2798  *
2799  *	Programs the selected part of the VI's RSS mapping table with the
2800  *	provided values. If @nrspq < @n the supplied values are used repeatedly
2801  *	until the full table range is populated.
2802  *
2803  *	The caller must ensure the values in @rspq are in the range allowed for
2804  *	@viid.
2805  */
2806 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2807                         int start, int n, const u16 *rspq, unsigned int nrspq)
2808 {
2809     int ret;
2810     const u16 *rsp = rspq;
2811     const u16 *rsp_end = rspq + nrspq;
2812     struct fw_rss_ind_tbl_cmd cmd;
2813 
2814     memset(&cmd, 0, sizeof(cmd));
2815     cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2816                            F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2817                            V_FW_RSS_IND_TBL_CMD_VIID(viid));
2818     cmd.retval_len16 = htonl(FW_LEN16(cmd));
2819 
2820 
2821     /*
2822      * Each firmware RSS command can accommodate up to 32 RSS Ingress
2823      * Queue Identifiers. These Ingress Queue IDs are packed three to
2824      * a 32-bit word as 10-bit values with the upper remaining 2 bits
2825      * reserved.
2826      */
2827     while (n > 0) {
2828         int nq = min(n, 32);
2829         int nq_packed = 0;
2830         __be32 *qp = &cmd.iq0_to_iq2;
2831 
2832         /*
2833          * Set up the firmware RSS command header to send the next
2834          * "nq" Ingress Queue IDs to the firmware.
2835          */
2836         cmd.niqid = htons(nq);
2837         cmd.startidx = htons(start);
2838 
2839         /*
2840          * Advance past the "nq" entries this command will program
2841          * before the next trip around the loop.
2842          */
2842         start += nq;
2843         n -= nq;
2844 
2845         /*
2846          * While there are still Ingress Queue IDs to stuff into the
2847          * current firmware RSS command, retrieve them from the
2848          * Ingress Queue ID array and insert them into the command.
2849          */
2850         while (nq > 0) {
2851             /*
2852              * Grab up to the next 3 Ingress Queue IDs (wrapping
2853              * around the Ingress Queue ID array if necessary) and
2854              * insert them into the firmware RSS command at the
2855              * current 3-tuple position within the command.
2856              */
2857             u16 qbuf[3];
2858             u16 *qbp = qbuf;
2859             int nqbuf = min(3, nq);
2860 
2861             nq -= nqbuf;
2862             qbuf[0] = qbuf[1] = qbuf[2] = 0;
2863             while (nqbuf && nq_packed < 32) {
2864                 nqbuf--;
2865                 nq_packed++;
2866                 *qbp++ = *rsp++;
2867                 if (rsp >= rsp_end)
2868                     rsp = rspq;
2869             }
2870             *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2871                                 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2872                                 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2873         }
2874 
2875         /*
2876          * Send this portion of the RSS table update to the firmware;
2877          * bail out on any errors.
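         *
         * As an illustration (not new behaviour): with @n = 8, @nrspq = 4
         * and @rspq = {q0, q1, q2, q3}, the loop above packs three words,
         * (q0,q1,q2), (q3,q0,q1) and (q2,q3,0), programming table entries
         * @start..@start+7 as q0 q1 q2 q3 q0 q1 q2 q3.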
2878 */ 2879 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); 2880 if (ret) 2881 return ret; 2882 } 2883 2884 return 0; 2885 } 2886 2887 /** 2888 * t4_config_glbl_rss - configure the global RSS mode 2889 * @adapter: the adapter 2890 * @mbox: mbox to use for the FW command 2891 * @mode: global RSS mode 2892 * @flags: mode-specific flags 2893 * 2894 * Sets the global RSS mode. 2895 */ 2896 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 2897 unsigned int flags) 2898 { 2899 struct fw_rss_glb_config_cmd c; 2900 2901 memset(&c, 0, sizeof(c)); 2902 c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | 2903 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 2904 c.retval_len16 = htonl(FW_LEN16(c)); 2905 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { 2906 c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode)); 2907 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { 2908 c.u.basicvirtual.mode_pkd = 2909 htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode)); 2910 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); 2911 } else 2912 return -EINVAL; 2913 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2914 } 2915 2916 /** 2917 * t4_config_vi_rss - configure per VI RSS settings 2918 * @adapter: the adapter 2919 * @mbox: mbox to use for the FW command 2920 * @viid: the VI id 2921 * @flags: RSS flags 2922 * @defq: id of the default RSS queue for the VI. 2923 * 2924 * Configures VI-specific RSS properties. 2925 */ 2926 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, 2927 unsigned int flags, unsigned int defq) 2928 { 2929 struct fw_rss_vi_config_cmd c; 2930 2931 memset(&c, 0, sizeof(c)); 2932 c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | 2933 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 2934 V_FW_RSS_VI_CONFIG_CMD_VIID(viid)); 2935 c.retval_len16 = htonl(FW_LEN16(c)); 2936 c.u.basicvirtual.defaultq_to_udpen = htonl(flags | 2937 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq)); 2938 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2939 } 2940 2941 /* Read an RSS table row */ 2942 static int rd_rss_row(struct adapter *adap, int row, u32 *val) 2943 { 2944 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row); 2945 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1, 2946 5, 0, val); 2947 } 2948 2949 /** 2950 * t4_read_rss - read the contents of the RSS mapping table 2951 * @adapter: the adapter 2952 * @map: holds the contents of the RSS mapping table 2953 * 2954 * Reads the contents of the RSS hash->queue mapping table. 2955 */ 2956 int t4_read_rss(struct adapter *adapter, u16 *map) 2957 { 2958 u32 val; 2959 int i, ret; 2960 2961 for (i = 0; i < RSS_NENTRIES / 2; ++i) { 2962 ret = rd_rss_row(adapter, i, &val); 2963 if (ret) 2964 return ret; 2965 *map++ = G_LKPTBLQUEUE0(val); 2966 *map++ = G_LKPTBLQUEUE1(val); 2967 } 2968 return 0; 2969 } 2970 2971 /** 2972 * t4_read_rss_key - read the global RSS key 2973 * @adap: the adapter 2974 * @key: 10-entry array holding the 320-bit RSS key 2975 * 2976 * Reads the global 320-bit RSS key. 2977 */ 2978 void t4_read_rss_key(struct adapter *adap, u32 *key) 2979 { 2980 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10, 2981 A_TP_RSS_SECRET_KEY0); 2982 } 2983 2984 /** 2985 * t4_write_rss_key - program one of the RSS keys 2986 * @adap: the adapter 2987 * @key: 10-entry array holding the 320-bit RSS key 2988 * @idx: which RSS key to write 2989 * 2990 * Writes one of the RSS keys with the given 320-bit value. 
If @idx is
2991  *	0..15 the corresponding entry in the RSS key table is written,
2992  *	otherwise the global RSS key is written.
2993  */
2994 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2995 {
2996     t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2997                       A_TP_RSS_SECRET_KEY0);
2998     if (idx >= 0 && idx < 16)
2999         t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
3000                      V_KEYWRADDR(idx) | F_KEYWREN);
3001 }
3002 
3003 /**
3004  *	t4_read_rss_pf_config - read PF RSS Configuration Table
3005  *	@adapter: the adapter
3006  *	@index: the entry in the PF RSS table to read
3007  *	@valp: where to store the returned value
3008  *
3009  *	Reads the PF RSS Configuration Table at the specified index and returns
3010  *	the value found there.
3011  */
3012 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
3013 {
3014     t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3015                      valp, 1, A_TP_RSS_PF0_CONFIG + index);
3016 }
3017 
3018 /**
3019  *	t4_write_rss_pf_config - write PF RSS Configuration Table
3020  *	@adapter: the adapter
3021  *	@index: the entry in the PF RSS table to write
3022  *	@val: the value to store
3023  *
3024  *	Writes the PF RSS Configuration Table at the specified index with the
3025  *	specified value.
3026  */
3027 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
3028 {
3029     t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3030                       &val, 1, A_TP_RSS_PF0_CONFIG + index);
3031 }
3032 
3033 /**
3034  *	t4_read_rss_vf_config - read VF RSS Configuration Table
3035  *	@adapter: the adapter
3036  *	@index: the entry in the VF RSS table to read
3037  *	@vfl: where to store the returned VFL
3038  *	@vfh: where to store the returned VFH
3039  *
3040  *	Reads the VF RSS Configuration Table at the specified index and returns
3041  *	the (VFL, VFH) values found there.
3042  */
3043 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
3044                            u32 *vfl, u32 *vfh)
3045 {
3046     u32 vrt;
3047 
3048     /*
3049      * Request that the index'th VF Table values be read into VFL/VFH.
3050      */
3051     vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3052     vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
3053     vrt |= V_VFWRADDR(index) | F_VFRDEN;
3054     t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3055 
3056     /*
3057      * Grab the VFL/VFH values ...
3058      */
3059     t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3060                      vfl, 1, A_TP_RSS_VFL_CONFIG);
3061     t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3062                      vfh, 1, A_TP_RSS_VFH_CONFIG);
3063 }
3064 
3065 /**
3066  *	t4_write_rss_vf_config - write VF RSS Configuration Table
3067  *
3068  *	@adapter: the adapter
3069  *	@index: the entry in the VF RSS table to write
3070  *	@vfl: the VFL to store
3071  *	@vfh: the VFH to store
3072  *
3073  *	Writes the VF RSS Configuration Table at the specified index with the
3074  *	specified (VFL, VFH) values.
3075  */
3076 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
3077                             u32 vfl, u32 vfh)
3078 {
3079     u32 vrt;
3080 
3081     /*
3082      * Load up VFL/VFH with the values to be written ...
3083      */
3084     t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3085                       &vfl, 1, A_TP_RSS_VFL_CONFIG);
3086     t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3087                       &vfh, 1, A_TP_RSS_VFH_CONFIG);
3088 
3089     /*
3090      * Write the VFL/VFH into the VF Table at index'th location.
3091      */
3092     vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3093     vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
3094     vrt |= V_VFWRADDR(index) | F_VFWREN;
3095     t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3096 }
3097 
3098 /**
3099  *	t4_read_rss_pf_map - read PF RSS Map
3100  *	@adapter: the adapter
3101  *
3102  *	Reads the PF RSS Map register and returns its value.
3103  */
3104 u32 t4_read_rss_pf_map(struct adapter *adapter)
3105 {
3106     u32 pfmap;
3107 
3108     t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3109                      &pfmap, 1, A_TP_RSS_PF_MAP);
3110     return pfmap;
3111 }
3112 
3113 /**
3114  *	t4_write_rss_pf_map - write PF RSS Map
3115  *	@adapter: the adapter
3116  *	@pfmap: PF RSS Map value
3117  *
3118  *	Writes the specified value to the PF RSS Map register.
3119  */
3120 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3121 {
3122     t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3123                       &pfmap, 1, A_TP_RSS_PF_MAP);
3124 }
3125 
3126 /**
3127  *	t4_read_rss_pf_mask - read PF RSS Mask
3128  *	@adapter: the adapter
3129  *
3130  *	Reads the PF RSS Mask register and returns its value.
3131  */
3132 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3133 {
3134     u32 pfmask;
3135 
3136     t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3137                      &pfmask, 1, A_TP_RSS_PF_MSK);
3138     return pfmask;
3139 }
3140 
3141 /**
3142  *	t4_write_rss_pf_mask - write PF RSS Mask
3143  *	@adapter: the adapter
3144  *	@pfmask: PF RSS Mask value
3145  *
3146  *	Writes the specified value to the PF RSS Mask register.
3147  */
3148 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3149 {
3150     t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3151                       &pfmask, 1, A_TP_RSS_PF_MSK);
3152 }
3153 
3154 static void refresh_vlan_pri_map(struct adapter *adap)
3155 {
3156 
3157     t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3158                      &adap->params.tp.vlan_pri_map, 1,
3159                      A_TP_VLAN_PRI_MAP);
3160 
3161     /*
3162      * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3163      * shift positions of several elements of the Compressed Filter Tuple
3164      * for this adapter which we need frequently ...
3165      */
3166     adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3167     adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3168     adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3169     adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
3170 
3171     /*
3172      * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3173      * represents the presence of an Outer VLAN instead of a VNIC ID.
3174      */
3175     if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3176         adap->params.tp.vnic_shift = -1;
3177 }
3178 
3179 /**
3180  *	t4_set_filter_mode - configure the optional components of filter tuples
3181  *	@adap: the adapter
3182  *	@mode_map: a bitmap selecting which optional filter components to enable
3183  *
3184  *	Sets the filter mode by selecting the optional components to enable
3185  *	in filter tuples. Returns 0 on success and a negative error if the
3186  *	requested mode needs more bits than are available for optional
3187  *	components.
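 *
 *	For example, assuming the usual 36-bit optional-field budget
 *	(FILTER_OPT_LEN) and the per-field widths in the table below:
 *	VLAN (17) + VNIC_ID (17) + PORT (3) needs 37 bits and fails with
 *	-EINVAL, while VLAN (17) + TOS (8) + PROTOCOL (8) + PORT (3) fits
 *	in exactly 36 bits.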
3188 */ 3189 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map) 3190 { 3191 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 }; 3192 3193 int i, nbits = 0; 3194 3195 for (i = S_FCOE; i <= S_FRAGMENTATION; i++) 3196 if (mode_map & (1 << i)) 3197 nbits += width[i]; 3198 if (nbits > FILTER_OPT_LEN) 3199 return -EINVAL; 3200 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1, 3201 A_TP_VLAN_PRI_MAP); 3202 refresh_vlan_pri_map(adap); 3203 3204 return 0; 3205 } 3206 3207 /** 3208 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 3209 * @adap: the adapter 3210 * @v4: holds the TCP/IP counter values 3211 * @v6: holds the TCP/IPv6 counter values 3212 * 3213 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. 3214 * Either @v4 or @v6 may be %NULL to skip the corresponding stats. 3215 */ 3216 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 3217 struct tp_tcp_stats *v6) 3218 { 3219 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1]; 3220 3221 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST) 3222 #define STAT(x) val[STAT_IDX(x)] 3223 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) 3224 3225 if (v4) { 3226 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 3227 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST); 3228 v4->tcp_out_rsts = STAT(OUT_RST); 3229 v4->tcp_in_segs = STAT64(IN_SEG); 3230 v4->tcp_out_segs = STAT64(OUT_SEG); 3231 v4->tcp_retrans_segs = STAT64(RXT_SEG); 3232 } 3233 if (v6) { 3234 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 3235 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST); 3236 v6->tcp_out_rsts = STAT(OUT_RST); 3237 v6->tcp_in_segs = STAT64(IN_SEG); 3238 v6->tcp_out_segs = STAT64(OUT_SEG); 3239 v6->tcp_retrans_segs = STAT64(RXT_SEG); 3240 } 3241 #undef STAT64 3242 #undef STAT 3243 #undef STAT_IDX 3244 } 3245 3246 /** 3247 * t4_tp_get_err_stats - read TP's error MIB counters 3248 * @adap: the adapter 3249 * @st: holds the counter values 3250 * 3251 * Returns the values of TP's error counters. 3252 */ 3253 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) 3254 { 3255 int nchan = adap->chip_params->nchan; 3256 3257 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, 3258 st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0); 3259 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, 3260 st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0); 3261 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, 3262 st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0); 3263 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, 3264 st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0); 3265 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, 3266 st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0); 3267 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, 3268 st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0); 3269 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, 3270 st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0); 3271 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, 3272 st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0); 3273 3274 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, 3275 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP); 3276 } 3277 3278 /** 3279 * t4_tp_get_proxy_stats - read TP's proxy MIB counters 3280 * @adap: the adapter 3281 * @st: holds the counter values 3282 * 3283 * Returns the values of TP's proxy counters. 
3284 */ 3285 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st) 3286 { 3287 int nchan = adap->chip_params->nchan; 3288 3289 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy, 3290 nchan, A_TP_MIB_TNL_LPBK_0); 3291 } 3292 3293 /** 3294 * t4_tp_get_cpl_stats - read TP's CPL MIB counters 3295 * @adap: the adapter 3296 * @st: holds the counter values 3297 * 3298 * Returns the values of TP's CPL counters. 3299 */ 3300 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st) 3301 { 3302 int nchan = adap->chip_params->nchan; 3303 3304 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req, 3305 nchan, A_TP_MIB_CPL_IN_REQ_0); 3306 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp, 3307 nchan, A_TP_MIB_CPL_OUT_RSP_0); 3308 } 3309 3310 /** 3311 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters 3312 * @adap: the adapter 3313 * @st: holds the counter values 3314 * 3315 * Returns the values of TP's RDMA counters. 3316 */ 3317 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st) 3318 { 3319 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod, 3320 2, A_TP_MIB_RQE_DFR_PKT); 3321 } 3322 3323 /** 3324 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port 3325 * @adap: the adapter 3326 * @idx: the port index 3327 * @st: holds the counter values 3328 * 3329 * Returns the values of TP's FCoE counters for the selected port. 3330 */ 3331 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, 3332 struct tp_fcoe_stats *st) 3333 { 3334 u32 val[2]; 3335 3336 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp, 3337 1, A_TP_MIB_FCOE_DDP_0 + idx); 3338 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop, 3339 1, A_TP_MIB_FCOE_DROP_0 + idx); 3340 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 3341 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx); 3342 st->octets_ddp = ((u64)val[0] << 32) | val[1]; 3343 } 3344 3345 /** 3346 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters 3347 * @adap: the adapter 3348 * @st: holds the counter values 3349 * 3350 * Returns the values of TP's counters for non-TCP directly-placed packets. 3351 */ 3352 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st) 3353 { 3354 u32 val[4]; 3355 3356 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4, 3357 A_TP_MIB_USM_PKTS); 3358 st->frames = val[0]; 3359 st->drops = val[1]; 3360 st->octets = ((u64)val[2] << 32) | val[3]; 3361 } 3362 3363 /** 3364 * t4_read_mtu_tbl - returns the values in the HW path MTU table 3365 * @adap: the adapter 3366 * @mtus: where to store the MTU values 3367 * @mtu_log: where to store the MTU base-2 log (may be %NULL) 3368 * 3369 * Reads the HW path MTU table. 3370 */ 3371 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) 3372 { 3373 u32 v; 3374 int i; 3375 3376 for (i = 0; i < NMTUS; ++i) { 3377 t4_write_reg(adap, A_TP_MTU_TABLE, 3378 V_MTUINDEX(0xff) | V_MTUVALUE(i)); 3379 v = t4_read_reg(adap, A_TP_MTU_TABLE); 3380 mtus[i] = G_MTUVALUE(v); 3381 if (mtu_log) 3382 mtu_log[i] = G_MTUWIDTH(v); 3383 } 3384 } 3385 3386 /** 3387 * t4_read_cong_tbl - reads the congestion control table 3388 * @adap: the adapter 3389 * @incr: where to store the alpha values 3390 * 3391 * Reads the additive increments programmed into the HW congestion 3392 * control table. 
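 *
 *	Each entry is addressed by packing the window index into the low
 *	5 bits of TP_CCTRL_TABLE and the MTU index above it, as the loop
 *	below does with (mtu << 5) | w.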
3393 */ 3394 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]) 3395 { 3396 unsigned int mtu, w; 3397 3398 for (mtu = 0; mtu < NMTUS; ++mtu) 3399 for (w = 0; w < NCCTRL_WIN; ++w) { 3400 t4_write_reg(adap, A_TP_CCTRL_TABLE, 3401 V_ROWINDEX(0xffff) | (mtu << 5) | w); 3402 incr[mtu][w] = (u16)t4_read_reg(adap, 3403 A_TP_CCTRL_TABLE) & 0x1fff; 3404 } 3405 } 3406 3407 /** 3408 * t4_read_pace_tbl - read the pace table 3409 * @adap: the adapter 3410 * @pace_vals: holds the returned values 3411 * 3412 * Returns the values of TP's pace table in microseconds. 3413 */ 3414 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]) 3415 { 3416 unsigned int i, v; 3417 3418 for (i = 0; i < NTX_SCHED; i++) { 3419 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i); 3420 v = t4_read_reg(adap, A_TP_PACE_TABLE); 3421 pace_vals[i] = dack_ticks_to_usec(adap, v); 3422 } 3423 } 3424 3425 /** 3426 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register 3427 * @adap: the adapter 3428 * @addr: the indirect TP register address 3429 * @mask: specifies the field within the register to modify 3430 * @val: new value for the field 3431 * 3432 * Sets a field of an indirect TP register to the given value. 3433 */ 3434 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 3435 unsigned int mask, unsigned int val) 3436 { 3437 t4_write_reg(adap, A_TP_PIO_ADDR, addr); 3438 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; 3439 t4_write_reg(adap, A_TP_PIO_DATA, val); 3440 } 3441 3442 /** 3443 * init_cong_ctrl - initialize congestion control parameters 3444 * @a: the alpha values for congestion control 3445 * @b: the beta values for congestion control 3446 * 3447 * Initialize the congestion control parameters. 3448 */ 3449 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) 3450 { 3451 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 3452 a[9] = 2; 3453 a[10] = 3; 3454 a[11] = 4; 3455 a[12] = 5; 3456 a[13] = 6; 3457 a[14] = 7; 3458 a[15] = 8; 3459 a[16] = 9; 3460 a[17] = 10; 3461 a[18] = 14; 3462 a[19] = 17; 3463 a[20] = 21; 3464 a[21] = 25; 3465 a[22] = 30; 3466 a[23] = 35; 3467 a[24] = 45; 3468 a[25] = 60; 3469 a[26] = 80; 3470 a[27] = 100; 3471 a[28] = 200; 3472 a[29] = 300; 3473 a[30] = 400; 3474 a[31] = 500; 3475 3476 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 3477 b[9] = b[10] = 1; 3478 b[11] = b[12] = 2; 3479 b[13] = b[14] = b[15] = b[16] = 3; 3480 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 3481 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 3482 b[28] = b[29] = 6; 3483 b[30] = b[31] = 7; 3484 } 3485 3486 /* The minimum additive increment value for the congestion control table */ 3487 #define CC_MIN_INCR 2U 3488 3489 /** 3490 * t4_load_mtus - write the MTU and congestion control HW tables 3491 * @adap: the adapter 3492 * @mtus: the values for the MTU table 3493 * @alpha: the values for the congestion control alpha parameter 3494 * @beta: the values for the congestion control beta parameter 3495 * 3496 * Write the HW MTU table with the supplied MTUs and the high-speed 3497 * congestion control table with the supplied alpha, beta, and MTUs. 3498 * We write the two tables together because the additive increments 3499 * depend on the MTUs. 
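 *
 *	As a worked example of the increment calculation below: for
 *	mtus[i] = 1500 in the first window (avg_pkts[0] = 2) with alpha = 2,
 *	the programmed entry is max(((1500 - 40) * 2) / 2, CC_MIN_INCR)
 *	= 1460.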
3500 */ 3501 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 3502 const unsigned short *alpha, const unsigned short *beta) 3503 { 3504 static const unsigned int avg_pkts[NCCTRL_WIN] = { 3505 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 3506 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 3507 28672, 40960, 57344, 81920, 114688, 163840, 229376 3508 }; 3509 3510 unsigned int i, w; 3511 3512 for (i = 0; i < NMTUS; ++i) { 3513 unsigned int mtu = mtus[i]; 3514 unsigned int log2 = fls(mtu); 3515 3516 if (!(mtu & ((1 << log2) >> 2))) /* round */ 3517 log2--; 3518 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) | 3519 V_MTUWIDTH(log2) | V_MTUVALUE(mtu)); 3520 3521 for (w = 0; w < NCCTRL_WIN; ++w) { 3522 unsigned int inc; 3523 3524 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 3525 CC_MIN_INCR); 3526 3527 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | 3528 (w << 16) | (beta[w] << 13) | inc); 3529 } 3530 } 3531 } 3532 3533 /** 3534 * t4_set_pace_tbl - set the pace table 3535 * @adap: the adapter 3536 * @pace_vals: the pace values in microseconds 3537 * @start: index of the first entry in the HW pace table to set 3538 * @n: how many entries to set 3539 * 3540 * Sets (a subset of the) HW pace table. 3541 */ 3542 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals, 3543 unsigned int start, unsigned int n) 3544 { 3545 unsigned int vals[NTX_SCHED], i; 3546 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000); 3547 3548 if (n > NTX_SCHED) 3549 return -ERANGE; 3550 3551 /* convert values from us to dack ticks, rounding to closest value */ 3552 for (i = 0; i < n; i++, pace_vals++) { 3553 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns; 3554 if (vals[i] > 0x7ff) 3555 return -ERANGE; 3556 if (*pace_vals && vals[i] == 0) 3557 return -ERANGE; 3558 } 3559 for (i = 0; i < n; i++, start++) 3560 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]); 3561 return 0; 3562 } 3563 3564 /** 3565 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler 3566 * @adap: the adapter 3567 * @kbps: target rate in Kbps 3568 * @sched: the scheduler index 3569 * 3570 * Configure a Tx HW scheduler for the target rate. 3571 */ 3572 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps) 3573 { 3574 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; 3575 unsigned int clk = adap->params.vpd.cclk * 1000; 3576 unsigned int selected_cpt = 0, selected_bpt = 0; 3577 3578 if (kbps > 0) { 3579 kbps *= 125; /* -> bytes */ 3580 for (cpt = 1; cpt <= 255; cpt++) { 3581 tps = clk / cpt; 3582 bpt = (kbps + tps / 2) / tps; 3583 if (bpt > 0 && bpt <= 255) { 3584 v = bpt * tps; 3585 delta = v >= kbps ? 
v - kbps : kbps - v; 3586 if (delta < mindelta) { 3587 mindelta = delta; 3588 selected_cpt = cpt; 3589 selected_bpt = bpt; 3590 } 3591 } else if (selected_cpt) 3592 break; 3593 } 3594 if (!selected_cpt) 3595 return -EINVAL; 3596 } 3597 t4_write_reg(adap, A_TP_TM_PIO_ADDR, 3598 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); 3599 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3600 if (sched & 1) 3601 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24); 3602 else 3603 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8); 3604 t4_write_reg(adap, A_TP_TM_PIO_DATA, v); 3605 return 0; 3606 } 3607 3608 /** 3609 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler 3610 * @adap: the adapter 3611 * @sched: the scheduler index 3612 * @ipg: the interpacket delay in tenths of nanoseconds 3613 * 3614 * Set the interpacket delay for a HW packet rate scheduler. 3615 */ 3616 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg) 3617 { 3618 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; 3619 3620 /* convert ipg to nearest number of core clocks */ 3621 ipg *= core_ticks_per_usec(adap); 3622 ipg = (ipg + 5000) / 10000; 3623 if (ipg > M_TXTIMERSEPQ0) 3624 return -EINVAL; 3625 3626 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 3627 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3628 if (sched & 1) 3629 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg); 3630 else 3631 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg); 3632 t4_write_reg(adap, A_TP_TM_PIO_DATA, v); 3633 t4_read_reg(adap, A_TP_TM_PIO_DATA); 3634 return 0; 3635 } 3636 3637 /** 3638 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler 3639 * @adap: the adapter 3640 * @sched: the scheduler index 3641 * @kbps: the byte rate in Kbps 3642 * @ipg: the interpacket delay in tenths of nanoseconds 3643 * 3644 * Return the current configuration of a HW Tx scheduler. 3645 */ 3646 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, 3647 unsigned int *ipg) 3648 { 3649 unsigned int v, addr, bpt, cpt; 3650 3651 if (kbps) { 3652 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; 3653 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 3654 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3655 if (sched & 1) 3656 v >>= 16; 3657 bpt = (v >> 8) & 0xff; 3658 cpt = v & 0xff; 3659 if (!cpt) 3660 *kbps = 0; /* scheduler disabled */ 3661 else { 3662 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */ 3663 *kbps = (v * bpt) / 125; 3664 } 3665 } 3666 if (ipg) { 3667 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; 3668 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 3669 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3670 if (sched & 1) 3671 v >>= 16; 3672 v &= 0xffff; 3673 *ipg = (10000 * v) / core_ticks_per_usec(adap); 3674 } 3675 } 3676 3677 /* 3678 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core 3679 * clocks. The formula is 3680 * 3681 * bytes/s = bytes256 * 256 * ClkFreq / 4096 3682 * 3683 * which is equivalent to 3684 * 3685 * bytes/s = 62.5 * bytes256 * ClkFreq_ms 3686 */ 3687 static u64 chan_rate(struct adapter *adap, unsigned int bytes256) 3688 { 3689 u64 v = bytes256 * adap->params.vpd.cclk; 3690 3691 return v * 62 + v / 2; 3692 } 3693 3694 /** 3695 * t4_get_chan_txrate - get the current per channel Tx rates 3696 * @adap: the adapter 3697 * @nic_rate: rates for NIC traffic 3698 * @ofld_rate: rates for offloaded traffic 3699 * 3700 * Return the current Tx rates in bytes/s for NIC and offloaded traffic 3701 * for each channel. 
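 *
 * A usage sketch (not from the original source; four entries cover the
 * maximum number of channels):
 *
 *	u64 nic_rate[4], ofld_rate[4];
 *
 *	t4_get_chan_txrate(adap, nic_rate, ofld_rate);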
3702 */ 3703 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) 3704 { 3705 u32 v; 3706 3707 v = t4_read_reg(adap, A_TP_TX_TRATE); 3708 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v)); 3709 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v)); 3710 if (adap->chip_params->nchan > 2) { 3711 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v)); 3712 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v)); 3713 } 3714 3715 v = t4_read_reg(adap, A_TP_TX_ORATE); 3716 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v)); 3717 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v)); 3718 if (adap->chip_params->nchan > 2) { 3719 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v)); 3720 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v)); 3721 } 3722 } 3723 3724 /** 3725 * t4_set_trace_filter - configure one of the tracing filters 3726 * @adap: the adapter 3727 * @tp: the desired trace filter parameters 3728 * @idx: which filter to configure 3729 * @enable: whether to enable or disable the filter 3730 * 3731 * Configures one of the tracing filters available in HW. If @tp is %NULL 3732 * it indicates that the filter is already written in the register and it 3733 * just needs to be enabled or disabled. 3734 */ 3735 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, 3736 int idx, int enable) 3737 { 3738 int i, ofst = idx * 4; 3739 u32 data_reg, mask_reg, cfg; 3740 u32 multitrc = F_TRCMULTIFILTER; 3741 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN; 3742 3743 if (idx < 0 || idx >= NTRACE) 3744 return -EINVAL; 3745 3746 if (tp == NULL || !enable) { 3747 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 3748 enable ? en : 0); 3749 return 0; 3750 } 3751 3752 /* 3753 * TODO - After T4 data book is updated, specify the exact 3754 * section below. 3755 * 3756 * See T4 data book - MPS section for a complete description 3757 * of the below if..else handling of A_MPS_TRC_CFG register 3758 * value. 3759 */ 3760 cfg = t4_read_reg(adap, A_MPS_TRC_CFG); 3761 if (cfg & F_TRCMULTIFILTER) { 3762 /* 3763 * If multiple tracers are enabled, then maximum 3764 * capture size is 2.5KB (FIFO size of a single channel) 3765 * minus 2 flits for CPL_TRACE_PKT header. 3766 */ 3767 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8))) 3768 return -EINVAL; 3769 } else { 3770 /* 3771 * If multiple tracers are disabled, to avoid deadlocks 3772 * maximum packet capture size of 9600 bytes is recommended. 3773 * Also in this mode, only trace0 can be enabled and running. 3774 */ 3775 multitrc = 0; 3776 if (tp->snap_len > 9600 || idx) 3777 return -EINVAL; 3778 } 3779 3780 if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 || 3781 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET || 3782 tp->min_len > M_TFMINPKTSIZE) 3783 return -EINVAL; 3784 3785 /* stop the tracer we'll be changing */ 3786 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0); 3787 3788 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH); 3789 data_reg = A_MPS_TRC_FILTER0_MATCH + idx; 3790 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx; 3791 3792 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 3793 t4_write_reg(adap, data_reg, tp->data[i]); 3794 t4_write_reg(adap, mask_reg, ~tp->mask[i]); 3795 } 3796 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst, 3797 V_TFCAPTUREMAX(tp->snap_len) | 3798 V_TFMINPKTSIZE(tp->min_len)); 3799 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 3800 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en | 3801 (is_t4(adap) ? 
3802 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) : 3803 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert))); 3804 3805 return 0; 3806 } 3807 3808 /** 3809 * t4_get_trace_filter - query one of the tracing filters 3810 * @adap: the adapter 3811 * @tp: the current trace filter parameters 3812 * @idx: which trace filter to query 3813 * @enabled: non-zero if the filter is enabled 3814 * 3815 * Returns the current settings of one of the HW tracing filters. 3816 */ 3817 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, 3818 int *enabled) 3819 { 3820 u32 ctla, ctlb; 3821 int i, ofst = idx * 4; 3822 u32 data_reg, mask_reg; 3823 3824 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst); 3825 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst); 3826 3827 if (is_t4(adap)) { 3828 *enabled = !!(ctla & F_TFEN); 3829 tp->port = G_TFPORT(ctla); 3830 tp->invert = !!(ctla & F_TFINVERTMATCH); 3831 } else { 3832 *enabled = !!(ctla & F_T5_TFEN); 3833 tp->port = G_T5_TFPORT(ctla); 3834 tp->invert = !!(ctla & F_T5_TFINVERTMATCH); 3835 } 3836 tp->snap_len = G_TFCAPTUREMAX(ctlb); 3837 tp->min_len = G_TFMINPKTSIZE(ctlb); 3838 tp->skip_ofst = G_TFOFFSET(ctla); 3839 tp->skip_len = G_TFLENGTH(ctla); 3840 3841 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx; 3842 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst; 3843 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst; 3844 3845 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 3846 tp->mask[i] = ~t4_read_reg(adap, mask_reg); 3847 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; 3848 } 3849 } 3850 3851 /** 3852 * t4_pmtx_get_stats - returns the HW stats from PMTX 3853 * @adap: the adapter 3854 * @cnt: where to store the count statistics 3855 * @cycles: where to store the cycle statistics 3856 * 3857 * Returns performance statistics from PMTX. 3858 */ 3859 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 3860 { 3861 int i; 3862 u32 data[2]; 3863 3864 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) { 3865 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1); 3866 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT); 3867 if (is_t4(adap)) 3868 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB); 3869 else { 3870 t4_read_indirect(adap, A_PM_TX_DBG_CTRL, 3871 A_PM_TX_DBG_DATA, data, 2, 3872 A_PM_TX_DBG_STAT_MSB); 3873 cycles[i] = (((u64)data[0] << 32) | data[1]); 3874 } 3875 } 3876 } 3877 3878 /** 3879 * t4_pmrx_get_stats - returns the HW stats from PMRX 3880 * @adap: the adapter 3881 * @cnt: where to store the count statistics 3882 * @cycles: where to store the cycle statistics 3883 * 3884 * Returns performance statistics from PMRX. 3885 */ 3886 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 3887 { 3888 int i; 3889 u32 data[2]; 3890 3891 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) { 3892 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1); 3893 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT); 3894 if (is_t4(adap)) 3895 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB); 3896 else { 3897 t4_read_indirect(adap, A_PM_RX_DBG_CTRL, 3898 A_PM_RX_DBG_DATA, data, 2, 3899 A_PM_RX_DBG_STAT_MSB); 3900 cycles[i] = (((u64)data[0] << 32) | data[1]); 3901 } 3902 } 3903 } 3904 3905 /** 3906 * get_mps_bg_map - return the buffer groups associated with a port 3907 * @adap: the adapter 3908 * @idx: the port index 3909 * 3910 * Returns a bitmap indicating which MPS buffer groups are associated 3911 * with the given port. 
Bit i is set if buffer group i is used by the 3912 * port. 3913 */ 3914 static unsigned int get_mps_bg_map(struct adapter *adap, int idx) 3915 { 3916 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL)); 3917 3918 if (n == 0) 3919 return idx == 0 ? 0xf : 0; 3920 if (n == 1 && chip_id(adap) <= CHELSIO_T5) 3921 return idx < 2 ? (3 << (2 * idx)) : 0; 3922 return 1 << idx; 3923 } 3924 3925 /** 3926 * t4_get_port_type_description - return Port Type string description 3927 * @port_type: firmware Port Type enumeration 3928 */ 3929 const char *t4_get_port_type_description(enum fw_port_type port_type) 3930 { 3931 static const char *port_type_description[] = { 3932 "Fiber_XFI", 3933 "Fiber_XAUI", 3934 "BT_SGMII", 3935 "BT_XFI", 3936 "BT_XAUI", 3937 "KX4", 3938 "CX4", 3939 "KX", 3940 "KR", 3941 "SFP", 3942 "BP_AP", 3943 "BP4_AP", 3944 "QSFP_10G", 3945 "", 3946 "QSFP", 3947 "BP40_BA", 3948 }; 3949 3950 if (port_type < ARRAY_SIZE(port_type_description)) 3951 return port_type_description[port_type]; 3952 return "UNKNOWN"; 3953 } 3954 3955 /** 3956 * t4_get_port_stats_offset - collect port stats relative to a previous 3957 * snapshot 3958 * @adap: The adapter 3959 * @idx: The port 3960 * @stats: Current stats to fill 3961 * @offset: Previous stats snapshot 3962 */ 3963 void t4_get_port_stats_offset(struct adapter *adap, int idx, 3964 struct port_stats *stats, 3965 struct port_stats *offset) 3966 { 3967 u64 *s, *o; 3968 int i; 3969 3970 t4_get_port_stats(adap, idx, stats); 3971 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ; 3972 i < (sizeof(struct port_stats)/sizeof(u64)) ; 3973 i++, s++, o++) 3974 *s -= *o; 3975 } 3976 3977 /** 3978 * t4_get_port_stats - collect port statistics 3979 * @adap: the adapter 3980 * @idx: the port index 3981 * @p: the stats structure to fill 3982 * 3983 * Collect statistics related to the given port from HW. 3984 */ 3985 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) 3986 { 3987 u32 bgmap = get_mps_bg_map(adap, idx); 3988 3989 #define GET_STAT(name) \ 3990 t4_read_reg64(adap, \ 3991 (is_t4(adap) ? 
PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \ 3992 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))) 3993 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 3994 3995 p->tx_pause = GET_STAT(TX_PORT_PAUSE); 3996 p->tx_octets = GET_STAT(TX_PORT_BYTES); 3997 p->tx_frames = GET_STAT(TX_PORT_FRAMES); 3998 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); 3999 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); 4000 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); 4001 p->tx_error_frames = GET_STAT(TX_PORT_ERROR); 4002 p->tx_frames_64 = GET_STAT(TX_PORT_64B); 4003 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); 4004 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); 4005 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); 4006 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); 4007 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); 4008 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); 4009 p->tx_drop = GET_STAT(TX_PORT_DROP); 4010 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); 4011 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); 4012 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); 4013 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); 4014 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); 4015 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); 4016 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); 4017 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); 4018 4019 p->rx_pause = GET_STAT(RX_PORT_PAUSE); 4020 p->rx_octets = GET_STAT(RX_PORT_BYTES); 4021 p->rx_frames = GET_STAT(RX_PORT_FRAMES); 4022 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); 4023 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); 4024 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); 4025 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); 4026 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); 4027 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); 4028 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); 4029 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); 4030 p->rx_runt = GET_STAT(RX_PORT_LESS_64B); 4031 p->rx_frames_64 = GET_STAT(RX_PORT_64B); 4032 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); 4033 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); 4034 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); 4035 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); 4036 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); 4037 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); 4038 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); 4039 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); 4040 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); 4041 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); 4042 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); 4043 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); 4044 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); 4045 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); 4046 4047 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; 4048 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; 4049 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; 4050 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; 4051 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; 4052 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; 4053 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; 4054 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; 4055 4056 #undef GET_STAT 4057 #undef GET_STAT_COM 4058 } 4059 4060 /** 4061 * t4_clr_port_stats - clear port statistics 4062 * @adap: the adapter 4063 * @idx: the port index 4064 * 4065 * Clear HW statistics for the given port. 
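 *
 * A usage sketch (not from the original source):
 *
 *	t4_clr_port_stats(adap, idx);
 *
 * Note that the buffer-group drop/truncate counters cleared here are the
 * same ones reported as rx_ovflow0-3 and rx_trunc0-3 by
 * t4_get_port_stats(), so clearing them affects every port that shares
 * the buffer group.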
4066 */ 4067 void t4_clr_port_stats(struct adapter *adap, int idx) 4068 { 4069 unsigned int i; 4070 u32 bgmap = get_mps_bg_map(adap, idx); 4071 u32 port_base_addr; 4072 4073 if (is_t4(adap)) 4074 port_base_addr = PORT_BASE(idx); 4075 else 4076 port_base_addr = T5_PORT_BASE(idx); 4077 4078 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; 4079 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) 4080 t4_write_reg(adap, port_base_addr + i, 0); 4081 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; 4082 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) 4083 t4_write_reg(adap, port_base_addr + i, 0); 4084 for (i = 0; i < 4; i++) 4085 if (bgmap & (1 << i)) { 4086 t4_write_reg(adap, 4087 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0); 4088 t4_write_reg(adap, 4089 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0); 4090 } 4091 } 4092 4093 /** 4094 * t4_get_lb_stats - collect loopback port statistics 4095 * @adap: the adapter 4096 * @idx: the loopback port index 4097 * @p: the stats structure to fill 4098 * 4099 * Return HW statistics for the given loopback port. 4100 */ 4101 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) 4102 { 4103 u32 bgmap = get_mps_bg_map(adap, idx); 4104 4105 #define GET_STAT(name) \ 4106 t4_read_reg64(adap, \ 4107 (is_t4(adap) ? \ 4108 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \ 4109 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))) 4110 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 4111 4112 p->octets = GET_STAT(BYTES); 4113 p->frames = GET_STAT(FRAMES); 4114 p->bcast_frames = GET_STAT(BCAST); 4115 p->mcast_frames = GET_STAT(MCAST); 4116 p->ucast_frames = GET_STAT(UCAST); 4117 p->error_frames = GET_STAT(ERROR); 4118 4119 p->frames_64 = GET_STAT(64B); 4120 p->frames_65_127 = GET_STAT(65B_127B); 4121 p->frames_128_255 = GET_STAT(128B_255B); 4122 p->frames_256_511 = GET_STAT(256B_511B); 4123 p->frames_512_1023 = GET_STAT(512B_1023B); 4124 p->frames_1024_1518 = GET_STAT(1024B_1518B); 4125 p->frames_1519_max = GET_STAT(1519B_MAX); 4126 p->drop = GET_STAT(DROP_FRAMES); 4127 4128 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; 4129 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; 4130 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; 4131 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; 4132 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; 4133 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; 4134 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; 4135 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; 4136 4137 #undef GET_STAT 4138 #undef GET_STAT_COM 4139 } 4140 4141 /** 4142 * t4_wol_magic_enable - enable/disable magic packet WoL 4143 * @adap: the adapter 4144 * @port: the physical port index 4145 * @addr: MAC address expected in magic packets, %NULL to disable 4146 * 4147 * Enables/disables magic packet wake-on-LAN for the selected port. 
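 *
 * A usage sketch (not from the original source; "lladdr" is the port's
 * 6-byte Ethernet address):
 *
 *	t4_wol_magic_enable(adap, port, lladdr);	(enable)
 *	t4_wol_magic_enable(adap, port, NULL);		(disable)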
4148 */ 4149 void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 4150 const u8 *addr) 4151 { 4152 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; 4153 4154 if (is_t4(adap)) { 4155 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO); 4156 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI); 4157 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2); 4158 } else { 4159 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO); 4160 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI); 4161 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2); 4162 } 4163 4164 if (addr) { 4165 t4_write_reg(adap, mag_id_reg_l, 4166 (addr[2] << 24) | (addr[3] << 16) | 4167 (addr[4] << 8) | addr[5]); 4168 t4_write_reg(adap, mag_id_reg_h, 4169 (addr[0] << 8) | addr[1]); 4170 } 4171 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN, 4172 V_MAGICEN(addr != NULL)); 4173 } 4174 4175 /** 4176 * t4_wol_pat_enable - enable/disable pattern-based WoL 4177 * @adap: the adapter 4178 * @port: the physical port index 4179 * @map: bitmap of which HW pattern filters to set 4180 * @mask0: byte mask for bytes 0-63 of a packet 4181 * @mask1: byte mask for bytes 64-127 of a packet 4182 * @crc: Ethernet CRC for selected bytes 4183 * @enable: enable/disable switch 4184 * 4185 * Sets the pattern filters indicated in @map to mask out the bytes 4186 * specified in @mask0/@mask1 in received packets and compare the CRC of 4187 * the resulting packet against @crc. If @enable is %true pattern-based 4188 * WoL is enabled, otherwise disabled. 4189 */ 4190 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, 4191 u64 mask0, u64 mask1, unsigned int crc, bool enable) 4192 { 4193 int i; 4194 u32 port_cfg_reg; 4195 4196 if (is_t4(adap)) 4197 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2); 4198 else 4199 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2); 4200 4201 if (!enable) { 4202 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0); 4203 return 0; 4204 } 4205 if (map > 0xff) 4206 return -EINVAL; 4207 4208 #define EPIO_REG(name) \ 4209 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \ 4210 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name)) 4211 4212 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 4213 t4_write_reg(adap, EPIO_REG(DATA2), mask1); 4214 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); 4215 4216 for (i = 0; i < NWOL_PAT; i++, map >>= 1) { 4217 if (!(map & 1)) 4218 continue; 4219 4220 /* write byte masks */ 4221 t4_write_reg(adap, EPIO_REG(DATA0), mask0); 4222 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR); 4223 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 4224 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY) 4225 return -ETIMEDOUT; 4226 4227 /* write CRC */ 4228 t4_write_reg(adap, EPIO_REG(DATA0), crc); 4229 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR); 4230 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 4231 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY) 4232 return -ETIMEDOUT; 4233 } 4234 #undef EPIO_REG 4235 4236 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN); 4237 return 0; 4238 } 4239 4240 /** 4241 * t4_mk_filtdelwr - create a delete filter WR 4242 * @ftid: the filter ID 4243 * @wr: the filter work request to populate 4244 * @qid: ingress queue to receive the delete notification 4245 * 4246 * Creates a filter work request to delete the supplied filter. If @qid is 4247 * negative the delete notification is suppressed. 
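 *
 * A usage sketch (not from the original source; "ftid" is a previously
 * allocated filter tid, and the WR is sent by driver-specific code):
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(ftid, &wr, -1);		(no delete notification)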
4248  */
4249 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4250 {
4251 	memset(wr, 0, sizeof(*wr));
4252 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4253 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4254 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4255 	    V_FW_FILTER_WR_NOREPLY(qid < 0));
4256 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4257 	if (qid >= 0)
4258 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4259 }
4260 
4261 #define INIT_CMD(var, cmd, rd_wr) do { \
4262 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4263 	    F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4264 	(var).retval_len16 = htonl(FW_LEN16(var)); \
4265 } while (0)
4266 
4267 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
4268 {
4269 	struct fw_ldst_cmd c;
4270 
4271 	memset(&c, 0, sizeof(c));
4272 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4273 	    F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4274 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4275 	c.u.addrval.addr = htonl(addr);
4276 	c.u.addrval.val = htonl(val);
4277 
4278 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4279 }
4280 
4281 /**
4282  * t4_mdio_rd - read a PHY register through MDIO
4283  * @adap: the adapter
4284  * @mbox: mailbox to use for the FW command
4285  * @phy_addr: the PHY address
4286  * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4287  * @reg: the register to read
4288  * @valp: where to store the value
4289  *
4290  * Issues a FW command through the given mailbox to read a PHY register.
4291  */
4292 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4293 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
4294 {
4295 	int ret;
4296 	struct fw_ldst_cmd c;
4297 
4298 	memset(&c, 0, sizeof(c));
4299 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4300 	    F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4301 	c.cycles_to_len16 = htonl(FW_LEN16(c));
4302 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4303 	    V_FW_LDST_CMD_MMD(mmd));
4304 	c.u.mdio.raddr = htons(reg);
4305 
4306 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4307 	if (ret == 0)
4308 		*valp = ntohs(c.u.mdio.rval);
4309 	return ret;
4310 }
4311 
4312 /**
4313  * t4_mdio_wr - write a PHY register through MDIO
4314  * @adap: the adapter
4315  * @mbox: mailbox to use for the FW command
4316  * @phy_addr: the PHY address
4317  * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4318  * @reg: the register to write
4319  * @val: value to write
4320  *
4321  * Issues a FW command through the given mailbox to write a PHY register.
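 *
 * A usage sketch (not from the original source; mmd 1 selects the
 * PMA/PMD MMD of a clause-45 PHY):
 *
 *	ret = t4_mdio_wr(adap, mbox, phy_addr, 1, reg, val);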
4322 */ 4323 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 4324 unsigned int mmd, unsigned int reg, unsigned int val) 4325 { 4326 struct fw_ldst_cmd c; 4327 4328 memset(&c, 0, sizeof(c)); 4329 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4330 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); 4331 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4332 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) | 4333 V_FW_LDST_CMD_MMD(mmd)); 4334 c.u.mdio.raddr = htons(reg); 4335 c.u.mdio.rval = htons(val); 4336 4337 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4338 } 4339 4340 /** 4341 * t4_i2c_rd - read I2C data from adapter 4342 * @adap: the adapter 4343 * @port: Port number if per-port device; <0 if not 4344 * @devid: per-port device ID or absolute device ID 4345 * @offset: byte offset into device I2C space 4346 * @len: byte length of I2C space data 4347 * @buf: buffer in which to return I2C data 4348 * 4349 * Reads the I2C data from the indicated device and location. 4350 */ 4351 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, 4352 int port, unsigned int devid, 4353 unsigned int offset, unsigned int len, 4354 u8 *buf) 4355 { 4356 struct fw_ldst_cmd ldst; 4357 int ret; 4358 4359 if (port >= 4 || 4360 devid >= 256 || 4361 offset >= 256 || 4362 len > sizeof ldst.u.i2c.data) 4363 return -EINVAL; 4364 4365 memset(&ldst, 0, sizeof ldst); 4366 ldst.op_to_addrspace = 4367 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 4368 F_FW_CMD_REQUEST | 4369 F_FW_CMD_READ | 4370 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C)); 4371 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); 4372 ldst.u.i2c.pid = (port < 0 ? 0xff : port); 4373 ldst.u.i2c.did = devid; 4374 ldst.u.i2c.boffset = offset; 4375 ldst.u.i2c.blen = len; 4376 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); 4377 if (!ret) 4378 memcpy(buf, ldst.u.i2c.data, len); 4379 return ret; 4380 } 4381 4382 /** 4383 * t4_i2c_wr - write I2C data to adapter 4384 * @adap: the adapter 4385 * @port: Port number if per-port device; <0 if not 4386 * @devid: per-port device ID or absolute device ID 4387 * @offset: byte offset into device I2C space 4388 * @len: byte length of I2C space data 4389 * @buf: buffer containing new I2C data 4390 * 4391 * Write the I2C data to the indicated device and location. 4392 */ 4393 int t4_i2c_wr(struct adapter *adap, unsigned int mbox, 4394 int port, unsigned int devid, 4395 unsigned int offset, unsigned int len, 4396 u8 *buf) 4397 { 4398 struct fw_ldst_cmd ldst; 4399 4400 if (port >= 4 || 4401 devid >= 256 || 4402 offset >= 256 || 4403 len > sizeof ldst.u.i2c.data) 4404 return -EINVAL; 4405 4406 memset(&ldst, 0, sizeof ldst); 4407 ldst.op_to_addrspace = 4408 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 4409 F_FW_CMD_REQUEST | 4410 F_FW_CMD_WRITE | 4411 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C)); 4412 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); 4413 ldst.u.i2c.pid = (port < 0 ? 0xff : port); 4414 ldst.u.i2c.did = devid; 4415 ldst.u.i2c.boffset = offset; 4416 ldst.u.i2c.blen = len; 4417 memcpy(ldst.u.i2c.data, buf, len); 4418 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); 4419 } 4420 4421 /** 4422 * t4_sge_ctxt_flush - flush the SGE context cache 4423 * @adap: the adapter 4424 * @mbox: mailbox to use for the FW command 4425 * 4426 * Issues a FW command through the given mailbox to flush the 4427 * SGE context cache. 
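 *
 * A usage sketch (not from the original source):
 *
 *	ret = t4_sge_ctxt_flush(adap, mbox);
 *
 * A non-zero return means the mailbox command failed or the firmware
 * rejected the flush.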
4428 */ 4429 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox) 4430 { 4431 int ret; 4432 struct fw_ldst_cmd c; 4433 4434 memset(&c, 0, sizeof(c)); 4435 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4436 F_FW_CMD_READ | 4437 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC)); 4438 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4439 c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH); 4440 4441 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4442 return ret; 4443 } 4444 4445 /** 4446 * t4_sge_ctxt_rd - read an SGE context through FW 4447 * @adap: the adapter 4448 * @mbox: mailbox to use for the FW command 4449 * @cid: the context id 4450 * @ctype: the context type 4451 * @data: where to store the context data 4452 * 4453 * Issues a FW command through the given mailbox to read an SGE context. 4454 */ 4455 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, 4456 enum ctxt_type ctype, u32 *data) 4457 { 4458 int ret; 4459 struct fw_ldst_cmd c; 4460 4461 if (ctype == CTXT_EGRESS) 4462 ret = FW_LDST_ADDRSPC_SGE_EGRC; 4463 else if (ctype == CTXT_INGRESS) 4464 ret = FW_LDST_ADDRSPC_SGE_INGC; 4465 else if (ctype == CTXT_FLM) 4466 ret = FW_LDST_ADDRSPC_SGE_FLMC; 4467 else 4468 ret = FW_LDST_ADDRSPC_SGE_CONMC; 4469 4470 memset(&c, 0, sizeof(c)); 4471 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 4472 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret)); 4473 c.cycles_to_len16 = htonl(FW_LEN16(c)); 4474 c.u.idctxt.physid = htonl(cid); 4475 4476 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4477 if (ret == 0) { 4478 data[0] = ntohl(c.u.idctxt.ctxt_data0); 4479 data[1] = ntohl(c.u.idctxt.ctxt_data1); 4480 data[2] = ntohl(c.u.idctxt.ctxt_data2); 4481 data[3] = ntohl(c.u.idctxt.ctxt_data3); 4482 data[4] = ntohl(c.u.idctxt.ctxt_data4); 4483 data[5] = ntohl(c.u.idctxt.ctxt_data5); 4484 } 4485 return ret; 4486 } 4487 4488 /** 4489 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW 4490 * @adap: the adapter 4491 * @cid: the context id 4492 * @ctype: the context type 4493 * @data: where to store the context data 4494 * 4495 * Reads an SGE context directly, bypassing FW. This is only for 4496 * debugging when FW is unavailable. 4497 */ 4498 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, 4499 u32 *data) 4500 { 4501 int i, ret; 4502 4503 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype)); 4504 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1); 4505 if (!ret) 4506 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4) 4507 *data++ = t4_read_reg(adap, i); 4508 return ret; 4509 } 4510 4511 /** 4512 * t4_fw_hello - establish communication with FW 4513 * @adap: the adapter 4514 * @mbox: mailbox to use for the FW command 4515 * @evt_mbox: mailbox to receive async FW events 4516 * @master: specifies the caller's willingness to be the device master 4517 * @state: returns the current device state (if non-NULL) 4518 * 4519 * Issues a command to establish communication with FW. Returns either 4520 * an error (negative integer) or the mailbox of the Master PF. 
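 *
 * A sketch of the usual attach-time handshake (not from the original
 * source; MASTER_MAY is assumed to be the "willing but not insistent"
 * value of enum dev_master):
 *
 *	enum dev_state state;
 *	int master;
 *
 *	master = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
 *	if (master < 0)
 *		return master;		(hello failed)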
4521  */
4522 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4523 	enum dev_master master, enum dev_state *state)
4524 {
4525 	int ret;
4526 	struct fw_hello_cmd c;
4527 	u32 v;
4528 	unsigned int master_mbox;
4529 	int retries = FW_CMD_HELLO_RETRIES;
4530 
4531 retry:
4532 	memset(&c, 0, sizeof(c));
4533 	INIT_CMD(c, HELLO, WRITE);
4534 	c.err_to_clearinit = htonl(
4535 	    V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4536 	    V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4537 	    V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4538 		M_FW_HELLO_CMD_MBMASTER) |
4539 	    V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4540 	    V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4541 	    F_FW_HELLO_CMD_CLEARINIT);
4542 
4543 	/*
4544 	 * Issue the HELLO command to the firmware.  If it's not successful
4545 	 * but indicates that we got a "busy" or "timeout" condition, retry
4546 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4547 	 * retry limit, check to see if the firmware left us any error
4548 	 * information and report that if so ...
4549 	 */
4550 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4551 	if (ret != FW_SUCCESS) {
4552 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4553 			goto retry;
4554 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4555 			t4_report_fw_error(adap);
4556 		return ret;
4557 	}
4558 
4559 	v = ntohl(c.err_to_clearinit);
4560 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4561 	if (state) {
4562 		if (v & F_FW_HELLO_CMD_ERR)
4563 			*state = DEV_STATE_ERR;
4564 		else if (v & F_FW_HELLO_CMD_INIT)
4565 			*state = DEV_STATE_INIT;
4566 		else
4567 			*state = DEV_STATE_UNINIT;
4568 	}
4569 
4570 	/*
4571 	 * If we're not the Master PF then we need to wait around for the
4572 	 * Master PF Driver to finish setting up the adapter.
4573 	 *
4574 	 * Note that we also do this wait if we're a non-Master-capable PF and
4575 	 * there is no current Master PF; a Master PF may show up at any
4576 	 * moment and we wouldn't want to fail pointlessly.  (This can happen
4577 	 * when an OS loads lots of different drivers rapidly at the same
4578 	 * time.)  In this case, the Master PF returned by the firmware will
4579 	 * be M_PCIE_FW_MASTER so the test below will work ...
4580 	 */
4581 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4582 	    master_mbox != mbox) {
4583 		int waiting = FW_CMD_HELLO_TIMEOUT;
4584 
4585 		/*
4586 		 * Wait for the firmware to either indicate an error or
4587 		 * initialized state.  If we see either of these we bail out
4588 		 * and report the issue to the caller.  If we exhaust the
4589 		 * "hello timeout" and we haven't exhausted our retries, try
4590 		 * again.  Otherwise bail with a timeout error.
4591 		 */
4592 		for (;;) {
4593 			u32 pcie_fw;
4594 
4595 			msleep(50);
4596 			waiting -= 50;
4597 
4598 			/*
4599 			 * If neither Error nor Initialized is indicated
4600 			 * by the firmware keep waiting till we exhaust our
4601 			 * timeout ... and then retry if we haven't exhausted
4602 			 * our retries ...
4603 			 */
4604 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4605 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4606 				if (waiting <= 0) {
4607 					if (retries-- > 0)
4608 						goto retry;
4609 
4610 					return -ETIMEDOUT;
4611 				}
4612 				continue;
4613 			}
4614 
4615 			/*
4616 			 * We either have an Error or Initialized condition;
4617 			 * report errors preferentially.
4618 */ 4619 if (state) { 4620 if (pcie_fw & F_PCIE_FW_ERR) 4621 *state = DEV_STATE_ERR; 4622 else if (pcie_fw & F_PCIE_FW_INIT) 4623 *state = DEV_STATE_INIT; 4624 } 4625 4626 /* 4627 * If we arrived before a Master PF was selected and 4628 * there's not a valid Master PF, grab its identity 4629 * for our caller. 4630 */ 4631 if (master_mbox == M_PCIE_FW_MASTER && 4632 (pcie_fw & F_PCIE_FW_MASTER_VLD)) 4633 master_mbox = G_PCIE_FW_MASTER(pcie_fw); 4634 break; 4635 } 4636 } 4637 4638 return master_mbox; 4639 } 4640 4641 /** 4642 * t4_fw_bye - end communication with FW 4643 * @adap: the adapter 4644 * @mbox: mailbox to use for the FW command 4645 * 4646 * Issues a command to terminate communication with FW. 4647 */ 4648 int t4_fw_bye(struct adapter *adap, unsigned int mbox) 4649 { 4650 struct fw_bye_cmd c; 4651 4652 memset(&c, 0, sizeof(c)); 4653 INIT_CMD(c, BYE, WRITE); 4654 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4655 } 4656 4657 /** 4658 * t4_fw_reset - issue a reset to FW 4659 * @adap: the adapter 4660 * @mbox: mailbox to use for the FW command 4661 * @reset: specifies the type of reset to perform 4662 * 4663 * Issues a reset command of the specified type to FW. 4664 */ 4665 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) 4666 { 4667 struct fw_reset_cmd c; 4668 4669 memset(&c, 0, sizeof(c)); 4670 INIT_CMD(c, RESET, WRITE); 4671 c.val = htonl(reset); 4672 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4673 } 4674 4675 /** 4676 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET 4677 * @adap: the adapter 4678 * @mbox: mailbox to use for the FW RESET command (if desired) 4679 * @force: force uP into RESET even if FW RESET command fails 4680 * 4681 * Issues a RESET command to firmware (if desired) with a HALT indication 4682 * and then puts the microprocessor into RESET state. The RESET command 4683 * will only be issued if a legitimate mailbox is provided (mbox <= 4684 * M_PCIE_FW_MASTER). 4685 * 4686 * This is generally used in order for the host to safely manipulate the 4687 * adapter without fear of conflicting with whatever the firmware might 4688 * be doing. The only way out of this state is to RESTART the firmware 4689 * ... 4690 */ 4691 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) 4692 { 4693 int ret = 0; 4694 4695 /* 4696 * If a legitimate mailbox is provided, issue a RESET command 4697 * with a HALT indication. 4698 */ 4699 if (mbox <= M_PCIE_FW_MASTER) { 4700 struct fw_reset_cmd c; 4701 4702 memset(&c, 0, sizeof(c)); 4703 INIT_CMD(c, RESET, WRITE); 4704 c.val = htonl(F_PIORST | F_PIORSTMODE); 4705 c.halt_pkd = htonl(F_FW_RESET_CMD_HALT); 4706 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4707 } 4708 4709 /* 4710 * Normally we won't complete the operation if the firmware RESET 4711 * command fails but if our caller insists we'll go ahead and put the 4712 * uP into RESET. This can be useful if the firmware is hung or even 4713 * missing ... We'll have to take the risk of putting the uP into 4714 * RESET without the cooperation of firmware in that case. 4715 * 4716 * We also force the firmware's HALT flag to be on in case we bypassed 4717 * the firmware RESET command above or we're dealing with old firmware 4718 * which doesn't have the HALT capability. This will serve as a flag 4719 * for the incoming firmware to know that it's coming out of a HALT 4720 * rather than a RESET ... if it's new enough to understand that ... 
4721 */ 4722 if (ret == 0 || force) { 4723 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST); 4724 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT); 4725 } 4726 4727 /* 4728 * And we always return the result of the firmware RESET command 4729 * even when we force the uP into RESET ... 4730 */ 4731 return ret; 4732 } 4733 4734 /** 4735 * t4_fw_restart - restart the firmware by taking the uP out of RESET 4736 * @adap: the adapter 4737 * @reset: if we want to do a RESET to restart things 4738 * 4739 * Restart firmware previously halted by t4_fw_halt(). On successful 4740 * return the previous PF Master remains as the new PF Master and there 4741 * is no need to issue a new HELLO command, etc. 4742 * 4743 * We do this in two ways: 4744 * 4745 * 1. If we're dealing with newer firmware we'll simply want to take 4746 * the chip's microprocessor out of RESET. This will cause the 4747 * firmware to start up from its start vector. And then we'll loop 4748 * until the firmware indicates it's started again (PCIE_FW.HALT 4749 * reset to 0) or we timeout. 4750 * 4751 * 2. If we're dealing with older firmware then we'll need to RESET 4752 * the chip since older firmware won't recognize the PCIE_FW.HALT 4753 * flag and automatically RESET itself on startup. 4754 */ 4755 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) 4756 { 4757 if (reset) { 4758 /* 4759 * Since we're directing the RESET instead of the firmware 4760 * doing it automatically, we need to clear the PCIE_FW.HALT 4761 * bit. 4762 */ 4763 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0); 4764 4765 /* 4766 * If we've been given a valid mailbox, first try to get the 4767 * firmware to do the RESET. If that works, great and we can 4768 * return success. Otherwise, if we haven't been given a 4769 * valid mailbox or the RESET command failed, fall back to 4770 * hitting the chip with a hammer. 4771 */ 4772 if (mbox <= M_PCIE_FW_MASTER) { 4773 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); 4774 msleep(100); 4775 if (t4_fw_reset(adap, mbox, 4776 F_PIORST | F_PIORSTMODE) == 0) 4777 return 0; 4778 } 4779 4780 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE); 4781 msleep(2000); 4782 } else { 4783 int ms; 4784 4785 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); 4786 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 4787 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT)) 4788 return FW_SUCCESS; 4789 msleep(100); 4790 ms += 100; 4791 } 4792 return -ETIMEDOUT; 4793 } 4794 return 0; 4795 } 4796 4797 /** 4798 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW 4799 * @adap: the adapter 4800 * @mbox: mailbox to use for the FW RESET command (if desired) 4801 * @fw_data: the firmware image to write 4802 * @size: image size 4803 * @force: force upgrade even if firmware doesn't cooperate 4804 * 4805 * Perform all of the steps necessary for upgrading an adapter's 4806 * firmware image. Normally this requires the cooperation of the 4807 * existing firmware in order to halt all existing activities 4808 * but if an invalid mailbox token is passed in we skip that step 4809 * (though we'll still put the adapter microprocessor into RESET in 4810 * that case). 4811 * 4812 * On successful return the new firmware will have been loaded and 4813 * the adapter will have been fully RESET losing all previous setup 4814 * state. On unsuccessful return the adapter may be completely hosed ... 
4815 * positive errno indicates that the adapter is ~probably~ intact, a 4816 * negative errno indicates that things are looking bad ... 4817 */ 4818 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, 4819 const u8 *fw_data, unsigned int size, int force) 4820 { 4821 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 4822 unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP; 4823 int reset, ret; 4824 4825 if (!bootstrap) { 4826 ret = t4_fw_halt(adap, mbox, force); 4827 if (ret < 0 && !force) 4828 return ret; 4829 } 4830 4831 ret = t4_load_fw(adap, fw_data, size); 4832 if (ret < 0 || bootstrap) 4833 return ret; 4834 4835 /* 4836 * Older versions of the firmware don't understand the new 4837 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 4838 * restart. So for newly loaded older firmware we'll have to do the 4839 * RESET for it so it starts up on a clean slate. We can tell if 4840 * the newly loaded firmware will handle this right by checking 4841 * its header flags to see if it advertises the capability. 4842 */ 4843 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 4844 return t4_fw_restart(adap, mbox, reset); 4845 } 4846 4847 /** 4848 * t4_fw_initialize - ask FW to initialize the device 4849 * @adap: the adapter 4850 * @mbox: mailbox to use for the FW command 4851 * 4852 * Issues a command to FW to partially initialize the device. This 4853 * performs initialization that generally doesn't depend on user input. 4854 */ 4855 int t4_fw_initialize(struct adapter *adap, unsigned int mbox) 4856 { 4857 struct fw_initialize_cmd c; 4858 4859 memset(&c, 0, sizeof(c)); 4860 INIT_CMD(c, INITIALIZE, WRITE); 4861 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4862 } 4863 4864 /** 4865 * t4_query_params - query FW or device parameters 4866 * @adap: the adapter 4867 * @mbox: mailbox to use for the FW command 4868 * @pf: the PF 4869 * @vf: the VF 4870 * @nparams: the number of parameters 4871 * @params: the parameter names 4872 * @val: the parameter values 4873 * 4874 * Reads the value of FW or device parameters. Up to 7 parameters can be 4875 * queried at once. 4876 */ 4877 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 4878 unsigned int vf, unsigned int nparams, const u32 *params, 4879 u32 *val) 4880 { 4881 int i, ret; 4882 struct fw_params_cmd c; 4883 __be32 *p = &c.param[0].mnem; 4884 4885 if (nparams > 7) 4886 return -EINVAL; 4887 4888 memset(&c, 0, sizeof(c)); 4889 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | 4890 F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) | 4891 V_FW_PARAMS_CMD_VFN(vf)); 4892 c.retval_len16 = htonl(FW_LEN16(c)); 4893 4894 for (i = 0; i < nparams; i++, p += 2, params++) 4895 *p = htonl(*params); 4896 4897 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4898 if (ret == 0) 4899 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) 4900 *val++ = ntohl(*p); 4901 return ret; 4902 } 4903 4904 /** 4905 * t4_set_params - sets FW or device parameters 4906 * @adap: the adapter 4907 * @mbox: mailbox to use for the FW command 4908 * @pf: the PF 4909 * @vf: the VF 4910 * @nparams: the number of parameters 4911 * @params: the parameter names 4912 * @val: the parameter values 4913 * 4914 * Sets the value of FW or device parameters. Up to 7 parameters can be 4915 * specified at once. 
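 *
 * A sketch of setting one parameter (not from the original source; it
 * assumes the usual V_FW_PARAMS_* encoding macros from the firmware
 * interface header, and "param_id" is a hypothetical parameter id):
 *
 *	u32 param, val;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	    V_FW_PARAMS_PARAM_X(param_id);
 *	val = 1;
 *	ret = t4_set_params(adap, mbox, pf, 0, 1, &param, &val);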
4916 */ 4917 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 4918 unsigned int vf, unsigned int nparams, const u32 *params, 4919 const u32 *val) 4920 { 4921 struct fw_params_cmd c; 4922 __be32 *p = &c.param[0].mnem; 4923 4924 if (nparams > 7) 4925 return -EINVAL; 4926 4927 memset(&c, 0, sizeof(c)); 4928 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | 4929 F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) | 4930 V_FW_PARAMS_CMD_VFN(vf)); 4931 c.retval_len16 = htonl(FW_LEN16(c)); 4932 4933 while (nparams--) { 4934 *p++ = htonl(*params); 4935 params++; 4936 *p++ = htonl(*val); 4937 val++; 4938 } 4939 4940 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4941 } 4942 4943 /** 4944 * t4_cfg_pfvf - configure PF/VF resource limits 4945 * @adap: the adapter 4946 * @mbox: mailbox to use for the FW command 4947 * @pf: the PF being configured 4948 * @vf: the VF being configured 4949 * @txq: the max number of egress queues 4950 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 4951 * @rxqi: the max number of interrupt-capable ingress queues 4952 * @rxq: the max number of interruptless ingress queues 4953 * @tc: the PCI traffic class 4954 * @vi: the max number of virtual interfaces 4955 * @cmask: the channel access rights mask for the PF/VF 4956 * @pmask: the port access rights mask for the PF/VF 4957 * @nexact: the maximum number of exact MPS filters 4958 * @rcaps: read capabilities 4959 * @wxcaps: write/execute capabilities 4960 * 4961 * Configures resource limits and capabilities for a physical or virtual 4962 * function. 4963 */ 4964 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 4965 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 4966 unsigned int rxqi, unsigned int rxq, unsigned int tc, 4967 unsigned int vi, unsigned int cmask, unsigned int pmask, 4968 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 4969 { 4970 struct fw_pfvf_cmd c; 4971 4972 memset(&c, 0, sizeof(c)); 4973 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST | 4974 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | 4975 V_FW_PFVF_CMD_VFN(vf)); 4976 c.retval_len16 = htonl(FW_LEN16(c)); 4977 c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) | 4978 V_FW_PFVF_CMD_NIQ(rxq)); 4979 c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) | 4980 V_FW_PFVF_CMD_PMASK(pmask) | 4981 V_FW_PFVF_CMD_NEQ(txq)); 4982 c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) | 4983 V_FW_PFVF_CMD_NEXACTF(nexact)); 4984 c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) | 4985 V_FW_PFVF_CMD_WX_CAPS(wxcaps) | 4986 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); 4987 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4988 } 4989 4990 /** 4991 * t4_alloc_vi_func - allocate a virtual interface 4992 * @adap: the adapter 4993 * @mbox: mailbox to use for the FW command 4994 * @port: physical port associated with the VI 4995 * @pf: the PF owning the VI 4996 * @vf: the VF owning the VI 4997 * @nmac: number of MAC addresses needed (1 to 5) 4998 * @mac: the MAC addresses of the VI 4999 * @rss_size: size of RSS table slice associated with this VI 5000 * @portfunc: which Port Application Function MAC Address is desired 5001 * @idstype: Intrusion Detection Type 5002 * 5003 * Allocates a virtual interface for the given physical port. If @mac is 5004 * not %NULL it contains the MAC addresses of the VI as assigned by FW. 
5005  * @mac should be large enough to hold @nmac Ethernet addresses; they are
5006  * stored consecutively, so the space needed is @nmac * 6 bytes.
5007  * Returns a negative error number or the non-negative VI id.
5008  */
5009 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
5010 		     unsigned int port, unsigned int pf, unsigned int vf,
5011 		     unsigned int nmac, u8 *mac, u16 *rss_size,
5012 		     unsigned int portfunc, unsigned int idstype)
5013 {
5014 	int ret;
5015 	struct fw_vi_cmd c;
5016 
5017 	memset(&c, 0, sizeof(c));
5018 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
5019 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
5020 	    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
5021 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
5022 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
5023 	    V_FW_VI_CMD_FUNC(portfunc));
5024 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
5025 	c.nmac = nmac - 1;
5026 
5027 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5028 	if (ret)
5029 		return ret;
5030 
5031 	if (mac) {
5032 		memcpy(mac, c.mac, sizeof(c.mac));
5033 		switch (nmac) {
5034 		case 5:
5035 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); /* FALLTHROUGH */
5036 		case 4:
5037 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); /* FALLTHROUGH */
5038 		case 3:
5039 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); /* FALLTHROUGH */
5040 		case 2:
5041 			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
5042 		}
5043 	}
5044 	if (rss_size)
5045 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
5046 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
5047 }
5048 
5049 /**
5050  * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
5051  * @adap: the adapter
5052  * @mbox: mailbox to use for the FW command
5053  * @port: physical port associated with the VI
5054  * @pf: the PF owning the VI
5055  * @vf: the VF owning the VI
5056  * @nmac: number of MAC addresses needed (1 to 5)
5057  * @mac: the MAC addresses of the VI
5058  * @rss_size: size of RSS table slice associated with this VI
5059  *
5060  * A backwards-compatible convenience routine that allocates a Virtual
5061  * Interface with an Ethernet Port Application Function and Intrusion
5062  * Detection System disabled.
5063  */
5064 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
5065 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
5066 		u16 *rss_size)
5067 {
5068 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
5069 				FW_VI_FUNC_ETH, 0);
5070 }
5071 
5072 /**
5073  * t4_free_vi - free a virtual interface
5074  * @adap: the adapter
5075  * @mbox: mailbox to use for the FW command
5076  * @pf: the PF owning the VI
5077  * @vf: the VF owning the VI
5078  * @viid: virtual interface identifier
5079  *
5080  * Free a previously allocated virtual interface.
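 *
 * A usage sketch (not from the original source; "viid" was returned by
 * an earlier t4_alloc_vi() call):
 *
 *	ret = t4_free_vi(adap, mbox, pf, 0, viid);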
5081 */ 5082 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, 5083 unsigned int vf, unsigned int viid) 5084 { 5085 struct fw_vi_cmd c; 5086 5087 memset(&c, 0, sizeof(c)); 5088 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | 5089 F_FW_CMD_REQUEST | 5090 F_FW_CMD_EXEC | 5091 V_FW_VI_CMD_PFN(pf) | 5092 V_FW_VI_CMD_VFN(vf)); 5093 c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c)); 5094 c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid)); 5095 5096 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 5097 } 5098 5099 /** 5100 * t4_set_rxmode - set Rx properties of a virtual interface 5101 * @adap: the adapter 5102 * @mbox: mailbox to use for the FW command 5103 * @viid: the VI id 5104 * @mtu: the new MTU or -1 5105 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 5106 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 5107 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 5108 * @vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change 5109 * @sleep_ok: if true we may sleep while awaiting command completion 5110 * 5111 * Sets Rx properties of a virtual interface. 5112 */ 5113 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 5114 int mtu, int promisc, int all_multi, int bcast, int vlanex, 5115 bool sleep_ok) 5116 { 5117 struct fw_vi_rxmode_cmd c; 5118 5119 /* convert to FW values */ 5120 if (mtu < 0) 5121 mtu = M_FW_VI_RXMODE_CMD_MTU; 5122 if (promisc < 0) 5123 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; 5124 if (all_multi < 0) 5125 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; 5126 if (bcast < 0) 5127 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; 5128 if (vlanex < 0) 5129 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; 5130 5131 memset(&c, 0, sizeof(c)); 5132 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST | 5133 F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid)); 5134 c.retval_len16 = htonl(FW_LEN16(c)); 5135 c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) | 5136 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 5137 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 5138 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | 5139 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); 5140 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 5141 } 5142 5143 /** 5144 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 5145 * @adap: the adapter 5146 * @mbox: mailbox to use for the FW command 5147 * @viid: the VI id 5148 * @free: if true any existing filters for this VI id are first removed 5149 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 5150 * @addr: the MAC address(es) 5151 * @idx: where to store the index of each allocated filter 5152 * @hash: pointer to hash address filter bitmap 5153 * @sleep_ok: call is allowed to sleep 5154 * 5155 * Allocates an exact-match filter for each of the supplied addresses and 5156 * sets it to the corresponding address. If @idx is not %NULL it should 5157 * have at least @naddr entries, each of which will be set to the index of 5158 * the filter allocated for the corresponding MAC address. If a filter 5159 * could not be allocated for an address its index is set to 0xffff. 5160 * If @hash is not %NULL addresses that fail to allocate an exact filter 5161 * are hashed and update the hash filter bitmap pointed at by @hash. 5162 * 5163 * Returns a negative error number or the number of filters allocated. 
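 *
 * A sketch that programs two unicast addresses (not from the original
 * source; mac0 and mac1 are 6-byte Ethernet addresses):
 *
 *	const u8 *maclist[2] = { mac0, mac1 };
 *	u16 macidx[2];
 *	u64 hash = 0;
 *	int n;
 *
 *	n = t4_alloc_mac_filt(adap, mbox, viid, false, 2, maclist,
 *	    macidx, &hash, true);
 *
 * On success n is the number of exact filters allocated; addresses that
 * did not fit have macidx[i] == 0xffff and are folded into hash.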
5164 */ 5165 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, 5166 unsigned int viid, bool free, unsigned int naddr, 5167 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) 5168 { 5169 int offset, ret = 0; 5170 struct fw_vi_mac_cmd c; 5171 unsigned int nfilters = 0; 5172 unsigned int max_naddr = adap->chip_params->mps_tcam_size; 5173 unsigned int rem = naddr; 5174 5175 if (naddr > max_naddr) 5176 return -EINVAL; 5177 5178 for (offset = 0; offset < naddr ; /**/) { 5179 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) 5180 ? rem 5181 : ARRAY_SIZE(c.u.exact)); 5182 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 5183 u.exact[fw_naddr]), 16); 5184 struct fw_vi_mac_exact *p; 5185 int i; 5186 5187 memset(&c, 0, sizeof(c)); 5188 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | 5189 F_FW_CMD_REQUEST | 5190 F_FW_CMD_WRITE | 5191 V_FW_CMD_EXEC(free) | 5192 V_FW_VI_MAC_CMD_VIID(viid)); 5193 c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) | 5194 V_FW_CMD_LEN16(len16)); 5195 5196 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 5197 p->valid_to_idx = htons( 5198 F_FW_VI_MAC_CMD_VALID | 5199 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); 5200 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); 5201 } 5202 5203 /* 5204 * It's okay if we run out of space in our MAC address arena. 5205 * Some of the addresses we submit may get stored so we need 5206 * to run through the reply to see what the results were ... 5207 */ 5208 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); 5209 if (ret && ret != -FW_ENOMEM) 5210 break; 5211 5212 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 5213 u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx)); 5214 5215 if (idx) 5216 idx[offset+i] = (index >= max_naddr 5217 ? 0xffff 5218 : index); 5219 if (index < max_naddr) 5220 nfilters++; 5221 else if (hash) 5222 *hash |= (1ULL << hash_mac_addr(addr[offset+i])); 5223 } 5224 5225 free = false; 5226 offset += fw_naddr; 5227 rem -= fw_naddr; 5228 } 5229 5230 if (ret == 0 || ret == -FW_ENOMEM) 5231 ret = nfilters; 5232 return ret; 5233 } 5234 5235 /** 5236 * t4_change_mac - modifies the exact-match filter for a MAC address 5237 * @adap: the adapter 5238 * @mbox: mailbox to use for the FW command 5239 * @viid: the VI id 5240 * @idx: index of existing filter for old value of MAC address, or -1 5241 * @addr: the new MAC address value 5242 * @persist: whether a new MAC allocation should be persistent 5243 * @add_smt: if true also add the address to the HW SMT 5244 * 5245 * Modifies an exact-match filter and sets it to the new MAC address if 5246 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 5247 * latter case the address is added persistently if @persist is %true. 5248 * 5249 * Note that in general it is not possible to modify the value of a given 5250 * filter so the generic way to modify an address filter is to free the one 5251 * being used by the old address value and allocate a new filter for the 5252 * new address value. 5253 * 5254 * Returns a negative error number or the index of the filter with the new 5255 * MAC value. Note that this index may differ from @idx. 5256 */ 5257 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 5258 int idx, const u8 *addr, bool persist, bool add_smt) 5259 { 5260 int ret, mode; 5261 struct fw_vi_mac_cmd c; 5262 struct fw_vi_mac_exact *p = c.u.exact; 5263 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size; 5264 5265 if (idx < 0) /* new allocation */ 5266 idx = persist ? 
FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 5267 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; 5268 5269 memset(&c, 0, sizeof(c)); 5270 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST | 5271 F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid)); 5272 c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1)); 5273 p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID | 5274 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | 5275 V_FW_VI_MAC_CMD_IDX(idx)); 5276 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 5277 5278 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 5279 if (ret == 0) { 5280 ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx)); 5281 if (ret >= max_mac_addr) 5282 ret = -ENOMEM; 5283 } 5284 return ret; 5285 } 5286 5287 /** 5288 * t4_set_addr_hash - program the MAC inexact-match hash filter 5289 * @adap: the adapter 5290 * @mbox: mailbox to use for the FW command 5291 * @viid: the VI id 5292 * @ucast: whether the hash filter should also match unicast addresses 5293 * @vec: the value to be written to the hash filter 5294 * @sleep_ok: call is allowed to sleep 5295 * 5296 * Sets the 64-bit inexact-match hash filter for a virtual interface. 5297 */ 5298 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 5299 bool ucast, u64 vec, bool sleep_ok) 5300 { 5301 struct fw_vi_mac_cmd c; 5302 u32 val; 5303 5304 memset(&c, 0, sizeof(c)); 5305 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST | 5306 F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid)); 5307 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) | 5308 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1); 5309 c.freemacs_to_len16 = cpu_to_be32(val); 5310 c.u.hash.hashvec = cpu_to_be64(vec); 5311 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 5312 } 5313 5314 /** 5315 * t4_enable_vi - enable/disable a virtual interface 5316 * @adap: the adapter 5317 * @mbox: mailbox to use for the FW command 5318 * @viid: the VI id 5319 * @rx_en: 1=enable Rx, 0=disable Rx 5320 * @tx_en: 1=enable Tx, 0=disable Tx 5321 * 5322 * Enables/disables a virtual interface. 5323 */ 5324 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 5325 bool rx_en, bool tx_en) 5326 { 5327 struct fw_vi_enable_cmd c; 5328 5329 memset(&c, 0, sizeof(c)); 5330 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST | 5331 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid)); 5332 c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) | 5333 V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); 5334 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5335 } 5336 5337 /** 5338 * t4_identify_port - identify a VI's port by blinking its LED 5339 * @adap: the adapter 5340 * @mbox: mailbox to use for the FW command 5341 * @viid: the VI id 5342 * @nblinks: how many times to blink LED at 2.5 Hz 5343 * 5344 * Identifies a VI's port by blinking its LED. 
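 *
 * For example, blinking the port LED for roughly 10 seconds at the
 * fixed 2.5 Hz rate (25 blinks) would be, with "sc" and "viid" as in
 * the sketches above:
 *
 *	t4_identify_port(sc, sc->mbox, viid, 25);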
5345 */ 5346 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, 5347 unsigned int nblinks) 5348 { 5349 struct fw_vi_enable_cmd c; 5350 5351 memset(&c, 0, sizeof(c)); 5352 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST | 5353 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid)); 5354 c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); 5355 c.blinkdur = htons(nblinks); 5356 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5357 } 5358 5359 /** 5360 * t4_iq_free - free an ingress queue and its FLs 5361 * @adap: the adapter 5362 * @mbox: mailbox to use for the FW command 5363 * @pf: the PF owning the queues 5364 * @vf: the VF owning the queues 5365 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 5366 * @iqid: ingress queue id 5367 * @fl0id: FL0 queue id or 0xffff if no attached FL0 5368 * @fl1id: FL1 queue id or 0xffff if no attached FL1 5369 * 5370 * Frees an ingress queue and its associated FLs, if any. 5371 */ 5372 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 5373 unsigned int vf, unsigned int iqtype, unsigned int iqid, 5374 unsigned int fl0id, unsigned int fl1id) 5375 { 5376 struct fw_iq_cmd c; 5377 5378 memset(&c, 0, sizeof(c)); 5379 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 5380 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 5381 V_FW_IQ_CMD_VFN(vf)); 5382 c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c)); 5383 c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype)); 5384 c.iqid = htons(iqid); 5385 c.fl0id = htons(fl0id); 5386 c.fl1id = htons(fl1id); 5387 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5388 } 5389 5390 /** 5391 * t4_eth_eq_free - free an Ethernet egress queue 5392 * @adap: the adapter 5393 * @mbox: mailbox to use for the FW command 5394 * @pf: the PF owning the queue 5395 * @vf: the VF owning the queue 5396 * @eqid: egress queue id 5397 * 5398 * Frees an Ethernet egress queue. 5399 */ 5400 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 5401 unsigned int vf, unsigned int eqid) 5402 { 5403 struct fw_eq_eth_cmd c; 5404 5405 memset(&c, 0, sizeof(c)); 5406 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 5407 F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) | 5408 V_FW_EQ_ETH_CMD_VFN(vf)); 5409 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); 5410 c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid)); 5411 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 5412 } 5413 5414 /** 5415 * t4_ctrl_eq_free - free a control egress queue 5416 * @adap: the adapter 5417 * @mbox: mailbox to use for the FW command 5418 * @pf: the PF owning the queue 5419 * @vf: the VF owning the queue 5420 * @eqid: egress queue id 5421 * 5422 * Frees a control egress queue. 
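 *
 * Typical teardown sketch (illustrative names; the queue id would have
 * been saved when the control queue was allocated):
 *
 *	t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, ctrl_eqid);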
5423 */
5424 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5425 unsigned int vf, unsigned int eqid)
5426 {
5427 struct fw_eq_ctrl_cmd c;
5428 
5429 memset(&c, 0, sizeof(c));
5430 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5431 F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5432 V_FW_EQ_CTRL_CMD_VFN(vf));
5433 c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5434 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5435 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5436 }
5437 
5438 /**
5439 * t4_ofld_eq_free - free an offload egress queue
5440 * @adap: the adapter
5441 * @mbox: mailbox to use for the FW command
5442 * @pf: the PF owning the queue
5443 * @vf: the VF owning the queue
5444 * @eqid: egress queue id
5445 *
5446 * Frees an offload egress queue.
5447 */
5448 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5449 unsigned int vf, unsigned int eqid)
5450 {
5451 struct fw_eq_ofld_cmd c;
5452 
5453 memset(&c, 0, sizeof(c));
5454 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5455 F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5456 V_FW_EQ_OFLD_CMD_VFN(vf));
5457 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5458 c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5459 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5460 }
5461 
5462 /**
5463 * t4_handle_fw_rpl - process a FW reply message
5464 * @adap: the adapter
5465 * @rpl: start of the FW message
5466 *
5467 * Processes a FW message, such as link state change messages.
5468 */
5469 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5470 {
5471 u8 opcode = *(const u8 *)rpl;
5472 const struct fw_port_cmd *p = (const void *)rpl;
5473 unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5474 
5475 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5476 /* link/module state change message */
5477 int speed = 0, fc = 0, i;
5478 int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5479 struct port_info *pi = NULL;
5480 struct link_config *lc;
5481 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5482 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5483 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5484 
5485 if (stat & F_FW_PORT_CMD_RXPAUSE)
5486 fc |= PAUSE_RX;
5487 if (stat & F_FW_PORT_CMD_TXPAUSE)
5488 fc |= PAUSE_TX;
5489 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5490 speed = SPEED_100;
5491 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5492 speed = SPEED_1000;
5493 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5494 speed = SPEED_10000;
5495 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
5496 speed = SPEED_40000;
5497 
5498 for_each_port(adap, i) {
5499 pi = adap2pinfo(adap, i);
5500 if (pi->tx_chan == chan)
5501 break;
5502 }
5503 lc = &pi->link_cfg;
5504 
5505 if (mod != pi->mod_type) {
5506 pi->mod_type = mod;
5507 t4_os_portmod_changed(adap, i);
5508 }
5509 if (link_ok != lc->link_ok || speed != lc->speed ||
5510 fc != lc->fc) { /* something changed */
5511 int reason;
5512 
5513 if (!link_ok && lc->link_ok)
5514 reason = G_FW_PORT_CMD_LINKDNRC(stat);
5515 else
5516 reason = -1;
5517 
5518 lc->link_ok = link_ok;
5519 lc->speed = speed;
5520 lc->fc = fc;
5521 lc->supported = ntohs(p->u.info.pcap);
5522 t4_os_link_changed(adap, i, link_ok, reason);
5523 }
5524 } else {
5525 CH_WARN_RATELIMIT(adap,
5526 "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5527 return -EINVAL;
5528 }
5529 return 0;
5530 } 5531 5532 /** 5533 * get_pci_mode - determine a card's PCI mode 5534 * @adapter: the adapter 5535 * @p: where to store the PCI settings 5536 * 5537 * Determines a card's PCI mode and associated parameters, such as speed 5538 * and width. 5539 */ 5540 static void __devinit get_pci_mode(struct adapter *adapter, 5541 struct pci_params *p) 5542 { 5543 u16 val; 5544 u32 pcie_cap; 5545 5546 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 5547 if (pcie_cap) { 5548 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val); 5549 p->speed = val & PCI_EXP_LNKSTA_CLS; 5550 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; 5551 } 5552 } 5553 5554 /** 5555 * init_link_config - initialize a link's SW state 5556 * @lc: structure holding the link state 5557 * @caps: link capabilities 5558 * 5559 * Initializes the SW state maintained for each link, including the link's 5560 * capabilities and default speed/flow-control/autonegotiation settings. 5561 */ 5562 static void __devinit init_link_config(struct link_config *lc, 5563 unsigned int caps) 5564 { 5565 lc->supported = caps; 5566 lc->requested_speed = 0; 5567 lc->speed = 0; 5568 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; 5569 if (lc->supported & FW_PORT_CAP_ANEG) { 5570 lc->advertising = lc->supported & ADVERT_MASK; 5571 lc->autoneg = AUTONEG_ENABLE; 5572 lc->requested_fc |= PAUSE_AUTONEG; 5573 } else { 5574 lc->advertising = 0; 5575 lc->autoneg = AUTONEG_DISABLE; 5576 } 5577 } 5578 5579 static int __devinit get_flash_params(struct adapter *adapter) 5580 { 5581 int ret; 5582 u32 info = 0; 5583 5584 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID); 5585 if (!ret) 5586 ret = sf1_read(adapter, 3, 0, 1, &info); 5587 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 5588 if (ret < 0) 5589 return ret; 5590 5591 if ((info & 0xff) != 0x20) /* not a Numonix flash */ 5592 return -EINVAL; 5593 info >>= 16; /* log2 of size */ 5594 if (info >= 0x14 && info < 0x18) 5595 adapter->params.sf_nsec = 1 << (info - 16); 5596 else if (info == 0x18) 5597 adapter->params.sf_nsec = 64; 5598 else 5599 return -EINVAL; 5600 adapter->params.sf_size = 1 << info; 5601 return 0; 5602 } 5603 5604 static void __devinit set_pcie_completion_timeout(struct adapter *adapter, 5605 u8 range) 5606 { 5607 u16 val; 5608 u32 pcie_cap; 5609 5610 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 5611 if (pcie_cap) { 5612 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val); 5613 val &= 0xfff0; 5614 val |= range ; 5615 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val); 5616 } 5617 } 5618 5619 static const struct chip_params *get_chip_params(int chipid) 5620 { 5621 static const struct chip_params chip_params[] = { 5622 { 5623 /* T4 */ 5624 .nchan = NCHAN, 5625 .pm_stats_cnt = PM_NSTATS, 5626 .cng_ch_bits_log = 2, 5627 .nsched_cls = 15, 5628 .cim_num_obq = CIM_NUM_OBQ, 5629 .mps_rplc_size = 128, 5630 .vfcount = 128, 5631 .sge_fl_db = F_DBPRIO, 5632 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES, 5633 }, 5634 { 5635 /* T5 */ 5636 .nchan = NCHAN, 5637 .pm_stats_cnt = PM_NSTATS, 5638 .cng_ch_bits_log = 2, 5639 .nsched_cls = 16, 5640 .cim_num_obq = CIM_NUM_OBQ_T5, 5641 .mps_rplc_size = 128, 5642 .vfcount = 128, 5643 .sge_fl_db = F_DBPRIO | F_DBTYPE, 5644 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES, 5645 }, 5646 { 5647 /* T6 */ 5648 .nchan = T6_NCHAN, 5649 .pm_stats_cnt = T6_PM_NSTATS, 5650 .cng_ch_bits_log = 3, 5651 .nsched_cls = 16, 5652 .cim_num_obq = CIM_NUM_OBQ_T5, 5653 .mps_rplc_size = 256, 5654 .vfcount = 256, 5655 .sge_fl_db = 0, 5656 
.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
5657 },
5658 };
5659 
5660 chipid -= CHELSIO_T4;
5661 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
5662 return NULL;
5663 
5664 return &chip_params[chipid];
5665 }
5666 
5667 /**
5668 * t4_prep_adapter - prepare SW and HW for operation
5669 * @adapter: the adapter
5670 *
5671 * Initialize adapter SW state for the various HW modules, set initial
5672 * values for some adapter tunables, take PHYs out of reset, and
5673 * initialize the MDIO interface. No HW reset is performed by this
5674 * routine.
5675 */
5676 int __devinit t4_prep_adapter(struct adapter *adapter)
5677 {
5678 int ret;
5679 uint16_t device_id;
5680 uint32_t pl_rev;
5681 
5682 get_pci_mode(adapter, &adapter->params.pci);
5683 
5684 pl_rev = t4_read_reg(adapter, A_PL_REV);
5685 adapter->params.chipid = G_CHIPID(pl_rev);
5686 adapter->params.rev = G_REV(pl_rev);
5687 if (adapter->params.chipid == 0) {
5688 /* T4 did not have chipid in PL_REV (T5 onwards do) */
5689 adapter->params.chipid = CHELSIO_T4;
5690 
5691 /* T4A1 chip is not supported */
5692 if (adapter->params.rev == 1) {
5693 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
5694 return -EINVAL;
5695 }
5696 }
5697 
5698 adapter->chip_params = get_chip_params(chip_id(adapter));
5699 if (adapter->chip_params == NULL)
5700 return -EINVAL;
5701 
5702 adapter->params.pci.vpd_cap_addr =
5703 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5704 
5705 ret = get_flash_params(adapter);
5706 if (ret < 0)
5707 return ret;
5708 
5709 ret = get_vpd_params(adapter, &adapter->params.vpd);
5710 if (ret < 0)
5711 return ret;
5712 
5713 /* Cards with real ASICs have the chipid in the PCIe device id */
5714 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
5715 if (device_id >> 12 == chip_id(adapter))
5716 adapter->params.cim_la_size = CIMLA_SIZE;
5717 else {
5718 /* FPGA */
5719 adapter->params.fpga = 1;
5720 adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5721 }
5722 
5723 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5724 
5725 /*
5726 * Default port and clock for debugging in case we can't reach FW.
5727 */
5728 adapter->params.nports = 1;
5729 adapter->params.portvec = 1;
5730 adapter->params.vpd.cclk = 50000;
5731 
5732 /* Set PCIe completion timeout value to 4 seconds. */
5733 set_pcie_completion_timeout(adapter, 0xd);
5734 return 0;
5735 }
5736 
5737 /**
5738 * t4_init_tp_params - initialize adap->params.tp
5739 * @adap: the adapter
5740 *
5741 * Initialize various fields of the adapter's TP Parameters structure.
5742 */
5743 int __devinit t4_init_tp_params(struct adapter *adap)
5744 {
5745 int chan;
5746 u32 v;
5747 
5748 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5749 adap->params.tp.tre = G_TIMERRESOLUTION(v);
5750 adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5751 
5752 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5753 for (chan = 0; chan < MAX_NCHAN; chan++)
5754 adap->params.tp.tx_modq[chan] = chan;
5755 
5756 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5757 &adap->params.tp.ingress_config, 1,
5758 A_TP_INGRESS_CONFIG);
5759 refresh_vlan_pri_map(adap);
5760 
5761 return 0;
5762 }
5763 
5764 /**
5765 * t4_filter_field_shift - calculate filter field shift
5766 * @adap: the adapter
5767 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5768 *
5769 * Return the shift position of a filter field within the Compressed
5770 * Filter Tuple. The filter field is specified via its selection bit
5771 * within TP_VLAN_PRI_MAP (the filter mode), e.g. F_VLAN.
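 *
 * Worked example: if the filter mode is F_PORT | F_VLAN | F_PROTOCOL,
 * then t4_filter_field_shift(adap, F_VLAN) returns W_FT_PORT, because
 * F_PORT is the only selected field whose TP_VLAN_PRI_MAP bit lies
 * below F_VLAN; F_PROTOCOL sits above F_VLAN and does not contribute.
 * Requesting a field that is not part of the mode returns -1.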
5772 */ 5773 int t4_filter_field_shift(const struct adapter *adap, int filter_sel) 5774 { 5775 unsigned int filter_mode = adap->params.tp.vlan_pri_map; 5776 unsigned int sel; 5777 int field_shift; 5778 5779 if ((filter_mode & filter_sel) == 0) 5780 return -1; 5781 5782 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { 5783 switch (filter_mode & sel) { 5784 case F_FCOE: field_shift += W_FT_FCOE; break; 5785 case F_PORT: field_shift += W_FT_PORT; break; 5786 case F_VNIC_ID: field_shift += W_FT_VNIC_ID; break; 5787 case F_VLAN: field_shift += W_FT_VLAN; break; 5788 case F_TOS: field_shift += W_FT_TOS; break; 5789 case F_PROTOCOL: field_shift += W_FT_PROTOCOL; break; 5790 case F_ETHERTYPE: field_shift += W_FT_ETHERTYPE; break; 5791 case F_MACMATCH: field_shift += W_FT_MACMATCH; break; 5792 case F_MPSHITTYPE: field_shift += W_FT_MPSHITTYPE; break; 5793 case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break; 5794 } 5795 } 5796 return field_shift; 5797 } 5798 5799 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf) 5800 { 5801 u8 addr[6]; 5802 int ret, i, j; 5803 struct fw_port_cmd c; 5804 u16 rss_size; 5805 adapter_t *adap = p->adapter; 5806 u32 param, val; 5807 5808 memset(&c, 0, sizeof(c)); 5809 5810 for (i = 0, j = -1; i <= p->port_id; i++) { 5811 do { 5812 j++; 5813 } while ((adap->params.portvec & (1 << j)) == 0); 5814 } 5815 5816 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | 5817 F_FW_CMD_REQUEST | F_FW_CMD_READ | 5818 V_FW_PORT_CMD_PORTID(j)); 5819 c.action_to_len16 = htonl( 5820 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | 5821 FW_LEN16(c)); 5822 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 5823 if (ret) 5824 return ret; 5825 5826 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); 5827 if (ret < 0) 5828 return ret; 5829 5830 p->vi[0].viid = ret; 5831 p->tx_chan = j; 5832 p->rx_chan_map = get_mps_bg_map(adap, j); 5833 p->lport = j; 5834 p->vi[0].rss_size = rss_size; 5835 t4_os_set_hw_addr(adap, p->port_id, addr); 5836 5837 ret = ntohl(c.u.info.lstatus_to_modtype); 5838 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ? 
5839 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5840 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5841 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5842 
5843 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5844 
5845 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5846 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
5847 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
5848 ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
5849 if (ret)
5850 p->vi[0].rss_base = 0xffff;
5851 else {
5852 /* MPASS((val >> 16) == rss_size); */
5853 p->vi[0].rss_base = val & 0xffff;
5854 }
5855 
5856 return 0;
5857 }
5858 
5859 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
5860 int sleep_ok)
5861 {
5862 struct fw_sched_cmd cmd;
5863 
5864 memset(&cmd, 0, sizeof(cmd));
5865 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5866 F_FW_CMD_REQUEST |
5867 F_FW_CMD_WRITE);
5868 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5869 
5870 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
5871 cmd.u.config.type = type;
5872 cmd.u.config.minmaxen = minmaxen;
5873 
5874 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
5875 NULL, sleep_ok);
5876 }
5877 
5878 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
5879 int rateunit, int ratemode, int channel, int cl,
5880 int minrate, int maxrate, int weight, int pktsize,
5881 int sleep_ok)
5882 {
5883 struct fw_sched_cmd cmd;
5884 
5885 memset(&cmd, 0, sizeof(cmd));
5886 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5887 F_FW_CMD_REQUEST |
5888 F_FW_CMD_WRITE);
5889 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5890 
5891 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
5892 cmd.u.params.type = type;
5893 cmd.u.params.level = level;
5894 cmd.u.params.mode = mode;
5895 cmd.u.params.ch = channel;
5896 cmd.u.params.cl = cl;
5897 cmd.u.params.unit = rateunit;
5898 cmd.u.params.rate = ratemode;
5899 cmd.u.params.min = cpu_to_be32(minrate);
5900 cmd.u.params.max = cpu_to_be32(maxrate);
5901 cmd.u.params.weight = cpu_to_be16(weight);
5902 cmd.u.params.pktsize = cpu_to_be16(pktsize);
5903 
5904 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
5905 NULL, sleep_ok);
5906 }
5907
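/*
 * Hedged usage sketch for the two scheduler helpers above.  The
 * FW_SCHED_* constants are those defined in the firmware interface
 * header; the channel/class indices and the rate cap are illustrative
 * only.  This would enable min/max rate limiting and then program
 * class 0 on channel 0 as an absolute bit-rate limiter of 100000
 * units (Kbps, assuming the firmware's usual bit-rate convention):
 *
 *	t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, 1, 1);
 *	t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
 *	    FW_SCHED_PARAMS_LEVEL_CL_RL, FW_SCHED_PARAMS_MODE_CLASS,
 *	    FW_SCHED_PARAMS_UNIT_BITRATE, FW_SCHED_PARAMS_RATE_ABS,
 *	    0, 0, 0, 100000, 0, 1500, 1);
 */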