1 /*- 2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_inet.h" 31 32 #include <sys/param.h> 33 #include <sys/eventhandler.h> 34 35 #include "common.h" 36 #include "t4_regs.h" 37 #include "t4_regs_values.h" 38 #include "firmware/t4fw_interface.h" 39 40 #undef msleep 41 #define msleep(x) do { \ 42 if (cold) \ 43 DELAY((x) * 1000); \ 44 else \ 45 pause("t4hw", (x) * hz / 1000); \ 46 } while (0) 47 48 /** 49 * t4_wait_op_done_val - wait until an operation is completed 50 * @adapter: the adapter performing the operation 51 * @reg: the register to check for completion 52 * @mask: a single-bit field within @reg that indicates completion 53 * @polarity: the value of the field when the operation is completed 54 * @attempts: number of check iterations 55 * @delay: delay in usecs between iterations 56 * @valp: where to store the value of the register at completion time 57 * 58 * Wait until an operation is completed by checking a bit in a register 59 * up to @attempts times. If @valp is not NULL the value of the register 60 * at the time it indicated completion is stored there. Returns 0 if the 61 * operation completes and -EAGAIN otherwise. 
62 */ 63 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 64 int polarity, int attempts, int delay, u32 *valp) 65 { 66 while (1) { 67 u32 val = t4_read_reg(adapter, reg); 68 69 if (!!(val & mask) == polarity) { 70 if (valp) 71 *valp = val; 72 return 0; 73 } 74 if (--attempts == 0) 75 return -EAGAIN; 76 if (delay) 77 udelay(delay); 78 } 79 } 80 81 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, 82 int polarity, int attempts, int delay) 83 { 84 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, 85 delay, NULL); 86 } 87 88 /** 89 * t4_set_reg_field - set a register field to a value 90 * @adapter: the adapter to program 91 * @addr: the register address 92 * @mask: specifies the portion of the register to modify 93 * @val: the new value for the register field 94 * 95 * Sets a register field specified by the supplied mask to the 96 * given value. 97 */ 98 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, 99 u32 val) 100 { 101 u32 v = t4_read_reg(adapter, addr) & ~mask; 102 103 t4_write_reg(adapter, addr, v | val); 104 (void) t4_read_reg(adapter, addr); /* flush */ 105 } 106 107 /** 108 * t4_read_indirect - read indirectly addressed registers 109 * @adap: the adapter 110 * @addr_reg: register holding the indirect address 111 * @data_reg: register holding the value of the indirect register 112 * @vals: where the read register values are stored 113 * @nregs: how many indirect registers to read 114 * @start_idx: index of first indirect register to read 115 * 116 * Reads registers that are accessed indirectly through an address/data 117 * register pair. 
118 */ 119 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 120 unsigned int data_reg, u32 *vals, 121 unsigned int nregs, unsigned int start_idx) 122 { 123 while (nregs--) { 124 t4_write_reg(adap, addr_reg, start_idx); 125 *vals++ = t4_read_reg(adap, data_reg); 126 start_idx++; 127 } 128 } 129 130 /** 131 * t4_write_indirect - write indirectly addressed registers 132 * @adap: the adapter 133 * @addr_reg: register holding the indirect addresses 134 * @data_reg: register holding the value for the indirect registers 135 * @vals: values to write 136 * @nregs: how many indirect registers to write 137 * @start_idx: address of first indirect register to write 138 * 139 * Writes a sequential block of registers that are accessed indirectly 140 * through an address/data register pair. 141 */ 142 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 143 unsigned int data_reg, const u32 *vals, 144 unsigned int nregs, unsigned int start_idx) 145 { 146 while (nregs--) { 147 t4_write_reg(adap, addr_reg, start_idx++); 148 t4_write_reg(adap, data_reg, *vals++); 149 } 150 } 151 152 /* 153 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor 154 * mechanism. This guarantees that we get the real value even if we're 155 * operating within a Virtual Machine and the Hypervisor is trapping our 156 * Configuration Space accesses. 157 * 158 * N.B. This routine should only be used as a last resort: the firmware uses 159 * the backdoor registers on a regular basis and we can end up 160 * conflicting with it's uses! 
161 */ 162 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg) 163 { 164 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg); 165 u32 val; 166 167 if (chip_id(adap) <= CHELSIO_T5) 168 req |= F_ENABLE; 169 else 170 req |= F_T6_ENABLE; 171 172 if (is_t4(adap)) 173 req |= F_LOCALCFG; 174 175 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req); 176 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA); 177 178 /* 179 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a 180 * Configuration Space read. (None of the other fields matter when 181 * F_ENABLE is 0 so a simple register write is easier than a 182 * read-modify-write via t4_set_reg_field().) 183 */ 184 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0); 185 186 return val; 187 } 188 189 /* 190 * t4_report_fw_error - report firmware error 191 * @adap: the adapter 192 * 193 * The adapter firmware can indicate error conditions to the host. 194 * If the firmware has indicated an error, print out the reason for 195 * the firmware error. 196 */ 197 static void t4_report_fw_error(struct adapter *adap) 198 { 199 static const char *const reason[] = { 200 "Crash", /* PCIE_FW_EVAL_CRASH */ 201 "During Device Preparation", /* PCIE_FW_EVAL_PREP */ 202 "During Device Configuration", /* PCIE_FW_EVAL_CONF */ 203 "During Device Initialization", /* PCIE_FW_EVAL_INIT */ 204 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ 205 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ 206 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ 207 "Reserved", /* reserved */ 208 }; 209 u32 pcie_fw; 210 211 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 212 if (pcie_fw & F_PCIE_FW_ERR) 213 CH_ERR(adap, "Firmware reports adapter error: %s\n", 214 reason[G_PCIE_FW_EVAL(pcie_fw)]); 215 } 216 217 /* 218 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 
219 */ 220 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, 221 u32 mbox_addr) 222 { 223 for ( ; nflit; nflit--, mbox_addr += 8) 224 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); 225 } 226 227 /* 228 * Handle a FW assertion reported in a mailbox. 229 */ 230 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt) 231 { 232 CH_ALERT(adap, 233 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", 234 asrt->u.assert.filename_0_7, 235 be32_to_cpu(asrt->u.assert.line), 236 be32_to_cpu(asrt->u.assert.x), 237 be32_to_cpu(asrt->u.assert.y)); 238 } 239 240 #define X_CIM_PF_NOACCESS 0xeeeeeeee 241 /** 242 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox 243 * @adap: the adapter 244 * @mbox: index of the mailbox to use 245 * @cmd: the command to write 246 * @size: command length in bytes 247 * @rpl: where to optionally store the reply 248 * @sleep_ok: if true we may sleep while awaiting command completion 249 * @timeout: time to wait for command to finish before timing out 250 * (negative implies @sleep_ok=false) 251 * 252 * Sends the given command to FW through the selected mailbox and waits 253 * for the FW to execute the command. If @rpl is not %NULL it is used to 254 * store the FW's reply to the command. The command and its optional 255 * reply are of the same length. Some FW commands like RESET and 256 * INITIALIZE can take a considerable amount of time to execute. 257 * @sleep_ok determines whether we may sleep while awaiting the response. 258 * If sleeping is allowed we use progressive backoff otherwise we spin. 259 * Note that passing in a negative @timeout is an alternate mechanism 260 * for specifying @sleep_ok=false. This is useful when a higher level 261 * interface allows for specification of @timeout but not @sleep_ok ... 262 * 263 * The return value is 0 on success or a negative errno on failure. 
A 264 * failure can happen either because we are not able to execute the 265 * command or FW executes it but signals an error. In the latter case 266 * the return value is the error code indicated by FW (negated). 267 */ 268 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, 269 int size, void *rpl, bool sleep_ok, int timeout) 270 { 271 /* 272 * We delay in small increments at first in an effort to maintain 273 * responsiveness for simple, fast executing commands but then back 274 * off to larger delays to a maximum retry delay. 275 */ 276 static const int delay[] = { 277 1, 1, 3, 5, 10, 10, 20, 50, 100 278 }; 279 u32 v; 280 u64 res; 281 int i, ms, delay_idx, ret; 282 const __be64 *p = cmd; 283 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA); 284 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL); 285 u32 ctl; 286 __be64 cmd_rpl[MBOX_LEN/8]; 287 u32 pcie_fw; 288 289 if (adap->flags & CHK_MBOX_ACCESS) 290 ASSERT_SYNCHRONIZED_OP(adap); 291 292 if ((size & 15) || size > MBOX_LEN) 293 return -EINVAL; 294 295 if (adap->flags & IS_VF) { 296 if (is_t6(adap)) 297 data_reg = FW_T6VF_MBDATA_BASE_ADDR; 298 else 299 data_reg = FW_T4VF_MBDATA_BASE_ADDR; 300 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL); 301 } 302 303 /* 304 * If we have a negative timeout, that implies that we can't sleep. 305 */ 306 if (timeout < 0) { 307 sleep_ok = false; 308 timeout = -timeout; 309 } 310 311 /* 312 * Attempt to gain access to the mailbox. 313 */ 314 for (i = 0; i < 4; i++) { 315 ctl = t4_read_reg(adap, ctl_reg); 316 v = G_MBOWNER(ctl); 317 if (v != X_MBOWNER_NONE) 318 break; 319 } 320 321 /* 322 * If we were unable to gain access, dequeue ourselves from the 323 * mailbox atomic access list and report the error to our caller. 324 */ 325 if (v != X_MBOWNER_PL) { 326 t4_report_fw_error(adap); 327 ret = (v == X_MBOWNER_FW) ? 
-EBUSY : -ETIMEDOUT; 328 return ret; 329 } 330 331 /* 332 * If we gain ownership of the mailbox and there's a "valid" message 333 * in it, this is likely an asynchronous error message from the 334 * firmware. So we'll report that and then proceed on with attempting 335 * to issue our own command ... which may well fail if the error 336 * presaged the firmware crashing ... 337 */ 338 if (ctl & F_MBMSGVALID) { 339 CH_ERR(adap, "found VALID command in mbox %u: %016llx %016llx " 340 "%016llx %016llx %016llx %016llx %016llx %016llx\n", 341 mbox, (unsigned long long)t4_read_reg64(adap, data_reg), 342 (unsigned long long)t4_read_reg64(adap, data_reg + 8), 343 (unsigned long long)t4_read_reg64(adap, data_reg + 16), 344 (unsigned long long)t4_read_reg64(adap, data_reg + 24), 345 (unsigned long long)t4_read_reg64(adap, data_reg + 32), 346 (unsigned long long)t4_read_reg64(adap, data_reg + 40), 347 (unsigned long long)t4_read_reg64(adap, data_reg + 48), 348 (unsigned long long)t4_read_reg64(adap, data_reg + 56)); 349 } 350 351 /* 352 * Copy in the new mailbox command and send it on its way ... 353 */ 354 for (i = 0; i < size; i += 8, p++) 355 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p)); 356 357 if (adap->flags & IS_VF) { 358 /* 359 * For the VFs, the Mailbox Data "registers" are 360 * actually backed by T4's "MA" interface rather than 361 * PL Registers (as is the case for the PFs). Because 362 * these are in different coherency domains, the write 363 * to the VF's PL-register-backed Mailbox Control can 364 * race in front of the writes to the MA-backed VF 365 * Mailbox Data "registers". So we need to do a 366 * read-back on at least one byte of the VF Mailbox 367 * Data registers before doing the write to the VF 368 * Mailbox Control register. 
369 */ 370 t4_read_reg(adap, data_reg); 371 } 372 373 CH_DUMP_MBOX(adap, mbox, data_reg); 374 375 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW)); 376 t4_read_reg(adap, ctl_reg); /* flush write */ 377 378 delay_idx = 0; 379 ms = delay[0]; 380 381 /* 382 * Loop waiting for the reply; bail out if we time out or the firmware 383 * reports an error. 384 */ 385 pcie_fw = 0; 386 for (i = 0; i < timeout; i += ms) { 387 if (!(adap->flags & IS_VF)) { 388 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 389 if (pcie_fw & F_PCIE_FW_ERR) 390 break; 391 } 392 if (sleep_ok) { 393 ms = delay[delay_idx]; /* last element may repeat */ 394 if (delay_idx < ARRAY_SIZE(delay) - 1) 395 delay_idx++; 396 msleep(ms); 397 } else { 398 mdelay(ms); 399 } 400 401 v = t4_read_reg(adap, ctl_reg); 402 if (v == X_CIM_PF_NOACCESS) 403 continue; 404 if (G_MBOWNER(v) == X_MBOWNER_PL) { 405 if (!(v & F_MBMSGVALID)) { 406 t4_write_reg(adap, ctl_reg, 407 V_MBOWNER(X_MBOWNER_NONE)); 408 continue; 409 } 410 411 /* 412 * Retrieve the command reply and release the mailbox. 413 */ 414 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg); 415 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE)); 416 417 CH_DUMP_MBOX(adap, mbox, data_reg); 418 419 res = be64_to_cpu(cmd_rpl[0]); 420 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) { 421 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl); 422 res = V_FW_CMD_RETVAL(EIO); 423 } else if (rpl) 424 memcpy(rpl, cmd_rpl, size); 425 return -G_FW_CMD_RETVAL((int)res); 426 } 427 } 428 429 /* 430 * We timed out waiting for a reply to our mailbox command. Report 431 * the error and also check to see if the firmware reported any 432 * errors ... 433 */ 434 ret = (pcie_fw & F_PCIE_FW_ERR) ? 
-ENXIO : -ETIMEDOUT; 435 CH_ERR(adap, "command %#x in mailbox %d timed out\n", 436 *(const u8 *)cmd, mbox); 437 438 /* If DUMP_MBOX is set the mbox has already been dumped */ 439 if ((adap->debug_flags & DF_DUMP_MBOX) == 0) { 440 p = cmd; 441 CH_ERR(adap, "mbox: %016llx %016llx %016llx %016llx " 442 "%016llx %016llx %016llx %016llx\n", 443 (unsigned long long)be64_to_cpu(p[0]), 444 (unsigned long long)be64_to_cpu(p[1]), 445 (unsigned long long)be64_to_cpu(p[2]), 446 (unsigned long long)be64_to_cpu(p[3]), 447 (unsigned long long)be64_to_cpu(p[4]), 448 (unsigned long long)be64_to_cpu(p[5]), 449 (unsigned long long)be64_to_cpu(p[6]), 450 (unsigned long long)be64_to_cpu(p[7])); 451 } 452 453 t4_report_fw_error(adap); 454 t4_fatal_err(adap); 455 return ret; 456 } 457 458 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, 459 void *rpl, bool sleep_ok) 460 { 461 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, 462 sleep_ok, FW_CMD_MAX_TIMEOUT); 463 464 } 465 466 static int t4_edc_err_read(struct adapter *adap, int idx) 467 { 468 u32 edc_ecc_err_addr_reg; 469 u32 edc_bist_status_rdata_reg; 470 471 if (is_t4(adap)) { 472 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__); 473 return 0; 474 } 475 if (idx != MEM_EDC0 && idx != MEM_EDC1) { 476 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx); 477 return 0; 478 } 479 480 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx); 481 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx); 482 483 CH_WARN(adap, 484 "edc%d err addr 0x%x: 0x%x.\n", 485 idx, edc_ecc_err_addr_reg, 486 t4_read_reg(adap, edc_ecc_err_addr_reg)); 487 CH_WARN(adap, 488 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n", 489 edc_bist_status_rdata_reg, 490 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg), 491 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8), 492 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16), 
493 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24), 494 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32), 495 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40), 496 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48), 497 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56), 498 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64)); 499 500 return 0; 501 } 502 503 /** 504 * t4_mc_read - read from MC through backdoor accesses 505 * @adap: the adapter 506 * @idx: which MC to access 507 * @addr: address of first byte requested 508 * @data: 64 bytes of data containing the requested address 509 * @ecc: where to store the corresponding 64-bit ECC word 510 * 511 * Read 64 bytes of data from MC starting at a 64-byte-aligned address 512 * that covers the requested address @addr. If @parity is not %NULL it 513 * is assigned the 64-bit ECC word for the read data. 514 */ 515 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) 516 { 517 int i; 518 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg; 519 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg; 520 521 if (is_t4(adap)) { 522 mc_bist_cmd_reg = A_MC_BIST_CMD; 523 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR; 524 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN; 525 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA; 526 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN; 527 } else { 528 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx); 529 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx); 530 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx); 531 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA, 532 idx); 533 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN, 534 idx); 535 } 536 537 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST) 538 return -EBUSY; 539 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU); 540 
t4_write_reg(adap, mc_bist_cmd_len_reg, 64); 541 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc); 542 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) | 543 F_START_BIST | V_BIST_CMD_GAP(1)); 544 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1); 545 if (i) 546 return i; 547 548 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i) 549 550 for (i = 15; i >= 0; i--) 551 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i))); 552 if (ecc) 553 *ecc = t4_read_reg64(adap, MC_DATA(16)); 554 #undef MC_DATA 555 return 0; 556 } 557 558 /** 559 * t4_edc_read - read from EDC through backdoor accesses 560 * @adap: the adapter 561 * @idx: which EDC to access 562 * @addr: address of first byte requested 563 * @data: 64 bytes of data containing the requested address 564 * @ecc: where to store the corresponding 64-bit ECC word 565 * 566 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address 567 * that covers the requested address @addr. If @parity is not %NULL it 568 * is assigned the 64-bit ECC word for the read data. 569 */ 570 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) 571 { 572 int i; 573 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg; 574 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg; 575 576 if (is_t4(adap)) { 577 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx); 578 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx); 579 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx); 580 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN, 581 idx); 582 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA, 583 idx); 584 } else { 585 /* 586 * These macro are missing in t4_regs.h file. 587 * Added temporarily for testing. 
588 */ 589 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) 590 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) 591 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx); 592 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx); 593 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx); 594 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN, 595 idx); 596 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA, 597 idx); 598 #undef EDC_REG_T5 599 #undef EDC_STRIDE_T5 600 } 601 602 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST) 603 return -EBUSY; 604 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU); 605 t4_write_reg(adap, edc_bist_cmd_len_reg, 64); 606 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc); 607 t4_write_reg(adap, edc_bist_cmd_reg, 608 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST); 609 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1); 610 if (i) 611 return i; 612 613 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i) 614 615 for (i = 15; i >= 0; i--) 616 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i))); 617 if (ecc) 618 *ecc = t4_read_reg64(adap, EDC_DATA(16)); 619 #undef EDC_DATA 620 return 0; 621 } 622 623 /** 624 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer 625 * @adap: the adapter 626 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC 627 * @addr: address within indicated memory type 628 * @len: amount of memory to read 629 * @buf: host memory buffer 630 * 631 * Reads an [almost] arbitrary memory region in the firmware: the 632 * firmware memory address, length and host buffer must be aligned on 633 * 32-bit boudaries. The memory is returned as a raw byte sequence from 634 * the firmware's memory. If this memory contains data structures which 635 * contain multi-byte integers, it's the callers responsibility to 636 * perform appropriate byte order conversions. 
637 */ 638 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len, 639 __be32 *buf) 640 { 641 u32 pos, start, end, offset; 642 int ret; 643 644 /* 645 * Argument sanity checks ... 646 */ 647 if ((addr & 0x3) || (len & 0x3)) 648 return -EINVAL; 649 650 /* 651 * The underlaying EDC/MC read routines read 64 bytes at a time so we 652 * need to round down the start and round up the end. We'll start 653 * copying out of the first line at (addr - start) a word at a time. 654 */ 655 start = rounddown2(addr, 64); 656 end = roundup2(addr + len, 64); 657 offset = (addr - start)/sizeof(__be32); 658 659 for (pos = start; pos < end; pos += 64, offset = 0) { 660 __be32 data[16]; 661 662 /* 663 * Read the chip's memory block and bail if there's an error. 664 */ 665 if ((mtype == MEM_MC) || (mtype == MEM_MC1)) 666 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL); 667 else 668 ret = t4_edc_read(adap, mtype, pos, data, NULL); 669 if (ret) 670 return ret; 671 672 /* 673 * Copy the data into the caller's memory buffer. 674 */ 675 while (offset < 16 && len > 0) { 676 *buf++ = data[offset++]; 677 len -= sizeof(__be32); 678 } 679 } 680 681 return 0; 682 } 683 684 /* 685 * Return the specified PCI-E Configuration Space register from our Physical 686 * Function. We try first via a Firmware LDST Command (if fw_attach != 0) 687 * since we prefer to let the firmware own all of these registers, but if that 688 * fails we go for it directly ourselves. 689 */ 690 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach) 691 { 692 693 /* 694 * If fw_attach != 0, construct and send the Firmware LDST Command to 695 * retrieve the specified PCI-E Configuration Space register. 
696 */ 697 if (drv_fw_attach != 0) { 698 struct fw_ldst_cmd ldst_cmd; 699 int ret; 700 701 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 702 ldst_cmd.op_to_addrspace = 703 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 704 F_FW_CMD_REQUEST | 705 F_FW_CMD_READ | 706 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE)); 707 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd)); 708 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1); 709 ldst_cmd.u.pcie.ctrl_to_fn = 710 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf)); 711 ldst_cmd.u.pcie.r = reg; 712 713 /* 714 * If the LDST Command succeeds, return the result, otherwise 715 * fall through to reading it directly ourselves ... 716 */ 717 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), 718 &ldst_cmd); 719 if (ret == 0) 720 return be32_to_cpu(ldst_cmd.u.pcie.data[0]); 721 722 CH_WARN(adap, "Firmware failed to return " 723 "Configuration Space register %d, err = %d\n", 724 reg, -ret); 725 } 726 727 /* 728 * Read the desired Configuration Space register via the PCI-E 729 * Backdoor mechanism. 730 */ 731 return t4_hw_pci_read_cfg4(adap, reg); 732 } 733 734 /** 735 * t4_get_regs_len - return the size of the chips register set 736 * @adapter: the adapter 737 * 738 * Returns the size of the chip's BAR0 register space. 
739 */ 740 unsigned int t4_get_regs_len(struct adapter *adapter) 741 { 742 unsigned int chip_version = chip_id(adapter); 743 744 switch (chip_version) { 745 case CHELSIO_T4: 746 if (adapter->flags & IS_VF) 747 return FW_T4VF_REGMAP_SIZE; 748 return T4_REGMAP_SIZE; 749 750 case CHELSIO_T5: 751 case CHELSIO_T6: 752 if (adapter->flags & IS_VF) 753 return FW_T4VF_REGMAP_SIZE; 754 return T5_REGMAP_SIZE; 755 } 756 757 CH_ERR(adapter, 758 "Unsupported chip version %d\n", chip_version); 759 return 0; 760 } 761 762 /** 763 * t4_get_regs - read chip registers into provided buffer 764 * @adap: the adapter 765 * @buf: register buffer 766 * @buf_size: size (in bytes) of register buffer 767 * 768 * If the provided register buffer isn't large enough for the chip's 769 * full register range, the register dump will be truncated to the 770 * register buffer's size. 771 */ 772 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size) 773 { 774 static const unsigned int t4_reg_ranges[] = { 775 0x1008, 0x1108, 776 0x1180, 0x1184, 777 0x1190, 0x1194, 778 0x11a0, 0x11a4, 779 0x11b0, 0x11b4, 780 0x11fc, 0x123c, 781 0x1300, 0x173c, 782 0x1800, 0x18fc, 783 0x3000, 0x30d8, 784 0x30e0, 0x30e4, 785 0x30ec, 0x5910, 786 0x5920, 0x5924, 787 0x5960, 0x5960, 788 0x5968, 0x5968, 789 0x5970, 0x5970, 790 0x5978, 0x5978, 791 0x5980, 0x5980, 792 0x5988, 0x5988, 793 0x5990, 0x5990, 794 0x5998, 0x5998, 795 0x59a0, 0x59d4, 796 0x5a00, 0x5ae0, 797 0x5ae8, 0x5ae8, 798 0x5af0, 0x5af0, 799 0x5af8, 0x5af8, 800 0x6000, 0x6098, 801 0x6100, 0x6150, 802 0x6200, 0x6208, 803 0x6240, 0x6248, 804 0x6280, 0x62b0, 805 0x62c0, 0x6338, 806 0x6370, 0x638c, 807 0x6400, 0x643c, 808 0x6500, 0x6524, 809 0x6a00, 0x6a04, 810 0x6a14, 0x6a38, 811 0x6a60, 0x6a70, 812 0x6a78, 0x6a78, 813 0x6b00, 0x6b0c, 814 0x6b1c, 0x6b84, 815 0x6bf0, 0x6bf8, 816 0x6c00, 0x6c0c, 817 0x6c1c, 0x6c84, 818 0x6cf0, 0x6cf8, 819 0x6d00, 0x6d0c, 820 0x6d1c, 0x6d84, 821 0x6df0, 0x6df8, 822 0x6e00, 0x6e0c, 823 0x6e1c, 0x6e84, 824 0x6ef0, 0x6ef8, 825 
0x6f00, 0x6f0c, 826 0x6f1c, 0x6f84, 827 0x6ff0, 0x6ff8, 828 0x7000, 0x700c, 829 0x701c, 0x7084, 830 0x70f0, 0x70f8, 831 0x7100, 0x710c, 832 0x711c, 0x7184, 833 0x71f0, 0x71f8, 834 0x7200, 0x720c, 835 0x721c, 0x7284, 836 0x72f0, 0x72f8, 837 0x7300, 0x730c, 838 0x731c, 0x7384, 839 0x73f0, 0x73f8, 840 0x7400, 0x7450, 841 0x7500, 0x7530, 842 0x7600, 0x760c, 843 0x7614, 0x761c, 844 0x7680, 0x76cc, 845 0x7700, 0x7798, 846 0x77c0, 0x77fc, 847 0x7900, 0x79fc, 848 0x7b00, 0x7b58, 849 0x7b60, 0x7b84, 850 0x7b8c, 0x7c38, 851 0x7d00, 0x7d38, 852 0x7d40, 0x7d80, 853 0x7d8c, 0x7ddc, 854 0x7de4, 0x7e04, 855 0x7e10, 0x7e1c, 856 0x7e24, 0x7e38, 857 0x7e40, 0x7e44, 858 0x7e4c, 0x7e78, 859 0x7e80, 0x7ea4, 860 0x7eac, 0x7edc, 861 0x7ee8, 0x7efc, 862 0x8dc0, 0x8e04, 863 0x8e10, 0x8e1c, 864 0x8e30, 0x8e78, 865 0x8ea0, 0x8eb8, 866 0x8ec0, 0x8f6c, 867 0x8fc0, 0x9008, 868 0x9010, 0x9058, 869 0x9060, 0x9060, 870 0x9068, 0x9074, 871 0x90fc, 0x90fc, 872 0x9400, 0x9408, 873 0x9410, 0x9458, 874 0x9600, 0x9600, 875 0x9608, 0x9638, 876 0x9640, 0x96bc, 877 0x9800, 0x9808, 878 0x9820, 0x983c, 879 0x9850, 0x9864, 880 0x9c00, 0x9c6c, 881 0x9c80, 0x9cec, 882 0x9d00, 0x9d6c, 883 0x9d80, 0x9dec, 884 0x9e00, 0x9e6c, 885 0x9e80, 0x9eec, 886 0x9f00, 0x9f6c, 887 0x9f80, 0x9fec, 888 0xd004, 0xd004, 889 0xd010, 0xd03c, 890 0xdfc0, 0xdfe0, 891 0xe000, 0xea7c, 892 0xf000, 0x11110, 893 0x11118, 0x11190, 894 0x19040, 0x1906c, 895 0x19078, 0x19080, 896 0x1908c, 0x190e4, 897 0x190f0, 0x190f8, 898 0x19100, 0x19110, 899 0x19120, 0x19124, 900 0x19150, 0x19194, 901 0x1919c, 0x191b0, 902 0x191d0, 0x191e8, 903 0x19238, 0x1924c, 904 0x193f8, 0x1943c, 905 0x1944c, 0x19474, 906 0x19490, 0x194e0, 907 0x194f0, 0x194f8, 908 0x19800, 0x19c08, 909 0x19c10, 0x19c90, 910 0x19ca0, 0x19ce4, 911 0x19cf0, 0x19d40, 912 0x19d50, 0x19d94, 913 0x19da0, 0x19de8, 914 0x19df0, 0x19e40, 915 0x19e50, 0x19e90, 916 0x19ea0, 0x19f4c, 917 0x1a000, 0x1a004, 918 0x1a010, 0x1a06c, 919 0x1a0b0, 0x1a0e4, 920 0x1a0ec, 0x1a0f4, 921 0x1a100, 0x1a108, 922 
0x1a114, 0x1a120, 923 0x1a128, 0x1a130, 924 0x1a138, 0x1a138, 925 0x1a190, 0x1a1c4, 926 0x1a1fc, 0x1a1fc, 927 0x1e040, 0x1e04c, 928 0x1e284, 0x1e28c, 929 0x1e2c0, 0x1e2c0, 930 0x1e2e0, 0x1e2e0, 931 0x1e300, 0x1e384, 932 0x1e3c0, 0x1e3c8, 933 0x1e440, 0x1e44c, 934 0x1e684, 0x1e68c, 935 0x1e6c0, 0x1e6c0, 936 0x1e6e0, 0x1e6e0, 937 0x1e700, 0x1e784, 938 0x1e7c0, 0x1e7c8, 939 0x1e840, 0x1e84c, 940 0x1ea84, 0x1ea8c, 941 0x1eac0, 0x1eac0, 942 0x1eae0, 0x1eae0, 943 0x1eb00, 0x1eb84, 944 0x1ebc0, 0x1ebc8, 945 0x1ec40, 0x1ec4c, 946 0x1ee84, 0x1ee8c, 947 0x1eec0, 0x1eec0, 948 0x1eee0, 0x1eee0, 949 0x1ef00, 0x1ef84, 950 0x1efc0, 0x1efc8, 951 0x1f040, 0x1f04c, 952 0x1f284, 0x1f28c, 953 0x1f2c0, 0x1f2c0, 954 0x1f2e0, 0x1f2e0, 955 0x1f300, 0x1f384, 956 0x1f3c0, 0x1f3c8, 957 0x1f440, 0x1f44c, 958 0x1f684, 0x1f68c, 959 0x1f6c0, 0x1f6c0, 960 0x1f6e0, 0x1f6e0, 961 0x1f700, 0x1f784, 962 0x1f7c0, 0x1f7c8, 963 0x1f840, 0x1f84c, 964 0x1fa84, 0x1fa8c, 965 0x1fac0, 0x1fac0, 966 0x1fae0, 0x1fae0, 967 0x1fb00, 0x1fb84, 968 0x1fbc0, 0x1fbc8, 969 0x1fc40, 0x1fc4c, 970 0x1fe84, 0x1fe8c, 971 0x1fec0, 0x1fec0, 972 0x1fee0, 0x1fee0, 973 0x1ff00, 0x1ff84, 974 0x1ffc0, 0x1ffc8, 975 0x20000, 0x2002c, 976 0x20100, 0x2013c, 977 0x20190, 0x201a0, 978 0x201a8, 0x201b8, 979 0x201c4, 0x201c8, 980 0x20200, 0x20318, 981 0x20400, 0x204b4, 982 0x204c0, 0x20528, 983 0x20540, 0x20614, 984 0x21000, 0x21040, 985 0x2104c, 0x21060, 986 0x210c0, 0x210ec, 987 0x21200, 0x21268, 988 0x21270, 0x21284, 989 0x212fc, 0x21388, 990 0x21400, 0x21404, 991 0x21500, 0x21500, 992 0x21510, 0x21518, 993 0x2152c, 0x21530, 994 0x2153c, 0x2153c, 995 0x21550, 0x21554, 996 0x21600, 0x21600, 997 0x21608, 0x2161c, 998 0x21624, 0x21628, 999 0x21630, 0x21634, 1000 0x2163c, 0x2163c, 1001 0x21700, 0x2171c, 1002 0x21780, 0x2178c, 1003 0x21800, 0x21818, 1004 0x21820, 0x21828, 1005 0x21830, 0x21848, 1006 0x21850, 0x21854, 1007 0x21860, 0x21868, 1008 0x21870, 0x21870, 1009 0x21878, 0x21898, 1010 0x218a0, 0x218a8, 1011 0x218b0, 0x218c8, 1012 
0x218d0, 0x218d4, 1013 0x218e0, 0x218e8, 1014 0x218f0, 0x218f0, 1015 0x218f8, 0x21a18, 1016 0x21a20, 0x21a28, 1017 0x21a30, 0x21a48, 1018 0x21a50, 0x21a54, 1019 0x21a60, 0x21a68, 1020 0x21a70, 0x21a70, 1021 0x21a78, 0x21a98, 1022 0x21aa0, 0x21aa8, 1023 0x21ab0, 0x21ac8, 1024 0x21ad0, 0x21ad4, 1025 0x21ae0, 0x21ae8, 1026 0x21af0, 0x21af0, 1027 0x21af8, 0x21c18, 1028 0x21c20, 0x21c20, 1029 0x21c28, 0x21c30, 1030 0x21c38, 0x21c38, 1031 0x21c80, 0x21c98, 1032 0x21ca0, 0x21ca8, 1033 0x21cb0, 0x21cc8, 1034 0x21cd0, 0x21cd4, 1035 0x21ce0, 0x21ce8, 1036 0x21cf0, 0x21cf0, 1037 0x21cf8, 0x21d7c, 1038 0x21e00, 0x21e04, 1039 0x22000, 0x2202c, 1040 0x22100, 0x2213c, 1041 0x22190, 0x221a0, 1042 0x221a8, 0x221b8, 1043 0x221c4, 0x221c8, 1044 0x22200, 0x22318, 1045 0x22400, 0x224b4, 1046 0x224c0, 0x22528, 1047 0x22540, 0x22614, 1048 0x23000, 0x23040, 1049 0x2304c, 0x23060, 1050 0x230c0, 0x230ec, 1051 0x23200, 0x23268, 1052 0x23270, 0x23284, 1053 0x232fc, 0x23388, 1054 0x23400, 0x23404, 1055 0x23500, 0x23500, 1056 0x23510, 0x23518, 1057 0x2352c, 0x23530, 1058 0x2353c, 0x2353c, 1059 0x23550, 0x23554, 1060 0x23600, 0x23600, 1061 0x23608, 0x2361c, 1062 0x23624, 0x23628, 1063 0x23630, 0x23634, 1064 0x2363c, 0x2363c, 1065 0x23700, 0x2371c, 1066 0x23780, 0x2378c, 1067 0x23800, 0x23818, 1068 0x23820, 0x23828, 1069 0x23830, 0x23848, 1070 0x23850, 0x23854, 1071 0x23860, 0x23868, 1072 0x23870, 0x23870, 1073 0x23878, 0x23898, 1074 0x238a0, 0x238a8, 1075 0x238b0, 0x238c8, 1076 0x238d0, 0x238d4, 1077 0x238e0, 0x238e8, 1078 0x238f0, 0x238f0, 1079 0x238f8, 0x23a18, 1080 0x23a20, 0x23a28, 1081 0x23a30, 0x23a48, 1082 0x23a50, 0x23a54, 1083 0x23a60, 0x23a68, 1084 0x23a70, 0x23a70, 1085 0x23a78, 0x23a98, 1086 0x23aa0, 0x23aa8, 1087 0x23ab0, 0x23ac8, 1088 0x23ad0, 0x23ad4, 1089 0x23ae0, 0x23ae8, 1090 0x23af0, 0x23af0, 1091 0x23af8, 0x23c18, 1092 0x23c20, 0x23c20, 1093 0x23c28, 0x23c30, 1094 0x23c38, 0x23c38, 1095 0x23c80, 0x23c98, 1096 0x23ca0, 0x23ca8, 1097 0x23cb0, 0x23cc8, 1098 0x23cd0, 0x23cd4, 
1099 0x23ce0, 0x23ce8, 1100 0x23cf0, 0x23cf0, 1101 0x23cf8, 0x23d7c, 1102 0x23e00, 0x23e04, 1103 0x24000, 0x2402c, 1104 0x24100, 0x2413c, 1105 0x24190, 0x241a0, 1106 0x241a8, 0x241b8, 1107 0x241c4, 0x241c8, 1108 0x24200, 0x24318, 1109 0x24400, 0x244b4, 1110 0x244c0, 0x24528, 1111 0x24540, 0x24614, 1112 0x25000, 0x25040, 1113 0x2504c, 0x25060, 1114 0x250c0, 0x250ec, 1115 0x25200, 0x25268, 1116 0x25270, 0x25284, 1117 0x252fc, 0x25388, 1118 0x25400, 0x25404, 1119 0x25500, 0x25500, 1120 0x25510, 0x25518, 1121 0x2552c, 0x25530, 1122 0x2553c, 0x2553c, 1123 0x25550, 0x25554, 1124 0x25600, 0x25600, 1125 0x25608, 0x2561c, 1126 0x25624, 0x25628, 1127 0x25630, 0x25634, 1128 0x2563c, 0x2563c, 1129 0x25700, 0x2571c, 1130 0x25780, 0x2578c, 1131 0x25800, 0x25818, 1132 0x25820, 0x25828, 1133 0x25830, 0x25848, 1134 0x25850, 0x25854, 1135 0x25860, 0x25868, 1136 0x25870, 0x25870, 1137 0x25878, 0x25898, 1138 0x258a0, 0x258a8, 1139 0x258b0, 0x258c8, 1140 0x258d0, 0x258d4, 1141 0x258e0, 0x258e8, 1142 0x258f0, 0x258f0, 1143 0x258f8, 0x25a18, 1144 0x25a20, 0x25a28, 1145 0x25a30, 0x25a48, 1146 0x25a50, 0x25a54, 1147 0x25a60, 0x25a68, 1148 0x25a70, 0x25a70, 1149 0x25a78, 0x25a98, 1150 0x25aa0, 0x25aa8, 1151 0x25ab0, 0x25ac8, 1152 0x25ad0, 0x25ad4, 1153 0x25ae0, 0x25ae8, 1154 0x25af0, 0x25af0, 1155 0x25af8, 0x25c18, 1156 0x25c20, 0x25c20, 1157 0x25c28, 0x25c30, 1158 0x25c38, 0x25c38, 1159 0x25c80, 0x25c98, 1160 0x25ca0, 0x25ca8, 1161 0x25cb0, 0x25cc8, 1162 0x25cd0, 0x25cd4, 1163 0x25ce0, 0x25ce8, 1164 0x25cf0, 0x25cf0, 1165 0x25cf8, 0x25d7c, 1166 0x25e00, 0x25e04, 1167 0x26000, 0x2602c, 1168 0x26100, 0x2613c, 1169 0x26190, 0x261a0, 1170 0x261a8, 0x261b8, 1171 0x261c4, 0x261c8, 1172 0x26200, 0x26318, 1173 0x26400, 0x264b4, 1174 0x264c0, 0x26528, 1175 0x26540, 0x26614, 1176 0x27000, 0x27040, 1177 0x2704c, 0x27060, 1178 0x270c0, 0x270ec, 1179 0x27200, 0x27268, 1180 0x27270, 0x27284, 1181 0x272fc, 0x27388, 1182 0x27400, 0x27404, 1183 0x27500, 0x27500, 1184 0x27510, 0x27518, 1185 0x2752c, 
0x27530, 1186 0x2753c, 0x2753c, 1187 0x27550, 0x27554, 1188 0x27600, 0x27600, 1189 0x27608, 0x2761c, 1190 0x27624, 0x27628, 1191 0x27630, 0x27634, 1192 0x2763c, 0x2763c, 1193 0x27700, 0x2771c, 1194 0x27780, 0x2778c, 1195 0x27800, 0x27818, 1196 0x27820, 0x27828, 1197 0x27830, 0x27848, 1198 0x27850, 0x27854, 1199 0x27860, 0x27868, 1200 0x27870, 0x27870, 1201 0x27878, 0x27898, 1202 0x278a0, 0x278a8, 1203 0x278b0, 0x278c8, 1204 0x278d0, 0x278d4, 1205 0x278e0, 0x278e8, 1206 0x278f0, 0x278f0, 1207 0x278f8, 0x27a18, 1208 0x27a20, 0x27a28, 1209 0x27a30, 0x27a48, 1210 0x27a50, 0x27a54, 1211 0x27a60, 0x27a68, 1212 0x27a70, 0x27a70, 1213 0x27a78, 0x27a98, 1214 0x27aa0, 0x27aa8, 1215 0x27ab0, 0x27ac8, 1216 0x27ad0, 0x27ad4, 1217 0x27ae0, 0x27ae8, 1218 0x27af0, 0x27af0, 1219 0x27af8, 0x27c18, 1220 0x27c20, 0x27c20, 1221 0x27c28, 0x27c30, 1222 0x27c38, 0x27c38, 1223 0x27c80, 0x27c98, 1224 0x27ca0, 0x27ca8, 1225 0x27cb0, 0x27cc8, 1226 0x27cd0, 0x27cd4, 1227 0x27ce0, 0x27ce8, 1228 0x27cf0, 0x27cf0, 1229 0x27cf8, 0x27d7c, 1230 0x27e00, 0x27e04, 1231 }; 1232 1233 static const unsigned int t4vf_reg_ranges[] = { 1234 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 1235 VF_MPS_REG(A_MPS_VF_CTL), 1236 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 1237 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI), 1238 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 1239 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 1240 FW_T4VF_MBDATA_BASE_ADDR, 1241 FW_T4VF_MBDATA_BASE_ADDR + 1242 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 1243 }; 1244 1245 static const unsigned int t5_reg_ranges[] = { 1246 0x1008, 0x10c0, 1247 0x10cc, 0x10f8, 1248 0x1100, 0x1100, 1249 0x110c, 0x1148, 1250 0x1180, 0x1184, 1251 0x1190, 0x1194, 1252 0x11a0, 0x11a4, 1253 0x11b0, 0x11b4, 1254 0x11fc, 0x123c, 1255 0x1280, 0x173c, 1256 0x1800, 0x18fc, 1257 0x3000, 0x3028, 1258 0x3060, 0x30b0, 1259 0x30b8, 0x30d8, 1260 0x30e0, 0x30fc, 1261 0x3140, 0x357c, 1262 0x35a8, 0x35cc, 1263 0x35ec, 0x35ec, 1264 0x3600, 0x5624, 1265 0x56cc, 
0x56ec, 1266 0x56f4, 0x5720, 1267 0x5728, 0x575c, 1268 0x580c, 0x5814, 1269 0x5890, 0x589c, 1270 0x58a4, 0x58ac, 1271 0x58b8, 0x58bc, 1272 0x5940, 0x59c8, 1273 0x59d0, 0x59dc, 1274 0x59fc, 0x5a18, 1275 0x5a60, 0x5a70, 1276 0x5a80, 0x5a9c, 1277 0x5b94, 0x5bfc, 1278 0x6000, 0x6020, 1279 0x6028, 0x6040, 1280 0x6058, 0x609c, 1281 0x60a8, 0x614c, 1282 0x7700, 0x7798, 1283 0x77c0, 0x78fc, 1284 0x7b00, 0x7b58, 1285 0x7b60, 0x7b84, 1286 0x7b8c, 0x7c54, 1287 0x7d00, 0x7d38, 1288 0x7d40, 0x7d80, 1289 0x7d8c, 0x7ddc, 1290 0x7de4, 0x7e04, 1291 0x7e10, 0x7e1c, 1292 0x7e24, 0x7e38, 1293 0x7e40, 0x7e44, 1294 0x7e4c, 0x7e78, 1295 0x7e80, 0x7edc, 1296 0x7ee8, 0x7efc, 1297 0x8dc0, 0x8de0, 1298 0x8df8, 0x8e04, 1299 0x8e10, 0x8e84, 1300 0x8ea0, 0x8f84, 1301 0x8fc0, 0x9058, 1302 0x9060, 0x9060, 1303 0x9068, 0x90f8, 1304 0x9400, 0x9408, 1305 0x9410, 0x9470, 1306 0x9600, 0x9600, 1307 0x9608, 0x9638, 1308 0x9640, 0x96f4, 1309 0x9800, 0x9808, 1310 0x9820, 0x983c, 1311 0x9850, 0x9864, 1312 0x9c00, 0x9c6c, 1313 0x9c80, 0x9cec, 1314 0x9d00, 0x9d6c, 1315 0x9d80, 0x9dec, 1316 0x9e00, 0x9e6c, 1317 0x9e80, 0x9eec, 1318 0x9f00, 0x9f6c, 1319 0x9f80, 0xa020, 1320 0xd004, 0xd004, 1321 0xd010, 0xd03c, 1322 0xdfc0, 0xdfe0, 1323 0xe000, 0x1106c, 1324 0x11074, 0x11088, 1325 0x1109c, 0x1117c, 1326 0x11190, 0x11204, 1327 0x19040, 0x1906c, 1328 0x19078, 0x19080, 1329 0x1908c, 0x190e8, 1330 0x190f0, 0x190f8, 1331 0x19100, 0x19110, 1332 0x19120, 0x19124, 1333 0x19150, 0x19194, 1334 0x1919c, 0x191b0, 1335 0x191d0, 0x191e8, 1336 0x19238, 0x19290, 1337 0x193f8, 0x19428, 1338 0x19430, 0x19444, 1339 0x1944c, 0x1946c, 1340 0x19474, 0x19474, 1341 0x19490, 0x194cc, 1342 0x194f0, 0x194f8, 1343 0x19c00, 0x19c08, 1344 0x19c10, 0x19c60, 1345 0x19c94, 0x19ce4, 1346 0x19cf0, 0x19d40, 1347 0x19d50, 0x19d94, 1348 0x19da0, 0x19de8, 1349 0x19df0, 0x19e10, 1350 0x19e50, 0x19e90, 1351 0x19ea0, 0x19f24, 1352 0x19f34, 0x19f34, 1353 0x19f40, 0x19f50, 1354 0x19f90, 0x19fb4, 1355 0x19fc4, 0x19fe4, 1356 0x1a000, 0x1a004, 1357 0x1a010, 
0x1a06c, 1358 0x1a0b0, 0x1a0e4, 1359 0x1a0ec, 0x1a0f8, 1360 0x1a100, 0x1a108, 1361 0x1a114, 0x1a120, 1362 0x1a128, 0x1a130, 1363 0x1a138, 0x1a138, 1364 0x1a190, 0x1a1c4, 1365 0x1a1fc, 0x1a1fc, 1366 0x1e008, 0x1e00c, 1367 0x1e040, 0x1e044, 1368 0x1e04c, 0x1e04c, 1369 0x1e284, 0x1e290, 1370 0x1e2c0, 0x1e2c0, 1371 0x1e2e0, 0x1e2e0, 1372 0x1e300, 0x1e384, 1373 0x1e3c0, 0x1e3c8, 1374 0x1e408, 0x1e40c, 1375 0x1e440, 0x1e444, 1376 0x1e44c, 0x1e44c, 1377 0x1e684, 0x1e690, 1378 0x1e6c0, 0x1e6c0, 1379 0x1e6e0, 0x1e6e0, 1380 0x1e700, 0x1e784, 1381 0x1e7c0, 0x1e7c8, 1382 0x1e808, 0x1e80c, 1383 0x1e840, 0x1e844, 1384 0x1e84c, 0x1e84c, 1385 0x1ea84, 0x1ea90, 1386 0x1eac0, 0x1eac0, 1387 0x1eae0, 0x1eae0, 1388 0x1eb00, 0x1eb84, 1389 0x1ebc0, 0x1ebc8, 1390 0x1ec08, 0x1ec0c, 1391 0x1ec40, 0x1ec44, 1392 0x1ec4c, 0x1ec4c, 1393 0x1ee84, 0x1ee90, 1394 0x1eec0, 0x1eec0, 1395 0x1eee0, 0x1eee0, 1396 0x1ef00, 0x1ef84, 1397 0x1efc0, 0x1efc8, 1398 0x1f008, 0x1f00c, 1399 0x1f040, 0x1f044, 1400 0x1f04c, 0x1f04c, 1401 0x1f284, 0x1f290, 1402 0x1f2c0, 0x1f2c0, 1403 0x1f2e0, 0x1f2e0, 1404 0x1f300, 0x1f384, 1405 0x1f3c0, 0x1f3c8, 1406 0x1f408, 0x1f40c, 1407 0x1f440, 0x1f444, 1408 0x1f44c, 0x1f44c, 1409 0x1f684, 0x1f690, 1410 0x1f6c0, 0x1f6c0, 1411 0x1f6e0, 0x1f6e0, 1412 0x1f700, 0x1f784, 1413 0x1f7c0, 0x1f7c8, 1414 0x1f808, 0x1f80c, 1415 0x1f840, 0x1f844, 1416 0x1f84c, 0x1f84c, 1417 0x1fa84, 0x1fa90, 1418 0x1fac0, 0x1fac0, 1419 0x1fae0, 0x1fae0, 1420 0x1fb00, 0x1fb84, 1421 0x1fbc0, 0x1fbc8, 1422 0x1fc08, 0x1fc0c, 1423 0x1fc40, 0x1fc44, 1424 0x1fc4c, 0x1fc4c, 1425 0x1fe84, 0x1fe90, 1426 0x1fec0, 0x1fec0, 1427 0x1fee0, 0x1fee0, 1428 0x1ff00, 0x1ff84, 1429 0x1ffc0, 0x1ffc8, 1430 0x30000, 0x30030, 1431 0x30100, 0x30144, 1432 0x30190, 0x301a0, 1433 0x301a8, 0x301b8, 1434 0x301c4, 0x301c8, 1435 0x301d0, 0x301d0, 1436 0x30200, 0x30318, 1437 0x30400, 0x304b4, 1438 0x304c0, 0x3052c, 1439 0x30540, 0x3061c, 1440 0x30800, 0x30828, 1441 0x30834, 0x30834, 1442 0x308c0, 0x30908, 1443 0x30910, 0x309ac, 1444 
0x30a00, 0x30a14, 1445 0x30a1c, 0x30a2c, 1446 0x30a44, 0x30a50, 1447 0x30a74, 0x30a74, 1448 0x30a7c, 0x30afc, 1449 0x30b08, 0x30c24, 1450 0x30d00, 0x30d00, 1451 0x30d08, 0x30d14, 1452 0x30d1c, 0x30d20, 1453 0x30d3c, 0x30d3c, 1454 0x30d48, 0x30d50, 1455 0x31200, 0x3120c, 1456 0x31220, 0x31220, 1457 0x31240, 0x31240, 1458 0x31600, 0x3160c, 1459 0x31a00, 0x31a1c, 1460 0x31e00, 0x31e20, 1461 0x31e38, 0x31e3c, 1462 0x31e80, 0x31e80, 1463 0x31e88, 0x31ea8, 1464 0x31eb0, 0x31eb4, 1465 0x31ec8, 0x31ed4, 1466 0x31fb8, 0x32004, 1467 0x32200, 0x32200, 1468 0x32208, 0x32240, 1469 0x32248, 0x32280, 1470 0x32288, 0x322c0, 1471 0x322c8, 0x322fc, 1472 0x32600, 0x32630, 1473 0x32a00, 0x32abc, 1474 0x32b00, 0x32b10, 1475 0x32b20, 0x32b30, 1476 0x32b40, 0x32b50, 1477 0x32b60, 0x32b70, 1478 0x33000, 0x33028, 1479 0x33030, 0x33048, 1480 0x33060, 0x33068, 1481 0x33070, 0x3309c, 1482 0x330f0, 0x33128, 1483 0x33130, 0x33148, 1484 0x33160, 0x33168, 1485 0x33170, 0x3319c, 1486 0x331f0, 0x33238, 1487 0x33240, 0x33240, 1488 0x33248, 0x33250, 1489 0x3325c, 0x33264, 1490 0x33270, 0x332b8, 1491 0x332c0, 0x332e4, 1492 0x332f8, 0x33338, 1493 0x33340, 0x33340, 1494 0x33348, 0x33350, 1495 0x3335c, 0x33364, 1496 0x33370, 0x333b8, 1497 0x333c0, 0x333e4, 1498 0x333f8, 0x33428, 1499 0x33430, 0x33448, 1500 0x33460, 0x33468, 1501 0x33470, 0x3349c, 1502 0x334f0, 0x33528, 1503 0x33530, 0x33548, 1504 0x33560, 0x33568, 1505 0x33570, 0x3359c, 1506 0x335f0, 0x33638, 1507 0x33640, 0x33640, 1508 0x33648, 0x33650, 1509 0x3365c, 0x33664, 1510 0x33670, 0x336b8, 1511 0x336c0, 0x336e4, 1512 0x336f8, 0x33738, 1513 0x33740, 0x33740, 1514 0x33748, 0x33750, 1515 0x3375c, 0x33764, 1516 0x33770, 0x337b8, 1517 0x337c0, 0x337e4, 1518 0x337f8, 0x337fc, 1519 0x33814, 0x33814, 1520 0x3382c, 0x3382c, 1521 0x33880, 0x3388c, 1522 0x338e8, 0x338ec, 1523 0x33900, 0x33928, 1524 0x33930, 0x33948, 1525 0x33960, 0x33968, 1526 0x33970, 0x3399c, 1527 0x339f0, 0x33a38, 1528 0x33a40, 0x33a40, 1529 0x33a48, 0x33a50, 1530 0x33a5c, 0x33a64, 
1531 0x33a70, 0x33ab8, 1532 0x33ac0, 0x33ae4, 1533 0x33af8, 0x33b10, 1534 0x33b28, 0x33b28, 1535 0x33b3c, 0x33b50, 1536 0x33bf0, 0x33c10, 1537 0x33c28, 0x33c28, 1538 0x33c3c, 0x33c50, 1539 0x33cf0, 0x33cfc, 1540 0x34000, 0x34030, 1541 0x34100, 0x34144, 1542 0x34190, 0x341a0, 1543 0x341a8, 0x341b8, 1544 0x341c4, 0x341c8, 1545 0x341d0, 0x341d0, 1546 0x34200, 0x34318, 1547 0x34400, 0x344b4, 1548 0x344c0, 0x3452c, 1549 0x34540, 0x3461c, 1550 0x34800, 0x34828, 1551 0x34834, 0x34834, 1552 0x348c0, 0x34908, 1553 0x34910, 0x349ac, 1554 0x34a00, 0x34a14, 1555 0x34a1c, 0x34a2c, 1556 0x34a44, 0x34a50, 1557 0x34a74, 0x34a74, 1558 0x34a7c, 0x34afc, 1559 0x34b08, 0x34c24, 1560 0x34d00, 0x34d00, 1561 0x34d08, 0x34d14, 1562 0x34d1c, 0x34d20, 1563 0x34d3c, 0x34d3c, 1564 0x34d48, 0x34d50, 1565 0x35200, 0x3520c, 1566 0x35220, 0x35220, 1567 0x35240, 0x35240, 1568 0x35600, 0x3560c, 1569 0x35a00, 0x35a1c, 1570 0x35e00, 0x35e20, 1571 0x35e38, 0x35e3c, 1572 0x35e80, 0x35e80, 1573 0x35e88, 0x35ea8, 1574 0x35eb0, 0x35eb4, 1575 0x35ec8, 0x35ed4, 1576 0x35fb8, 0x36004, 1577 0x36200, 0x36200, 1578 0x36208, 0x36240, 1579 0x36248, 0x36280, 1580 0x36288, 0x362c0, 1581 0x362c8, 0x362fc, 1582 0x36600, 0x36630, 1583 0x36a00, 0x36abc, 1584 0x36b00, 0x36b10, 1585 0x36b20, 0x36b30, 1586 0x36b40, 0x36b50, 1587 0x36b60, 0x36b70, 1588 0x37000, 0x37028, 1589 0x37030, 0x37048, 1590 0x37060, 0x37068, 1591 0x37070, 0x3709c, 1592 0x370f0, 0x37128, 1593 0x37130, 0x37148, 1594 0x37160, 0x37168, 1595 0x37170, 0x3719c, 1596 0x371f0, 0x37238, 1597 0x37240, 0x37240, 1598 0x37248, 0x37250, 1599 0x3725c, 0x37264, 1600 0x37270, 0x372b8, 1601 0x372c0, 0x372e4, 1602 0x372f8, 0x37338, 1603 0x37340, 0x37340, 1604 0x37348, 0x37350, 1605 0x3735c, 0x37364, 1606 0x37370, 0x373b8, 1607 0x373c0, 0x373e4, 1608 0x373f8, 0x37428, 1609 0x37430, 0x37448, 1610 0x37460, 0x37468, 1611 0x37470, 0x3749c, 1612 0x374f0, 0x37528, 1613 0x37530, 0x37548, 1614 0x37560, 0x37568, 1615 0x37570, 0x3759c, 1616 0x375f0, 0x37638, 1617 0x37640, 
0x37640, 1618 0x37648, 0x37650, 1619 0x3765c, 0x37664, 1620 0x37670, 0x376b8, 1621 0x376c0, 0x376e4, 1622 0x376f8, 0x37738, 1623 0x37740, 0x37740, 1624 0x37748, 0x37750, 1625 0x3775c, 0x37764, 1626 0x37770, 0x377b8, 1627 0x377c0, 0x377e4, 1628 0x377f8, 0x377fc, 1629 0x37814, 0x37814, 1630 0x3782c, 0x3782c, 1631 0x37880, 0x3788c, 1632 0x378e8, 0x378ec, 1633 0x37900, 0x37928, 1634 0x37930, 0x37948, 1635 0x37960, 0x37968, 1636 0x37970, 0x3799c, 1637 0x379f0, 0x37a38, 1638 0x37a40, 0x37a40, 1639 0x37a48, 0x37a50, 1640 0x37a5c, 0x37a64, 1641 0x37a70, 0x37ab8, 1642 0x37ac0, 0x37ae4, 1643 0x37af8, 0x37b10, 1644 0x37b28, 0x37b28, 1645 0x37b3c, 0x37b50, 1646 0x37bf0, 0x37c10, 1647 0x37c28, 0x37c28, 1648 0x37c3c, 0x37c50, 1649 0x37cf0, 0x37cfc, 1650 0x38000, 0x38030, 1651 0x38100, 0x38144, 1652 0x38190, 0x381a0, 1653 0x381a8, 0x381b8, 1654 0x381c4, 0x381c8, 1655 0x381d0, 0x381d0, 1656 0x38200, 0x38318, 1657 0x38400, 0x384b4, 1658 0x384c0, 0x3852c, 1659 0x38540, 0x3861c, 1660 0x38800, 0x38828, 1661 0x38834, 0x38834, 1662 0x388c0, 0x38908, 1663 0x38910, 0x389ac, 1664 0x38a00, 0x38a14, 1665 0x38a1c, 0x38a2c, 1666 0x38a44, 0x38a50, 1667 0x38a74, 0x38a74, 1668 0x38a7c, 0x38afc, 1669 0x38b08, 0x38c24, 1670 0x38d00, 0x38d00, 1671 0x38d08, 0x38d14, 1672 0x38d1c, 0x38d20, 1673 0x38d3c, 0x38d3c, 1674 0x38d48, 0x38d50, 1675 0x39200, 0x3920c, 1676 0x39220, 0x39220, 1677 0x39240, 0x39240, 1678 0x39600, 0x3960c, 1679 0x39a00, 0x39a1c, 1680 0x39e00, 0x39e20, 1681 0x39e38, 0x39e3c, 1682 0x39e80, 0x39e80, 1683 0x39e88, 0x39ea8, 1684 0x39eb0, 0x39eb4, 1685 0x39ec8, 0x39ed4, 1686 0x39fb8, 0x3a004, 1687 0x3a200, 0x3a200, 1688 0x3a208, 0x3a240, 1689 0x3a248, 0x3a280, 1690 0x3a288, 0x3a2c0, 1691 0x3a2c8, 0x3a2fc, 1692 0x3a600, 0x3a630, 1693 0x3aa00, 0x3aabc, 1694 0x3ab00, 0x3ab10, 1695 0x3ab20, 0x3ab30, 1696 0x3ab40, 0x3ab50, 1697 0x3ab60, 0x3ab70, 1698 0x3b000, 0x3b028, 1699 0x3b030, 0x3b048, 1700 0x3b060, 0x3b068, 1701 0x3b070, 0x3b09c, 1702 0x3b0f0, 0x3b128, 1703 0x3b130, 0x3b148, 1704 
0x3b160, 0x3b168, 1705 0x3b170, 0x3b19c, 1706 0x3b1f0, 0x3b238, 1707 0x3b240, 0x3b240, 1708 0x3b248, 0x3b250, 1709 0x3b25c, 0x3b264, 1710 0x3b270, 0x3b2b8, 1711 0x3b2c0, 0x3b2e4, 1712 0x3b2f8, 0x3b338, 1713 0x3b340, 0x3b340, 1714 0x3b348, 0x3b350, 1715 0x3b35c, 0x3b364, 1716 0x3b370, 0x3b3b8, 1717 0x3b3c0, 0x3b3e4, 1718 0x3b3f8, 0x3b428, 1719 0x3b430, 0x3b448, 1720 0x3b460, 0x3b468, 1721 0x3b470, 0x3b49c, 1722 0x3b4f0, 0x3b528, 1723 0x3b530, 0x3b548, 1724 0x3b560, 0x3b568, 1725 0x3b570, 0x3b59c, 1726 0x3b5f0, 0x3b638, 1727 0x3b640, 0x3b640, 1728 0x3b648, 0x3b650, 1729 0x3b65c, 0x3b664, 1730 0x3b670, 0x3b6b8, 1731 0x3b6c0, 0x3b6e4, 1732 0x3b6f8, 0x3b738, 1733 0x3b740, 0x3b740, 1734 0x3b748, 0x3b750, 1735 0x3b75c, 0x3b764, 1736 0x3b770, 0x3b7b8, 1737 0x3b7c0, 0x3b7e4, 1738 0x3b7f8, 0x3b7fc, 1739 0x3b814, 0x3b814, 1740 0x3b82c, 0x3b82c, 1741 0x3b880, 0x3b88c, 1742 0x3b8e8, 0x3b8ec, 1743 0x3b900, 0x3b928, 1744 0x3b930, 0x3b948, 1745 0x3b960, 0x3b968, 1746 0x3b970, 0x3b99c, 1747 0x3b9f0, 0x3ba38, 1748 0x3ba40, 0x3ba40, 1749 0x3ba48, 0x3ba50, 1750 0x3ba5c, 0x3ba64, 1751 0x3ba70, 0x3bab8, 1752 0x3bac0, 0x3bae4, 1753 0x3baf8, 0x3bb10, 1754 0x3bb28, 0x3bb28, 1755 0x3bb3c, 0x3bb50, 1756 0x3bbf0, 0x3bc10, 1757 0x3bc28, 0x3bc28, 1758 0x3bc3c, 0x3bc50, 1759 0x3bcf0, 0x3bcfc, 1760 0x3c000, 0x3c030, 1761 0x3c100, 0x3c144, 1762 0x3c190, 0x3c1a0, 1763 0x3c1a8, 0x3c1b8, 1764 0x3c1c4, 0x3c1c8, 1765 0x3c1d0, 0x3c1d0, 1766 0x3c200, 0x3c318, 1767 0x3c400, 0x3c4b4, 1768 0x3c4c0, 0x3c52c, 1769 0x3c540, 0x3c61c, 1770 0x3c800, 0x3c828, 1771 0x3c834, 0x3c834, 1772 0x3c8c0, 0x3c908, 1773 0x3c910, 0x3c9ac, 1774 0x3ca00, 0x3ca14, 1775 0x3ca1c, 0x3ca2c, 1776 0x3ca44, 0x3ca50, 1777 0x3ca74, 0x3ca74, 1778 0x3ca7c, 0x3cafc, 1779 0x3cb08, 0x3cc24, 1780 0x3cd00, 0x3cd00, 1781 0x3cd08, 0x3cd14, 1782 0x3cd1c, 0x3cd20, 1783 0x3cd3c, 0x3cd3c, 1784 0x3cd48, 0x3cd50, 1785 0x3d200, 0x3d20c, 1786 0x3d220, 0x3d220, 1787 0x3d240, 0x3d240, 1788 0x3d600, 0x3d60c, 1789 0x3da00, 0x3da1c, 1790 0x3de00, 0x3de20, 
1791 0x3de38, 0x3de3c, 1792 0x3de80, 0x3de80, 1793 0x3de88, 0x3dea8, 1794 0x3deb0, 0x3deb4, 1795 0x3dec8, 0x3ded4, 1796 0x3dfb8, 0x3e004, 1797 0x3e200, 0x3e200, 1798 0x3e208, 0x3e240, 1799 0x3e248, 0x3e280, 1800 0x3e288, 0x3e2c0, 1801 0x3e2c8, 0x3e2fc, 1802 0x3e600, 0x3e630, 1803 0x3ea00, 0x3eabc, 1804 0x3eb00, 0x3eb10, 1805 0x3eb20, 0x3eb30, 1806 0x3eb40, 0x3eb50, 1807 0x3eb60, 0x3eb70, 1808 0x3f000, 0x3f028, 1809 0x3f030, 0x3f048, 1810 0x3f060, 0x3f068, 1811 0x3f070, 0x3f09c, 1812 0x3f0f0, 0x3f128, 1813 0x3f130, 0x3f148, 1814 0x3f160, 0x3f168, 1815 0x3f170, 0x3f19c, 1816 0x3f1f0, 0x3f238, 1817 0x3f240, 0x3f240, 1818 0x3f248, 0x3f250, 1819 0x3f25c, 0x3f264, 1820 0x3f270, 0x3f2b8, 1821 0x3f2c0, 0x3f2e4, 1822 0x3f2f8, 0x3f338, 1823 0x3f340, 0x3f340, 1824 0x3f348, 0x3f350, 1825 0x3f35c, 0x3f364, 1826 0x3f370, 0x3f3b8, 1827 0x3f3c0, 0x3f3e4, 1828 0x3f3f8, 0x3f428, 1829 0x3f430, 0x3f448, 1830 0x3f460, 0x3f468, 1831 0x3f470, 0x3f49c, 1832 0x3f4f0, 0x3f528, 1833 0x3f530, 0x3f548, 1834 0x3f560, 0x3f568, 1835 0x3f570, 0x3f59c, 1836 0x3f5f0, 0x3f638, 1837 0x3f640, 0x3f640, 1838 0x3f648, 0x3f650, 1839 0x3f65c, 0x3f664, 1840 0x3f670, 0x3f6b8, 1841 0x3f6c0, 0x3f6e4, 1842 0x3f6f8, 0x3f738, 1843 0x3f740, 0x3f740, 1844 0x3f748, 0x3f750, 1845 0x3f75c, 0x3f764, 1846 0x3f770, 0x3f7b8, 1847 0x3f7c0, 0x3f7e4, 1848 0x3f7f8, 0x3f7fc, 1849 0x3f814, 0x3f814, 1850 0x3f82c, 0x3f82c, 1851 0x3f880, 0x3f88c, 1852 0x3f8e8, 0x3f8ec, 1853 0x3f900, 0x3f928, 1854 0x3f930, 0x3f948, 1855 0x3f960, 0x3f968, 1856 0x3f970, 0x3f99c, 1857 0x3f9f0, 0x3fa38, 1858 0x3fa40, 0x3fa40, 1859 0x3fa48, 0x3fa50, 1860 0x3fa5c, 0x3fa64, 1861 0x3fa70, 0x3fab8, 1862 0x3fac0, 0x3fae4, 1863 0x3faf8, 0x3fb10, 1864 0x3fb28, 0x3fb28, 1865 0x3fb3c, 0x3fb50, 1866 0x3fbf0, 0x3fc10, 1867 0x3fc28, 0x3fc28, 1868 0x3fc3c, 0x3fc50, 1869 0x3fcf0, 0x3fcfc, 1870 0x40000, 0x4000c, 1871 0x40040, 0x40050, 1872 0x40060, 0x40068, 1873 0x4007c, 0x4008c, 1874 0x40094, 0x400b0, 1875 0x400c0, 0x40144, 1876 0x40180, 0x4018c, 1877 0x40200, 
0x40254, 1878 0x40260, 0x40264, 1879 0x40270, 0x40288, 1880 0x40290, 0x40298, 1881 0x402ac, 0x402c8, 1882 0x402d0, 0x402e0, 1883 0x402f0, 0x402f0, 1884 0x40300, 0x4033c, 1885 0x403f8, 0x403fc, 1886 0x41304, 0x413c4, 1887 0x41400, 0x4140c, 1888 0x41414, 0x4141c, 1889 0x41480, 0x414d0, 1890 0x44000, 0x44054, 1891 0x4405c, 0x44078, 1892 0x440c0, 0x44174, 1893 0x44180, 0x441ac, 1894 0x441b4, 0x441b8, 1895 0x441c0, 0x44254, 1896 0x4425c, 0x44278, 1897 0x442c0, 0x44374, 1898 0x44380, 0x443ac, 1899 0x443b4, 0x443b8, 1900 0x443c0, 0x44454, 1901 0x4445c, 0x44478, 1902 0x444c0, 0x44574, 1903 0x44580, 0x445ac, 1904 0x445b4, 0x445b8, 1905 0x445c0, 0x44654, 1906 0x4465c, 0x44678, 1907 0x446c0, 0x44774, 1908 0x44780, 0x447ac, 1909 0x447b4, 0x447b8, 1910 0x447c0, 0x44854, 1911 0x4485c, 0x44878, 1912 0x448c0, 0x44974, 1913 0x44980, 0x449ac, 1914 0x449b4, 0x449b8, 1915 0x449c0, 0x449fc, 1916 0x45000, 0x45004, 1917 0x45010, 0x45030, 1918 0x45040, 0x45060, 1919 0x45068, 0x45068, 1920 0x45080, 0x45084, 1921 0x450a0, 0x450b0, 1922 0x45200, 0x45204, 1923 0x45210, 0x45230, 1924 0x45240, 0x45260, 1925 0x45268, 0x45268, 1926 0x45280, 0x45284, 1927 0x452a0, 0x452b0, 1928 0x460c0, 0x460e4, 1929 0x47000, 0x4703c, 1930 0x47044, 0x4708c, 1931 0x47200, 0x47250, 1932 0x47400, 0x47408, 1933 0x47414, 0x47420, 1934 0x47600, 0x47618, 1935 0x47800, 0x47814, 1936 0x48000, 0x4800c, 1937 0x48040, 0x48050, 1938 0x48060, 0x48068, 1939 0x4807c, 0x4808c, 1940 0x48094, 0x480b0, 1941 0x480c0, 0x48144, 1942 0x48180, 0x4818c, 1943 0x48200, 0x48254, 1944 0x48260, 0x48264, 1945 0x48270, 0x48288, 1946 0x48290, 0x48298, 1947 0x482ac, 0x482c8, 1948 0x482d0, 0x482e0, 1949 0x482f0, 0x482f0, 1950 0x48300, 0x4833c, 1951 0x483f8, 0x483fc, 1952 0x49304, 0x493c4, 1953 0x49400, 0x4940c, 1954 0x49414, 0x4941c, 1955 0x49480, 0x494d0, 1956 0x4c000, 0x4c054, 1957 0x4c05c, 0x4c078, 1958 0x4c0c0, 0x4c174, 1959 0x4c180, 0x4c1ac, 1960 0x4c1b4, 0x4c1b8, 1961 0x4c1c0, 0x4c254, 1962 0x4c25c, 0x4c278, 1963 0x4c2c0, 0x4c374, 1964 
0x4c380, 0x4c3ac, 1965 0x4c3b4, 0x4c3b8, 1966 0x4c3c0, 0x4c454, 1967 0x4c45c, 0x4c478, 1968 0x4c4c0, 0x4c574, 1969 0x4c580, 0x4c5ac, 1970 0x4c5b4, 0x4c5b8, 1971 0x4c5c0, 0x4c654, 1972 0x4c65c, 0x4c678, 1973 0x4c6c0, 0x4c774, 1974 0x4c780, 0x4c7ac, 1975 0x4c7b4, 0x4c7b8, 1976 0x4c7c0, 0x4c854, 1977 0x4c85c, 0x4c878, 1978 0x4c8c0, 0x4c974, 1979 0x4c980, 0x4c9ac, 1980 0x4c9b4, 0x4c9b8, 1981 0x4c9c0, 0x4c9fc, 1982 0x4d000, 0x4d004, 1983 0x4d010, 0x4d030, 1984 0x4d040, 0x4d060, 1985 0x4d068, 0x4d068, 1986 0x4d080, 0x4d084, 1987 0x4d0a0, 0x4d0b0, 1988 0x4d200, 0x4d204, 1989 0x4d210, 0x4d230, 1990 0x4d240, 0x4d260, 1991 0x4d268, 0x4d268, 1992 0x4d280, 0x4d284, 1993 0x4d2a0, 0x4d2b0, 1994 0x4e0c0, 0x4e0e4, 1995 0x4f000, 0x4f03c, 1996 0x4f044, 0x4f08c, 1997 0x4f200, 0x4f250, 1998 0x4f400, 0x4f408, 1999 0x4f414, 0x4f420, 2000 0x4f600, 0x4f618, 2001 0x4f800, 0x4f814, 2002 0x50000, 0x50084, 2003 0x50090, 0x500cc, 2004 0x50400, 0x50400, 2005 0x50800, 0x50884, 2006 0x50890, 0x508cc, 2007 0x50c00, 0x50c00, 2008 0x51000, 0x5101c, 2009 0x51300, 0x51308, 2010 }; 2011 2012 static const unsigned int t5vf_reg_ranges[] = { 2013 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 2014 VF_MPS_REG(A_MPS_VF_CTL), 2015 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 2016 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), 2017 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 2018 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 2019 FW_T4VF_MBDATA_BASE_ADDR, 2020 FW_T4VF_MBDATA_BASE_ADDR + 2021 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 2022 }; 2023 2024 static const unsigned int t6_reg_ranges[] = { 2025 0x1008, 0x101c, 2026 0x1024, 0x10a8, 2027 0x10b4, 0x10f8, 2028 0x1100, 0x1114, 2029 0x111c, 0x112c, 2030 0x1138, 0x113c, 2031 0x1144, 0x114c, 2032 0x1180, 0x1184, 2033 0x1190, 0x1194, 2034 0x11a0, 0x11a4, 2035 0x11b0, 0x11b4, 2036 0x11fc, 0x1274, 2037 0x1280, 0x133c, 2038 0x1800, 0x18fc, 2039 0x3000, 0x302c, 2040 0x3060, 0x30b0, 2041 0x30b8, 0x30d8, 2042 0x30e0, 0x30fc, 2043 0x3140, 0x357c, 2044 
0x35a8, 0x35cc, 2045 0x35ec, 0x35ec, 2046 0x3600, 0x5624, 2047 0x56cc, 0x56ec, 2048 0x56f4, 0x5720, 2049 0x5728, 0x575c, 2050 0x580c, 0x5814, 2051 0x5890, 0x589c, 2052 0x58a4, 0x58ac, 2053 0x58b8, 0x58bc, 2054 0x5940, 0x595c, 2055 0x5980, 0x598c, 2056 0x59b0, 0x59c8, 2057 0x59d0, 0x59dc, 2058 0x59fc, 0x5a18, 2059 0x5a60, 0x5a6c, 2060 0x5a80, 0x5a8c, 2061 0x5a94, 0x5a9c, 2062 0x5b94, 0x5bfc, 2063 0x5c10, 0x5e48, 2064 0x5e50, 0x5e94, 2065 0x5ea0, 0x5eb0, 2066 0x5ec0, 0x5ec0, 2067 0x5ec8, 0x5ed0, 2068 0x5ee0, 0x5ee0, 2069 0x5ef0, 0x5ef0, 2070 0x5f00, 0x5f00, 2071 0x6000, 0x6020, 2072 0x6028, 0x6040, 2073 0x6058, 0x609c, 2074 0x60a8, 0x619c, 2075 0x7700, 0x7798, 2076 0x77c0, 0x7880, 2077 0x78cc, 0x78fc, 2078 0x7b00, 0x7b58, 2079 0x7b60, 0x7b84, 2080 0x7b8c, 0x7c54, 2081 0x7d00, 0x7d38, 2082 0x7d40, 0x7d84, 2083 0x7d8c, 0x7ddc, 2084 0x7de4, 0x7e04, 2085 0x7e10, 0x7e1c, 2086 0x7e24, 0x7e38, 2087 0x7e40, 0x7e44, 2088 0x7e4c, 0x7e78, 2089 0x7e80, 0x7edc, 2090 0x7ee8, 0x7efc, 2091 0x8dc0, 0x8de4, 2092 0x8df8, 0x8e04, 2093 0x8e10, 0x8e84, 2094 0x8ea0, 0x8f88, 2095 0x8fb8, 0x9058, 2096 0x9060, 0x9060, 2097 0x9068, 0x90f8, 2098 0x9100, 0x9124, 2099 0x9400, 0x9470, 2100 0x9600, 0x9600, 2101 0x9608, 0x9638, 2102 0x9640, 0x9704, 2103 0x9710, 0x971c, 2104 0x9800, 0x9808, 2105 0x9820, 0x983c, 2106 0x9850, 0x9864, 2107 0x9c00, 0x9c6c, 2108 0x9c80, 0x9cec, 2109 0x9d00, 0x9d6c, 2110 0x9d80, 0x9dec, 2111 0x9e00, 0x9e6c, 2112 0x9e80, 0x9eec, 2113 0x9f00, 0x9f6c, 2114 0x9f80, 0xa020, 2115 0xd004, 0xd03c, 2116 0xd100, 0xd118, 2117 0xd200, 0xd214, 2118 0xd220, 0xd234, 2119 0xd240, 0xd254, 2120 0xd260, 0xd274, 2121 0xd280, 0xd294, 2122 0xd2a0, 0xd2b4, 2123 0xd2c0, 0xd2d4, 2124 0xd2e0, 0xd2f4, 2125 0xd300, 0xd31c, 2126 0xdfc0, 0xdfe0, 2127 0xe000, 0xf008, 2128 0xf010, 0xf018, 2129 0xf020, 0xf028, 2130 0x11000, 0x11014, 2131 0x11048, 0x1106c, 2132 0x11074, 0x11088, 2133 0x11098, 0x11120, 2134 0x1112c, 0x1117c, 2135 0x11190, 0x112e0, 2136 0x11300, 0x1130c, 2137 0x12000, 0x1206c, 2138 0x19040, 
0x1906c, 2139 0x19078, 0x19080, 2140 0x1908c, 0x190e8, 2141 0x190f0, 0x190f8, 2142 0x19100, 0x19110, 2143 0x19120, 0x19124, 2144 0x19150, 0x19194, 2145 0x1919c, 0x191b0, 2146 0x191d0, 0x191e8, 2147 0x19238, 0x19290, 2148 0x192a4, 0x192b0, 2149 0x192bc, 0x192bc, 2150 0x19348, 0x1934c, 2151 0x193f8, 0x19418, 2152 0x19420, 0x19428, 2153 0x19430, 0x19444, 2154 0x1944c, 0x1946c, 2155 0x19474, 0x19474, 2156 0x19490, 0x194cc, 2157 0x194f0, 0x194f8, 2158 0x19c00, 0x19c48, 2159 0x19c50, 0x19c80, 2160 0x19c94, 0x19c98, 2161 0x19ca0, 0x19cbc, 2162 0x19ce4, 0x19ce4, 2163 0x19cf0, 0x19cf8, 2164 0x19d00, 0x19d28, 2165 0x19d50, 0x19d78, 2166 0x19d94, 0x19d98, 2167 0x19da0, 0x19dc8, 2168 0x19df0, 0x19e10, 2169 0x19e50, 0x19e6c, 2170 0x19ea0, 0x19ebc, 2171 0x19ec4, 0x19ef4, 2172 0x19f04, 0x19f2c, 2173 0x19f34, 0x19f34, 2174 0x19f40, 0x19f50, 2175 0x19f90, 0x19fac, 2176 0x19fc4, 0x19fc8, 2177 0x19fd0, 0x19fe4, 2178 0x1a000, 0x1a004, 2179 0x1a010, 0x1a06c, 2180 0x1a0b0, 0x1a0e4, 2181 0x1a0ec, 0x1a0f8, 2182 0x1a100, 0x1a108, 2183 0x1a114, 0x1a120, 2184 0x1a128, 0x1a130, 2185 0x1a138, 0x1a138, 2186 0x1a190, 0x1a1c4, 2187 0x1a1fc, 0x1a1fc, 2188 0x1e008, 0x1e00c, 2189 0x1e040, 0x1e044, 2190 0x1e04c, 0x1e04c, 2191 0x1e284, 0x1e290, 2192 0x1e2c0, 0x1e2c0, 2193 0x1e2e0, 0x1e2e0, 2194 0x1e300, 0x1e384, 2195 0x1e3c0, 0x1e3c8, 2196 0x1e408, 0x1e40c, 2197 0x1e440, 0x1e444, 2198 0x1e44c, 0x1e44c, 2199 0x1e684, 0x1e690, 2200 0x1e6c0, 0x1e6c0, 2201 0x1e6e0, 0x1e6e0, 2202 0x1e700, 0x1e784, 2203 0x1e7c0, 0x1e7c8, 2204 0x1e808, 0x1e80c, 2205 0x1e840, 0x1e844, 2206 0x1e84c, 0x1e84c, 2207 0x1ea84, 0x1ea90, 2208 0x1eac0, 0x1eac0, 2209 0x1eae0, 0x1eae0, 2210 0x1eb00, 0x1eb84, 2211 0x1ebc0, 0x1ebc8, 2212 0x1ec08, 0x1ec0c, 2213 0x1ec40, 0x1ec44, 2214 0x1ec4c, 0x1ec4c, 2215 0x1ee84, 0x1ee90, 2216 0x1eec0, 0x1eec0, 2217 0x1eee0, 0x1eee0, 2218 0x1ef00, 0x1ef84, 2219 0x1efc0, 0x1efc8, 2220 0x1f008, 0x1f00c, 2221 0x1f040, 0x1f044, 2222 0x1f04c, 0x1f04c, 2223 0x1f284, 0x1f290, 2224 0x1f2c0, 0x1f2c0, 2225 
0x1f2e0, 0x1f2e0, 2226 0x1f300, 0x1f384, 2227 0x1f3c0, 0x1f3c8, 2228 0x1f408, 0x1f40c, 2229 0x1f440, 0x1f444, 2230 0x1f44c, 0x1f44c, 2231 0x1f684, 0x1f690, 2232 0x1f6c0, 0x1f6c0, 2233 0x1f6e0, 0x1f6e0, 2234 0x1f700, 0x1f784, 2235 0x1f7c0, 0x1f7c8, 2236 0x1f808, 0x1f80c, 2237 0x1f840, 0x1f844, 2238 0x1f84c, 0x1f84c, 2239 0x1fa84, 0x1fa90, 2240 0x1fac0, 0x1fac0, 2241 0x1fae0, 0x1fae0, 2242 0x1fb00, 0x1fb84, 2243 0x1fbc0, 0x1fbc8, 2244 0x1fc08, 0x1fc0c, 2245 0x1fc40, 0x1fc44, 2246 0x1fc4c, 0x1fc4c, 2247 0x1fe84, 0x1fe90, 2248 0x1fec0, 0x1fec0, 2249 0x1fee0, 0x1fee0, 2250 0x1ff00, 0x1ff84, 2251 0x1ffc0, 0x1ffc8, 2252 0x30000, 0x30030, 2253 0x30100, 0x30168, 2254 0x30190, 0x301a0, 2255 0x301a8, 0x301b8, 2256 0x301c4, 0x301c8, 2257 0x301d0, 0x301d0, 2258 0x30200, 0x30320, 2259 0x30400, 0x304b4, 2260 0x304c0, 0x3052c, 2261 0x30540, 0x3061c, 2262 0x30800, 0x308a0, 2263 0x308c0, 0x30908, 2264 0x30910, 0x309b8, 2265 0x30a00, 0x30a04, 2266 0x30a0c, 0x30a14, 2267 0x30a1c, 0x30a2c, 2268 0x30a44, 0x30a50, 2269 0x30a74, 0x30a74, 2270 0x30a7c, 0x30afc, 2271 0x30b08, 0x30c24, 2272 0x30d00, 0x30d14, 2273 0x30d1c, 0x30d3c, 2274 0x30d44, 0x30d4c, 2275 0x30d54, 0x30d74, 2276 0x30d7c, 0x30d7c, 2277 0x30de0, 0x30de0, 2278 0x30e00, 0x30ed4, 2279 0x30f00, 0x30fa4, 2280 0x30fc0, 0x30fc4, 2281 0x31000, 0x31004, 2282 0x31080, 0x310fc, 2283 0x31208, 0x31220, 2284 0x3123c, 0x31254, 2285 0x31300, 0x31300, 2286 0x31308, 0x3131c, 2287 0x31338, 0x3133c, 2288 0x31380, 0x31380, 2289 0x31388, 0x313a8, 2290 0x313b4, 0x313b4, 2291 0x31400, 0x31420, 2292 0x31438, 0x3143c, 2293 0x31480, 0x31480, 2294 0x314a8, 0x314a8, 2295 0x314b0, 0x314b4, 2296 0x314c8, 0x314d4, 2297 0x31a40, 0x31a4c, 2298 0x31af0, 0x31b20, 2299 0x31b38, 0x31b3c, 2300 0x31b80, 0x31b80, 2301 0x31ba8, 0x31ba8, 2302 0x31bb0, 0x31bb4, 2303 0x31bc8, 0x31bd4, 2304 0x32140, 0x3218c, 2305 0x321f0, 0x321f4, 2306 0x32200, 0x32200, 2307 0x32218, 0x32218, 2308 0x32400, 0x32400, 2309 0x32408, 0x3241c, 2310 0x32618, 0x32620, 2311 0x32664, 0x32664, 
2312 0x326a8, 0x326a8, 2313 0x326ec, 0x326ec, 2314 0x32a00, 0x32abc, 2315 0x32b00, 0x32b18, 2316 0x32b20, 0x32b38, 2317 0x32b40, 0x32b58, 2318 0x32b60, 0x32b78, 2319 0x32c00, 0x32c00, 2320 0x32c08, 0x32c3c, 2321 0x33000, 0x3302c, 2322 0x33034, 0x33050, 2323 0x33058, 0x33058, 2324 0x33060, 0x3308c, 2325 0x3309c, 0x330ac, 2326 0x330c0, 0x330c0, 2327 0x330c8, 0x330d0, 2328 0x330d8, 0x330e0, 2329 0x330ec, 0x3312c, 2330 0x33134, 0x33150, 2331 0x33158, 0x33158, 2332 0x33160, 0x3318c, 2333 0x3319c, 0x331ac, 2334 0x331c0, 0x331c0, 2335 0x331c8, 0x331d0, 2336 0x331d8, 0x331e0, 2337 0x331ec, 0x33290, 2338 0x33298, 0x332c4, 2339 0x332e4, 0x33390, 2340 0x33398, 0x333c4, 2341 0x333e4, 0x3342c, 2342 0x33434, 0x33450, 2343 0x33458, 0x33458, 2344 0x33460, 0x3348c, 2345 0x3349c, 0x334ac, 2346 0x334c0, 0x334c0, 2347 0x334c8, 0x334d0, 2348 0x334d8, 0x334e0, 2349 0x334ec, 0x3352c, 2350 0x33534, 0x33550, 2351 0x33558, 0x33558, 2352 0x33560, 0x3358c, 2353 0x3359c, 0x335ac, 2354 0x335c0, 0x335c0, 2355 0x335c8, 0x335d0, 2356 0x335d8, 0x335e0, 2357 0x335ec, 0x33690, 2358 0x33698, 0x336c4, 2359 0x336e4, 0x33790, 2360 0x33798, 0x337c4, 2361 0x337e4, 0x337fc, 2362 0x33814, 0x33814, 2363 0x33854, 0x33868, 2364 0x33880, 0x3388c, 2365 0x338c0, 0x338d0, 2366 0x338e8, 0x338ec, 2367 0x33900, 0x3392c, 2368 0x33934, 0x33950, 2369 0x33958, 0x33958, 2370 0x33960, 0x3398c, 2371 0x3399c, 0x339ac, 2372 0x339c0, 0x339c0, 2373 0x339c8, 0x339d0, 2374 0x339d8, 0x339e0, 2375 0x339ec, 0x33a90, 2376 0x33a98, 0x33ac4, 2377 0x33ae4, 0x33b10, 2378 0x33b24, 0x33b28, 2379 0x33b38, 0x33b50, 2380 0x33bf0, 0x33c10, 2381 0x33c24, 0x33c28, 2382 0x33c38, 0x33c50, 2383 0x33cf0, 0x33cfc, 2384 0x34000, 0x34030, 2385 0x34100, 0x34168, 2386 0x34190, 0x341a0, 2387 0x341a8, 0x341b8, 2388 0x341c4, 0x341c8, 2389 0x341d0, 0x341d0, 2390 0x34200, 0x34320, 2391 0x34400, 0x344b4, 2392 0x344c0, 0x3452c, 2393 0x34540, 0x3461c, 2394 0x34800, 0x348a0, 2395 0x348c0, 0x34908, 2396 0x34910, 0x349b8, 2397 0x34a00, 0x34a04, 2398 0x34a0c, 
0x34a14, 2399 0x34a1c, 0x34a2c, 2400 0x34a44, 0x34a50, 2401 0x34a74, 0x34a74, 2402 0x34a7c, 0x34afc, 2403 0x34b08, 0x34c24, 2404 0x34d00, 0x34d14, 2405 0x34d1c, 0x34d3c, 2406 0x34d44, 0x34d4c, 2407 0x34d54, 0x34d74, 2408 0x34d7c, 0x34d7c, 2409 0x34de0, 0x34de0, 2410 0x34e00, 0x34ed4, 2411 0x34f00, 0x34fa4, 2412 0x34fc0, 0x34fc4, 2413 0x35000, 0x35004, 2414 0x35080, 0x350fc, 2415 0x35208, 0x35220, 2416 0x3523c, 0x35254, 2417 0x35300, 0x35300, 2418 0x35308, 0x3531c, 2419 0x35338, 0x3533c, 2420 0x35380, 0x35380, 2421 0x35388, 0x353a8, 2422 0x353b4, 0x353b4, 2423 0x35400, 0x35420, 2424 0x35438, 0x3543c, 2425 0x35480, 0x35480, 2426 0x354a8, 0x354a8, 2427 0x354b0, 0x354b4, 2428 0x354c8, 0x354d4, 2429 0x35a40, 0x35a4c, 2430 0x35af0, 0x35b20, 2431 0x35b38, 0x35b3c, 2432 0x35b80, 0x35b80, 2433 0x35ba8, 0x35ba8, 2434 0x35bb0, 0x35bb4, 2435 0x35bc8, 0x35bd4, 2436 0x36140, 0x3618c, 2437 0x361f0, 0x361f4, 2438 0x36200, 0x36200, 2439 0x36218, 0x36218, 2440 0x36400, 0x36400, 2441 0x36408, 0x3641c, 2442 0x36618, 0x36620, 2443 0x36664, 0x36664, 2444 0x366a8, 0x366a8, 2445 0x366ec, 0x366ec, 2446 0x36a00, 0x36abc, 2447 0x36b00, 0x36b18, 2448 0x36b20, 0x36b38, 2449 0x36b40, 0x36b58, 2450 0x36b60, 0x36b78, 2451 0x36c00, 0x36c00, 2452 0x36c08, 0x36c3c, 2453 0x37000, 0x3702c, 2454 0x37034, 0x37050, 2455 0x37058, 0x37058, 2456 0x37060, 0x3708c, 2457 0x3709c, 0x370ac, 2458 0x370c0, 0x370c0, 2459 0x370c8, 0x370d0, 2460 0x370d8, 0x370e0, 2461 0x370ec, 0x3712c, 2462 0x37134, 0x37150, 2463 0x37158, 0x37158, 2464 0x37160, 0x3718c, 2465 0x3719c, 0x371ac, 2466 0x371c0, 0x371c0, 2467 0x371c8, 0x371d0, 2468 0x371d8, 0x371e0, 2469 0x371ec, 0x37290, 2470 0x37298, 0x372c4, 2471 0x372e4, 0x37390, 2472 0x37398, 0x373c4, 2473 0x373e4, 0x3742c, 2474 0x37434, 0x37450, 2475 0x37458, 0x37458, 2476 0x37460, 0x3748c, 2477 0x3749c, 0x374ac, 2478 0x374c0, 0x374c0, 2479 0x374c8, 0x374d0, 2480 0x374d8, 0x374e0, 2481 0x374ec, 0x3752c, 2482 0x37534, 0x37550, 2483 0x37558, 0x37558, 2484 0x37560, 0x3758c, 2485 
0x3759c, 0x375ac, 2486 0x375c0, 0x375c0, 2487 0x375c8, 0x375d0, 2488 0x375d8, 0x375e0, 2489 0x375ec, 0x37690, 2490 0x37698, 0x376c4, 2491 0x376e4, 0x37790, 2492 0x37798, 0x377c4, 2493 0x377e4, 0x377fc, 2494 0x37814, 0x37814, 2495 0x37854, 0x37868, 2496 0x37880, 0x3788c, 2497 0x378c0, 0x378d0, 2498 0x378e8, 0x378ec, 2499 0x37900, 0x3792c, 2500 0x37934, 0x37950, 2501 0x37958, 0x37958, 2502 0x37960, 0x3798c, 2503 0x3799c, 0x379ac, 2504 0x379c0, 0x379c0, 2505 0x379c8, 0x379d0, 2506 0x379d8, 0x379e0, 2507 0x379ec, 0x37a90, 2508 0x37a98, 0x37ac4, 2509 0x37ae4, 0x37b10, 2510 0x37b24, 0x37b28, 2511 0x37b38, 0x37b50, 2512 0x37bf0, 0x37c10, 2513 0x37c24, 0x37c28, 2514 0x37c38, 0x37c50, 2515 0x37cf0, 0x37cfc, 2516 0x40040, 0x40040, 2517 0x40080, 0x40084, 2518 0x40100, 0x40100, 2519 0x40140, 0x401bc, 2520 0x40200, 0x40214, 2521 0x40228, 0x40228, 2522 0x40240, 0x40258, 2523 0x40280, 0x40280, 2524 0x40304, 0x40304, 2525 0x40330, 0x4033c, 2526 0x41304, 0x413c8, 2527 0x413d0, 0x413dc, 2528 0x413f0, 0x413f0, 2529 0x41400, 0x4140c, 2530 0x41414, 0x4141c, 2531 0x41480, 0x414d0, 2532 0x44000, 0x4407c, 2533 0x440c0, 0x441ac, 2534 0x441b4, 0x4427c, 2535 0x442c0, 0x443ac, 2536 0x443b4, 0x4447c, 2537 0x444c0, 0x445ac, 2538 0x445b4, 0x4467c, 2539 0x446c0, 0x447ac, 2540 0x447b4, 0x4487c, 2541 0x448c0, 0x449ac, 2542 0x449b4, 0x44a7c, 2543 0x44ac0, 0x44bac, 2544 0x44bb4, 0x44c7c, 2545 0x44cc0, 0x44dac, 2546 0x44db4, 0x44e7c, 2547 0x44ec0, 0x44fac, 2548 0x44fb4, 0x4507c, 2549 0x450c0, 0x451ac, 2550 0x451b4, 0x451fc, 2551 0x45800, 0x45804, 2552 0x45810, 0x45830, 2553 0x45840, 0x45860, 2554 0x45868, 0x45868, 2555 0x45880, 0x45884, 2556 0x458a0, 0x458b0, 2557 0x45a00, 0x45a04, 2558 0x45a10, 0x45a30, 2559 0x45a40, 0x45a60, 2560 0x45a68, 0x45a68, 2561 0x45a80, 0x45a84, 2562 0x45aa0, 0x45ab0, 2563 0x460c0, 0x460e4, 2564 0x47000, 0x4703c, 2565 0x47044, 0x4708c, 2566 0x47200, 0x47250, 2567 0x47400, 0x47408, 2568 0x47414, 0x47420, 2569 0x47600, 0x47618, 2570 0x47800, 0x47814, 2571 0x47820, 0x4782c, 
2572 0x50000, 0x50084, 2573 0x50090, 0x500cc, 2574 0x50300, 0x50384, 2575 0x50400, 0x50400, 2576 0x50800, 0x50884, 2577 0x50890, 0x508cc, 2578 0x50b00, 0x50b84, 2579 0x50c00, 0x50c00, 2580 0x51000, 0x51020, 2581 0x51028, 0x510b0, 2582 0x51300, 0x51324, 2583 }; 2584 2585 static const unsigned int t6vf_reg_ranges[] = { 2586 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 2587 VF_MPS_REG(A_MPS_VF_CTL), 2588 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 2589 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), 2590 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 2591 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 2592 FW_T6VF_MBDATA_BASE_ADDR, 2593 FW_T6VF_MBDATA_BASE_ADDR + 2594 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 2595 }; 2596 2597 u32 *buf_end = (u32 *)(buf + buf_size); 2598 const unsigned int *reg_ranges; 2599 int reg_ranges_size, range; 2600 unsigned int chip_version = chip_id(adap); 2601 2602 /* 2603 * Select the right set of register ranges to dump depending on the 2604 * adapter chip type. 
2605 */ 2606 switch (chip_version) { 2607 case CHELSIO_T4: 2608 if (adap->flags & IS_VF) { 2609 reg_ranges = t4vf_reg_ranges; 2610 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges); 2611 } else { 2612 reg_ranges = t4_reg_ranges; 2613 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges); 2614 } 2615 break; 2616 2617 case CHELSIO_T5: 2618 if (adap->flags & IS_VF) { 2619 reg_ranges = t5vf_reg_ranges; 2620 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges); 2621 } else { 2622 reg_ranges = t5_reg_ranges; 2623 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); 2624 } 2625 break; 2626 2627 case CHELSIO_T6: 2628 if (adap->flags & IS_VF) { 2629 reg_ranges = t6vf_reg_ranges; 2630 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges); 2631 } else { 2632 reg_ranges = t6_reg_ranges; 2633 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); 2634 } 2635 break; 2636 2637 default: 2638 CH_ERR(adap, 2639 "Unsupported chip version %d\n", chip_version); 2640 return; 2641 } 2642 2643 /* 2644 * Clear the register buffer and insert the appropriate register 2645 * values selected by the above register ranges. 2646 */ 2647 memset(buf, 0, buf_size); 2648 for (range = 0; range < reg_ranges_size; range += 2) { 2649 unsigned int reg = reg_ranges[range]; 2650 unsigned int last_reg = reg_ranges[range + 1]; 2651 u32 *bufp = (u32 *)(buf + reg); 2652 2653 /* 2654 * Iterate across the register range filling in the register 2655 * buffer but don't write past the end of the register buffer. 2656 */ 2657 while (reg <= last_reg && bufp < buf_end) { 2658 *bufp++ = t4_read_reg(adap, reg); 2659 reg += sizeof(u32); 2660 } 2661 } 2662 } 2663 2664 /* 2665 * Partial EEPROM Vital Product Data structure. Includes only the ID and 2666 * VPD-R sections. 2667 */ 2668 struct t4_vpd_hdr { 2669 u8 id_tag; 2670 u8 id_len[2]; 2671 u8 id_data[ID_LEN]; 2672 u8 vpdr_tag; 2673 u8 vpdr_len[2]; 2674 }; 2675 2676 /* 2677 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 
2678 */ 2679 #define EEPROM_DELAY 10 /* 10us per poll spin */ 2680 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */ 2681 2682 #define EEPROM_STAT_ADDR 0x7bfc 2683 #define VPD_SIZE 0x800 2684 #define VPD_BASE 0x400 2685 #define VPD_BASE_OLD 0 2686 #define VPD_LEN 1024 2687 #define VPD_INFO_FLD_HDR_SIZE 3 2688 #define CHELSIO_VPD_UNIQUE_ID 0x82 2689 2690 /* 2691 * Small utility function to wait till any outstanding VPD Access is complete. 2692 * We have a per-adapter state variable "VPD Busy" to indicate when we have a 2693 * VPD Access in flight. This allows us to handle the problem of having a 2694 * previous VPD Access time out and prevent an attempt to inject a new VPD 2695 * Request before any in-flight VPD reguest has completed. 2696 */ 2697 static int t4_seeprom_wait(struct adapter *adapter) 2698 { 2699 unsigned int base = adapter->params.pci.vpd_cap_addr; 2700 int max_poll; 2701 2702 /* 2703 * If no VPD Access is in flight, we can just return success right 2704 * away. 2705 */ 2706 if (!adapter->vpd_busy) 2707 return 0; 2708 2709 /* 2710 * Poll the VPD Capability Address/Flag register waiting for it 2711 * to indicate that the operation is complete. 2712 */ 2713 max_poll = EEPROM_MAX_POLL; 2714 do { 2715 u16 val; 2716 2717 udelay(EEPROM_DELAY); 2718 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val); 2719 2720 /* 2721 * If the operation is complete, mark the VPD as no longer 2722 * busy and return success. 2723 */ 2724 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) { 2725 adapter->vpd_busy = 0; 2726 return 0; 2727 } 2728 } while (--max_poll); 2729 2730 /* 2731 * Failure! Note that we leave the VPD Busy status set in order to 2732 * avoid pushing a new VPD Access request into the VPD Capability till 2733 * the current operation eventually succeeds. It's a bug to issue a 2734 * new request when an existing request is in flight and will result 2735 * in corrupt hardware state. 
2736 */ 2737 return -ETIMEDOUT; 2738 } 2739 2740 /** 2741 * t4_seeprom_read - read a serial EEPROM location 2742 * @adapter: adapter to read 2743 * @addr: EEPROM virtual address 2744 * @data: where to store the read data 2745 * 2746 * Read a 32-bit word from a location in serial EEPROM using the card's PCI 2747 * VPD capability. Note that this function must be called with a virtual 2748 * address. 2749 */ 2750 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data) 2751 { 2752 unsigned int base = adapter->params.pci.vpd_cap_addr; 2753 int ret; 2754 2755 /* 2756 * VPD Accesses must alway be 4-byte aligned! 2757 */ 2758 if (addr >= EEPROMVSIZE || (addr & 3)) 2759 return -EINVAL; 2760 2761 /* 2762 * Wait for any previous operation which may still be in flight to 2763 * complete. 2764 */ 2765 ret = t4_seeprom_wait(adapter); 2766 if (ret) { 2767 CH_ERR(adapter, "VPD still busy from previous operation\n"); 2768 return ret; 2769 } 2770 2771 /* 2772 * Issue our new VPD Read request, mark the VPD as being busy and wait 2773 * for our request to complete. If it doesn't complete, note the 2774 * error and return it to our caller. Note that we do not reset the 2775 * VPD Busy status! 2776 */ 2777 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr); 2778 adapter->vpd_busy = 1; 2779 adapter->vpd_flag = PCI_VPD_ADDR_F; 2780 ret = t4_seeprom_wait(adapter); 2781 if (ret) { 2782 CH_ERR(adapter, "VPD read of address %#x failed\n", addr); 2783 return ret; 2784 } 2785 2786 /* 2787 * Grab the returned data, swizzle it into our endianness and 2788 * return success. 2789 */ 2790 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data); 2791 *data = le32_to_cpu(*data); 2792 return 0; 2793 } 2794 2795 /** 2796 * t4_seeprom_write - write a serial EEPROM location 2797 * @adapter: adapter to write 2798 * @addr: virtual EEPROM address 2799 * @data: value to write 2800 * 2801 * Write a 32-bit word to a location in serial EEPROM using the card's PCI 2802 * VPD capability. 
Note that this function must be called with a virtual 2803 * address. 2804 */ 2805 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data) 2806 { 2807 unsigned int base = adapter->params.pci.vpd_cap_addr; 2808 int ret; 2809 u32 stats_reg; 2810 int max_poll; 2811 2812 /* 2813 * VPD Accesses must alway be 4-byte aligned! 2814 */ 2815 if (addr >= EEPROMVSIZE || (addr & 3)) 2816 return -EINVAL; 2817 2818 /* 2819 * Wait for any previous operation which may still be in flight to 2820 * complete. 2821 */ 2822 ret = t4_seeprom_wait(adapter); 2823 if (ret) { 2824 CH_ERR(adapter, "VPD still busy from previous operation\n"); 2825 return ret; 2826 } 2827 2828 /* 2829 * Issue our new VPD Read request, mark the VPD as being busy and wait 2830 * for our request to complete. If it doesn't complete, note the 2831 * error and return it to our caller. Note that we do not reset the 2832 * VPD Busy status! 2833 */ 2834 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 2835 cpu_to_le32(data)); 2836 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, 2837 (u16)addr | PCI_VPD_ADDR_F); 2838 adapter->vpd_busy = 1; 2839 adapter->vpd_flag = 0; 2840 ret = t4_seeprom_wait(adapter); 2841 if (ret) { 2842 CH_ERR(adapter, "VPD write of address %#x failed\n", addr); 2843 return ret; 2844 } 2845 2846 /* 2847 * Reset PCI_VPD_DATA register after a transaction and wait for our 2848 * request to complete. If it doesn't complete, return error. 2849 */ 2850 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0); 2851 max_poll = EEPROM_MAX_POLL; 2852 do { 2853 udelay(EEPROM_DELAY); 2854 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg); 2855 } while ((stats_reg & 0x1) && --max_poll); 2856 if (!max_poll) 2857 return -ETIMEDOUT; 2858 2859 /* Return success! 
*/ 2860 return 0; 2861 } 2862 2863 /** 2864 * t4_eeprom_ptov - translate a physical EEPROM address to virtual 2865 * @phys_addr: the physical EEPROM address 2866 * @fn: the PCI function number 2867 * @sz: size of function-specific area 2868 * 2869 * Translate a physical EEPROM address to virtual. The first 1K is 2870 * accessed through virtual addresses starting at 31K, the rest is 2871 * accessed through virtual addresses starting at 0. 2872 * 2873 * The mapping is as follows: 2874 * [0..1K) -> [31K..32K) 2875 * [1K..1K+A) -> [ES-A..ES) 2876 * [1K+A..ES) -> [0..ES-A-1K) 2877 * 2878 * where A = @fn * @sz, and ES = EEPROM size. 2879 */ 2880 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) 2881 { 2882 fn *= sz; 2883 if (phys_addr < 1024) 2884 return phys_addr + (31 << 10); 2885 if (phys_addr < 1024 + fn) 2886 return EEPROMSIZE - fn + phys_addr - 1024; 2887 if (phys_addr < EEPROMSIZE) 2888 return phys_addr - 1024 - fn; 2889 return -EINVAL; 2890 } 2891 2892 /** 2893 * t4_seeprom_wp - enable/disable EEPROM write protection 2894 * @adapter: the adapter 2895 * @enable: whether to enable or disable write protection 2896 * 2897 * Enables or disables write protection on the serial EEPROM. 2898 */ 2899 int t4_seeprom_wp(struct adapter *adapter, int enable) 2900 { 2901 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 2902 } 2903 2904 /** 2905 * get_vpd_keyword_val - Locates an information field keyword in the VPD 2906 * @v: Pointer to buffered vpd data structure 2907 * @kw: The keyword to search for 2908 * 2909 * Returns the value of the information field keyword or 2910 * -ENOENT otherwise. 
2911 */ 2912 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw) 2913 { 2914 int i; 2915 unsigned int offset , len; 2916 const u8 *buf = (const u8 *)v; 2917 const u8 *vpdr_len = &v->vpdr_len[0]; 2918 offset = sizeof(struct t4_vpd_hdr); 2919 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8); 2920 2921 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) { 2922 return -ENOENT; 2923 } 2924 2925 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) { 2926 if(memcmp(buf + i , kw , 2) == 0){ 2927 i += VPD_INFO_FLD_HDR_SIZE; 2928 return i; 2929 } 2930 2931 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2]; 2932 } 2933 2934 return -ENOENT; 2935 } 2936 2937 2938 /** 2939 * get_vpd_params - read VPD parameters from VPD EEPROM 2940 * @adapter: adapter to read 2941 * @p: where to store the parameters 2942 * @vpd: caller provided temporary space to read the VPD into 2943 * 2944 * Reads card parameters stored in VPD EEPROM. 2945 */ 2946 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p, 2947 u8 *vpd) 2948 { 2949 int i, ret, addr; 2950 int ec, sn, pn, na; 2951 u8 csum; 2952 const struct t4_vpd_hdr *v; 2953 2954 /* 2955 * Card information normally starts at VPD_BASE but early cards had 2956 * it at 0. 2957 */ 2958 ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd)); 2959 if (ret) 2960 return (ret); 2961 2962 /* 2963 * The VPD shall have a unique identifier specified by the PCI SIG. 2964 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD 2965 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software 2966 * is expected to automatically put this entry at the 2967 * beginning of the VPD. 2968 */ 2969 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? 
VPD_BASE : VPD_BASE_OLD; 2970 2971 for (i = 0; i < VPD_LEN; i += 4) { 2972 ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i)); 2973 if (ret) 2974 return ret; 2975 } 2976 v = (const struct t4_vpd_hdr *)vpd; 2977 2978 #define FIND_VPD_KW(var,name) do { \ 2979 var = get_vpd_keyword_val(v , name); \ 2980 if (var < 0) { \ 2981 CH_ERR(adapter, "missing VPD keyword " name "\n"); \ 2982 return -EINVAL; \ 2983 } \ 2984 } while (0) 2985 2986 FIND_VPD_KW(i, "RV"); 2987 for (csum = 0; i >= 0; i--) 2988 csum += vpd[i]; 2989 2990 if (csum) { 2991 CH_ERR(adapter, 2992 "corrupted VPD EEPROM, actual csum %u\n", csum); 2993 return -EINVAL; 2994 } 2995 2996 FIND_VPD_KW(ec, "EC"); 2997 FIND_VPD_KW(sn, "SN"); 2998 FIND_VPD_KW(pn, "PN"); 2999 FIND_VPD_KW(na, "NA"); 3000 #undef FIND_VPD_KW 3001 3002 memcpy(p->id, v->id_data, ID_LEN); 3003 strstrip(p->id); 3004 memcpy(p->ec, vpd + ec, EC_LEN); 3005 strstrip(p->ec); 3006 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; 3007 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 3008 strstrip(p->sn); 3009 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2]; 3010 memcpy(p->pn, vpd + pn, min(i, PN_LEN)); 3011 strstrip((char *)p->pn); 3012 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2]; 3013 memcpy(p->na, vpd + na, min(i, MACADDR_LEN)); 3014 strstrip((char *)p->na); 3015 3016 return 0; 3017 } 3018 3019 /* serial flash and firmware constants and flash config file constants */ 3020 enum { 3021 SF_ATTEMPTS = 10, /* max retries for SF operations */ 3022 3023 /* flash command opcodes */ 3024 SF_PROG_PAGE = 2, /* program 256B page */ 3025 SF_WR_DISABLE = 4, /* disable writes */ 3026 SF_RD_STATUS = 5, /* read status register */ 3027 SF_WR_ENABLE = 6, /* enable writes */ 3028 SF_RD_DATA_FAST = 0xb, /* read flash */ 3029 SF_RD_ID = 0x9f, /* read ID */ 3030 SF_ERASE_SECTOR = 0xd8, /* erase 64KB sector */ 3031 }; 3032 3033 /** 3034 * sf1_read - read data from the serial flash 3035 * @adapter: the adapter 3036 * @byte_cnt: number of bytes to read 3037 * @cont: whether another 
operation will be chained 3038 * @lock: whether to lock SF for PL access only 3039 * @valp: where to store the read data 3040 * 3041 * Reads up to 4 bytes of data from the serial flash. The location of 3042 * the read needs to be specified prior to calling this by issuing the 3043 * appropriate commands to the serial flash. 3044 */ 3045 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, 3046 int lock, u32 *valp) 3047 { 3048 int ret; 3049 3050 if (!byte_cnt || byte_cnt > 4) 3051 return -EINVAL; 3052 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) 3053 return -EBUSY; 3054 t4_write_reg(adapter, A_SF_OP, 3055 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); 3056 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); 3057 if (!ret) 3058 *valp = t4_read_reg(adapter, A_SF_DATA); 3059 return ret; 3060 } 3061 3062 /** 3063 * sf1_write - write data to the serial flash 3064 * @adapter: the adapter 3065 * @byte_cnt: number of bytes to write 3066 * @cont: whether another operation will be chained 3067 * @lock: whether to lock SF for PL access only 3068 * @val: value to write 3069 * 3070 * Writes up to 4 bytes of data to the serial flash. The location of 3071 * the write needs to be specified prior to calling this by issuing the 3072 * appropriate commands to the serial flash. 
3073 */ 3074 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, 3075 int lock, u32 val) 3076 { 3077 if (!byte_cnt || byte_cnt > 4) 3078 return -EINVAL; 3079 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) 3080 return -EBUSY; 3081 t4_write_reg(adapter, A_SF_DATA, val); 3082 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) | 3083 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); 3084 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); 3085 } 3086 3087 /** 3088 * flash_wait_op - wait for a flash operation to complete 3089 * @adapter: the adapter 3090 * @attempts: max number of polls of the status register 3091 * @delay: delay between polls in ms 3092 * 3093 * Wait for a flash operation to complete by polling the status register. 3094 */ 3095 static int flash_wait_op(struct adapter *adapter, int attempts, int delay) 3096 { 3097 int ret; 3098 u32 status; 3099 3100 while (1) { 3101 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || 3102 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) 3103 return ret; 3104 if (!(status & 1)) 3105 return 0; 3106 if (--attempts == 0) 3107 return -EAGAIN; 3108 if (delay) 3109 msleep(delay); 3110 } 3111 } 3112 3113 /** 3114 * t4_read_flash - read words from serial flash 3115 * @adapter: the adapter 3116 * @addr: the start address for the read 3117 * @nwords: how many 32-bit words to read 3118 * @data: where to store the read data 3119 * @byte_oriented: whether to store data as bytes or as words 3120 * 3121 * Read the specified number of 32-bit words from the serial flash. 3122 * If @byte_oriented is set the read data is stored as a byte array 3123 * (i.e., big-endian), otherwise as 32-bit words in the platform's 3124 * natural endianness. 
3125 */ 3126 int t4_read_flash(struct adapter *adapter, unsigned int addr, 3127 unsigned int nwords, u32 *data, int byte_oriented) 3128 { 3129 int ret; 3130 3131 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) 3132 return -EINVAL; 3133 3134 addr = swab32(addr) | SF_RD_DATA_FAST; 3135 3136 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || 3137 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) 3138 return ret; 3139 3140 for ( ; nwords; nwords--, data++) { 3141 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); 3142 if (nwords == 1) 3143 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3144 if (ret) 3145 return ret; 3146 if (byte_oriented) 3147 *data = (__force __u32)(cpu_to_be32(*data)); 3148 } 3149 return 0; 3150 } 3151 3152 /** 3153 * t4_write_flash - write up to a page of data to the serial flash 3154 * @adapter: the adapter 3155 * @addr: the start address to write 3156 * @n: length of data to write in bytes 3157 * @data: the data to write 3158 * @byte_oriented: whether to store data as bytes or as words 3159 * 3160 * Writes up to a page of data (256 bytes) to the serial flash starting 3161 * at the given address. All the data must be written to the same page. 3162 * If @byte_oriented is set the write data is stored as byte stream 3163 * (i.e. matches what on disk), otherwise in big-endian. 
3164 */ 3165 int t4_write_flash(struct adapter *adapter, unsigned int addr, 3166 unsigned int n, const u8 *data, int byte_oriented) 3167 { 3168 int ret; 3169 u32 buf[SF_PAGE_SIZE / 4]; 3170 unsigned int i, c, left, val, offset = addr & 0xff; 3171 3172 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) 3173 return -EINVAL; 3174 3175 val = swab32(addr) | SF_PROG_PAGE; 3176 3177 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 3178 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) 3179 goto unlock; 3180 3181 for (left = n; left; left -= c) { 3182 c = min(left, 4U); 3183 for (val = 0, i = 0; i < c; ++i) 3184 val = (val << 8) + *data++; 3185 3186 if (!byte_oriented) 3187 val = cpu_to_be32(val); 3188 3189 ret = sf1_write(adapter, c, c != left, 1, val); 3190 if (ret) 3191 goto unlock; 3192 } 3193 ret = flash_wait_op(adapter, 8, 1); 3194 if (ret) 3195 goto unlock; 3196 3197 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3198 3199 /* Read the page to verify the write succeeded */ 3200 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 3201 byte_oriented); 3202 if (ret) 3203 return ret; 3204 3205 if (memcmp(data - n, (u8 *)buf + offset, n)) { 3206 CH_ERR(adapter, 3207 "failed to correctly write the flash page at %#x\n", 3208 addr); 3209 return -EIO; 3210 } 3211 return 0; 3212 3213 unlock: 3214 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3215 return ret; 3216 } 3217 3218 /** 3219 * t4_get_fw_version - read the firmware version 3220 * @adapter: the adapter 3221 * @vers: where to place the version 3222 * 3223 * Reads the FW version from flash. 
3224 */ 3225 int t4_get_fw_version(struct adapter *adapter, u32 *vers) 3226 { 3227 return t4_read_flash(adapter, FLASH_FW_START + 3228 offsetof(struct fw_hdr, fw_ver), 1, 3229 vers, 0); 3230 } 3231 3232 /** 3233 * t4_get_bs_version - read the firmware bootstrap version 3234 * @adapter: the adapter 3235 * @vers: where to place the version 3236 * 3237 * Reads the FW Bootstrap version from flash. 3238 */ 3239 int t4_get_bs_version(struct adapter *adapter, u32 *vers) 3240 { 3241 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START + 3242 offsetof(struct fw_hdr, fw_ver), 1, 3243 vers, 0); 3244 } 3245 3246 /** 3247 * t4_get_tp_version - read the TP microcode version 3248 * @adapter: the adapter 3249 * @vers: where to place the version 3250 * 3251 * Reads the TP microcode version from flash. 3252 */ 3253 int t4_get_tp_version(struct adapter *adapter, u32 *vers) 3254 { 3255 return t4_read_flash(adapter, FLASH_FW_START + 3256 offsetof(struct fw_hdr, tp_microcode_ver), 3257 1, vers, 0); 3258 } 3259 3260 /** 3261 * t4_get_exprom_version - return the Expansion ROM version (if any) 3262 * @adapter: the adapter 3263 * @vers: where to place the version 3264 * 3265 * Reads the Expansion ROM header from FLASH and returns the version 3266 * number (if present) through the @vers return value pointer. We return 3267 * this in the Firmware Version Format since it's convenient. Return 3268 * 0 on success, -ENOENT if no Expansion ROM is present. 
3269 */ 3270 int t4_get_exprom_version(struct adapter *adap, u32 *vers) 3271 { 3272 struct exprom_header { 3273 unsigned char hdr_arr[16]; /* must start with 0x55aa */ 3274 unsigned char hdr_ver[4]; /* Expansion ROM version */ 3275 } *hdr; 3276 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header), 3277 sizeof(u32))]; 3278 int ret; 3279 3280 ret = t4_read_flash(adap, FLASH_EXP_ROM_START, 3281 ARRAY_SIZE(exprom_header_buf), exprom_header_buf, 3282 0); 3283 if (ret) 3284 return ret; 3285 3286 hdr = (struct exprom_header *)exprom_header_buf; 3287 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa) 3288 return -ENOENT; 3289 3290 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) | 3291 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) | 3292 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) | 3293 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3])); 3294 return 0; 3295 } 3296 3297 /** 3298 * t4_get_scfg_version - return the Serial Configuration version 3299 * @adapter: the adapter 3300 * @vers: where to place the version 3301 * 3302 * Reads the Serial Configuration Version via the Firmware interface 3303 * (thus this can only be called once we're ready to issue Firmware 3304 * commands). The format of the Serial Configuration version is 3305 * adapter specific. Returns 0 on success, an error on failure. 3306 * 3307 * Note that early versions of the Firmware didn't include the ability 3308 * to retrieve the Serial Configuration version, so we zero-out the 3309 * return-value parameter in that case to avoid leaving it with 3310 * garbage in it. 3311 * 3312 * Also note that the Firmware will return its cached copy of the Serial 3313 * Initialization Revision ID, not the actual Revision ID as written in 3314 * the Serial EEPROM. This is only an issue if a new VPD has been written 3315 * and the Firmware/Chip haven't yet gone through a RESET sequence. 
So 3316 * it's best to defer calling this routine till after a FW_RESET_CMD has 3317 * been issued if the Host Driver will be performing a full adapter 3318 * initialization. 3319 */ 3320 int t4_get_scfg_version(struct adapter *adapter, u32 *vers) 3321 { 3322 u32 scfgrev_param; 3323 int ret; 3324 3325 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3326 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV)); 3327 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 3328 1, &scfgrev_param, vers); 3329 if (ret) 3330 *vers = 0; 3331 return ret; 3332 } 3333 3334 /** 3335 * t4_get_vpd_version - return the VPD version 3336 * @adapter: the adapter 3337 * @vers: where to place the version 3338 * 3339 * Reads the VPD via the Firmware interface (thus this can only be called 3340 * once we're ready to issue Firmware commands). The format of the 3341 * VPD version is adapter specific. Returns 0 on success, an error on 3342 * failure. 3343 * 3344 * Note that early versions of the Firmware didn't include the ability 3345 * to retrieve the VPD version, so we zero-out the return-value parameter 3346 * in that case to avoid leaving it with garbage in it. 3347 * 3348 * Also note that the Firmware will return its cached copy of the VPD 3349 * Revision ID, not the actual Revision ID as written in the Serial 3350 * EEPROM. This is only an issue if a new VPD has been written and the 3351 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best 3352 * to defer calling this routine till after a FW_RESET_CMD has been issued 3353 * if the Host Driver will be performing a full adapter initialization. 
3354 */ 3355 int t4_get_vpd_version(struct adapter *adapter, u32 *vers) 3356 { 3357 u32 vpdrev_param; 3358 int ret; 3359 3360 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3361 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV)); 3362 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 3363 1, &vpdrev_param, vers); 3364 if (ret) 3365 *vers = 0; 3366 return ret; 3367 } 3368 3369 /** 3370 * t4_get_version_info - extract various chip/firmware version information 3371 * @adapter: the adapter 3372 * 3373 * Reads various chip/firmware version numbers and stores them into the 3374 * adapter Adapter Parameters structure. If any of the efforts fails 3375 * the first failure will be returned, but all of the version numbers 3376 * will be read. 3377 */ 3378 int t4_get_version_info(struct adapter *adapter) 3379 { 3380 int ret = 0; 3381 3382 #define FIRST_RET(__getvinfo) \ 3383 do { \ 3384 int __ret = __getvinfo; \ 3385 if (__ret && !ret) \ 3386 ret = __ret; \ 3387 } while (0) 3388 3389 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers)); 3390 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers)); 3391 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers)); 3392 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers)); 3393 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers)); 3394 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers)); 3395 3396 #undef FIRST_RET 3397 3398 return ret; 3399 } 3400 3401 /** 3402 * t4_flash_erase_sectors - erase a range of flash sectors 3403 * @adapter: the adapter 3404 * @start: the first sector to erase 3405 * @end: the last sector to erase 3406 * 3407 * Erases the sectors in the given inclusive range. 
3408 */ 3409 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) 3410 { 3411 int ret = 0; 3412 3413 if (end >= adapter->params.sf_nsec) 3414 return -EINVAL; 3415 3416 while (start <= end) { 3417 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 3418 (ret = sf1_write(adapter, 4, 0, 1, 3419 SF_ERASE_SECTOR | (start << 8))) != 0 || 3420 (ret = flash_wait_op(adapter, 14, 500)) != 0) { 3421 CH_ERR(adapter, 3422 "erase of flash sector %d failed, error %d\n", 3423 start, ret); 3424 break; 3425 } 3426 start++; 3427 } 3428 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3429 return ret; 3430 } 3431 3432 /** 3433 * t4_flash_cfg_addr - return the address of the flash configuration file 3434 * @adapter: the adapter 3435 * 3436 * Return the address within the flash where the Firmware Configuration 3437 * File is stored, or an error if the device FLASH is too small to contain 3438 * a Firmware Configuration File. 3439 */ 3440 int t4_flash_cfg_addr(struct adapter *adapter) 3441 { 3442 /* 3443 * If the device FLASH isn't large enough to hold a Firmware 3444 * Configuration File, return an error. 3445 */ 3446 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE) 3447 return -ENOSPC; 3448 3449 return FLASH_CFG_START; 3450 } 3451 3452 /* 3453 * Return TRUE if the specified firmware matches the adapter. I.e. T4 3454 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead 3455 * and emit an error message for mismatched firmware to save our caller the 3456 * effort ... 3457 */ 3458 static int t4_fw_matches_chip(struct adapter *adap, 3459 const struct fw_hdr *hdr) 3460 { 3461 /* 3462 * The expression below will return FALSE for any unsupported adapter 3463 * which will keep us "honest" in the future ... 
3464 */ 3465 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) || 3466 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) || 3467 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6)) 3468 return 1; 3469 3470 CH_ERR(adap, 3471 "FW image (%d) is not suitable for this adapter (%d)\n", 3472 hdr->chip, chip_id(adap)); 3473 return 0; 3474 } 3475 3476 /** 3477 * t4_load_fw - download firmware 3478 * @adap: the adapter 3479 * @fw_data: the firmware image to write 3480 * @size: image size 3481 * 3482 * Write the supplied firmware image to the card's serial flash. 3483 */ 3484 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) 3485 { 3486 u32 csum; 3487 int ret, addr; 3488 unsigned int i; 3489 u8 first_page[SF_PAGE_SIZE]; 3490 const u32 *p = (const u32 *)fw_data; 3491 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; 3492 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 3493 unsigned int fw_start_sec; 3494 unsigned int fw_start; 3495 unsigned int fw_size; 3496 3497 if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) { 3498 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC; 3499 fw_start = FLASH_FWBOOTSTRAP_START; 3500 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE; 3501 } else { 3502 fw_start_sec = FLASH_FW_START_SEC; 3503 fw_start = FLASH_FW_START; 3504 fw_size = FLASH_FW_MAX_SIZE; 3505 } 3506 3507 if (!size) { 3508 CH_ERR(adap, "FW image has no data\n"); 3509 return -EINVAL; 3510 } 3511 if (size & 511) { 3512 CH_ERR(adap, 3513 "FW image size not multiple of 512 bytes\n"); 3514 return -EINVAL; 3515 } 3516 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) { 3517 CH_ERR(adap, 3518 "FW image size differs from size in FW header\n"); 3519 return -EINVAL; 3520 } 3521 if (size > fw_size) { 3522 CH_ERR(adap, "FW image too large, max is %u bytes\n", 3523 fw_size); 3524 return -EFBIG; 3525 } 3526 if (!t4_fw_matches_chip(adap, hdr)) 3527 return -EINVAL; 3528 3529 for (csum = 0, i = 0; i < size / sizeof(csum); i++) 3530 csum += be32_to_cpu(p[i]); 3531 
3532 if (csum != 0xffffffff) { 3533 CH_ERR(adap, 3534 "corrupted firmware image, checksum %#x\n", csum); 3535 return -EINVAL; 3536 } 3537 3538 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ 3539 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); 3540 if (ret) 3541 goto out; 3542 3543 /* 3544 * We write the correct version at the end so the driver can see a bad 3545 * version if the FW write fails. Start by writing a copy of the 3546 * first page with a bad version. 3547 */ 3548 memcpy(first_page, fw_data, SF_PAGE_SIZE); 3549 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff); 3550 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1); 3551 if (ret) 3552 goto out; 3553 3554 addr = fw_start; 3555 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 3556 addr += SF_PAGE_SIZE; 3557 fw_data += SF_PAGE_SIZE; 3558 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1); 3559 if (ret) 3560 goto out; 3561 } 3562 3563 ret = t4_write_flash(adap, 3564 fw_start + offsetof(struct fw_hdr, fw_ver), 3565 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1); 3566 out: 3567 if (ret) 3568 CH_ERR(adap, "firmware download failed, error %d\n", 3569 ret); 3570 return ret; 3571 } 3572 3573 /** 3574 * t4_fwcache - firmware cache operation 3575 * @adap: the adapter 3576 * @op : the operation (flush or flush and invalidate) 3577 */ 3578 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) 3579 { 3580 struct fw_params_cmd c; 3581 3582 memset(&c, 0, sizeof(c)); 3583 c.op_to_vfn = 3584 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | 3585 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 3586 V_FW_PARAMS_CMD_PFN(adap->pf) | 3587 V_FW_PARAMS_CMD_VFN(0)); 3588 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 3589 c.param[0].mnem = 3590 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3591 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE)); 3592 c.param[0].val = (__force __be32)op; 3593 3594 return t4_wr_mbox(adap, adap->mbox, &c, 
/*
 * Read the CIM PIF logic-analyzer request and response buffers.
 * Temporarily disables LA capture (F_LADBGEN) while reading and restores
 * the previous A_CIM_DEBUGCFG value afterwards.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* NOTE(review): pointers skip 2 entries per row — presumably
		 * matches the hardware LA row layout; confirm against docs. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/*
 * Read the CIM MA logic-analyzer request and response buffers, 5 entries
 * out of every 8 per row.  LA capture is paused during the read.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/*
 * Read the ULP RX logic analyzer: 8 interleaved streams, ULPRX_LA_SIZE
 * entries each, written into la_buf with stride 8.
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}

/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
	unsigned int aneg, fc, fec, speed, rcap;

	fc = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	/* Only one FEC mode is advertised; RS takes precedence. */
	fec = 0;
	if (lc->requested_fec & FEC_RS)
		fec = FW_PORT_CAP_FEC_RS;
	else if (lc->requested_fec & FEC_BASER_RS)
		fec = FW_PORT_CAP_FEC_BASER_RS;
	else if (lc->requested_fec & FEC_RESERVED)
		fec = FW_PORT_CAP_FEC_RESERVED;

	if (!(lc->supported & FW_PORT_CAP_ANEG) ||
	    lc->requested_aneg == AUTONEG_DISABLE) {
		aneg = 0;
		/* requested_speed is in Gb/s. */
		switch (lc->requested_speed) {
		case 100:
			speed = FW_PORT_CAP_SPEED_100G;
			break;
		case 40:
			speed = FW_PORT_CAP_SPEED_40G;
			break;
		case 25:
			speed = FW_PORT_CAP_SPEED_25G;
			break;
		case 10:
			speed = FW_PORT_CAP_SPEED_10G;
			break;
		case 1:
			speed = FW_PORT_CAP_SPEED_1G;
			break;
		default:
			return -EINVAL;
			break;
		}
	} else {
		aneg = FW_PORT_CAP_ANEG;
		speed = lc->supported &
		    V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED);
	}

	/* Never request capabilities the port doesn't support. */
	rcap = aneg | speed | fc | fec;
	if ((rcap | lc->supported) != lc->supported) {
		CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
			lc->supported);
		rcap &= lc->supported;
	}
	rcap |= mdi;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
	    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			FW_LEN16(c));
	c.u.l1cfg.rcap = cpu_to_be32(rcap);

	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
	    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			FW_LEN16(c));
	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
register to process 3787 * @acts: table of interrupt actions 3788 * 3789 * A table driven interrupt handler that applies a set of masks to an 3790 * interrupt status word and performs the corresponding actions if the 3791 * interrupts described by the mask have occurred. The actions include 3792 * optionally emitting a warning or alert message. The table is terminated 3793 * by an entry specifying mask 0. Returns the number of fatal interrupt 3794 * conditions. 3795 */ 3796 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, 3797 const struct intr_info *acts) 3798 { 3799 int fatal = 0; 3800 unsigned int mask = 0; 3801 unsigned int status = t4_read_reg(adapter, reg); 3802 3803 for ( ; acts->mask; ++acts) { 3804 if (!(status & acts->mask)) 3805 continue; 3806 if (acts->fatal) { 3807 fatal++; 3808 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg, 3809 status & acts->mask); 3810 } else if (acts->msg) 3811 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg, 3812 status & acts->mask); 3813 if (acts->int_handler) 3814 acts->int_handler(adapter); 3815 mask |= acts->mask; 3816 } 3817 status &= mask; 3818 if (status) /* clear processed interrupts */ 3819 t4_write_reg(adapter, reg, status); 3820 return fatal; 3821 } 3822 3823 /* 3824 * Interrupt handler for the PCIE module. 
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	/* T4-only sysbus/port cause tables; T5+ use t5_pcie_intr_info. */
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	static const struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ F_READRSPERR, "Outbound read error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	if (is_t4(adapter))
		fat = t4_handle_intr_status(adapter,
					    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
					    sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
					    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
					    pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    t5_pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}
/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;
	u32 err;

	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
			"SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
			"SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
			"SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
			0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
			0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
			0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
			0 },
		{ F_ERR_ING_CTXT_PRIO,
			"SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
		  F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
		  "SGE PCIe error for a DBP thread", -1, 0 },
		{ 0 }
	};

	/* Conditions that exist only on T4/T5. */
	static const struct intr_info t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
			"SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/*
	 * For now, treat below interrupts as fatal so that we disable SGE and
	 * get better debug */
	static const struct intr_info t6_sge_intr_info[] = {
		{ F_FATAL_WRE_LEN,
			"SGE Actual WRE packet is less than advertized length",
			-1, 1 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
		((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
	if (v) {
		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
				(unsigned long long)v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
	}

	/* v doubles as the fatal-condition accumulator below. */
	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (chip_id(adapter) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
	else
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t6_sge_intr_info);

	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}

#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	u32 val, fw_err;
	int fat;

	fw_err = t4_read_reg(adapter, A_PCIE_FW);
	if (fw_err & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally wouldn't
	 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
	 * to make sure the Host sees the Firmware Crash.  So if we have a
	 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
	 * interrupt.
	 */
	val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
	if (val & F_TIMER0INT)
		if (!(fw_err & F_PCIE_FW_ERR) ||
		    (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
			t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
				     F_TIMER0INT);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}
4138 */ 4139 static void ulptx_intr_handler(struct adapter *adapter) 4140 { 4141 static const struct intr_info ulptx_intr_info[] = { 4142 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 4143 0 }, 4144 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 4145 0 }, 4146 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 4147 0 }, 4148 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 4149 0 }, 4150 { 0xfffffff, "ULPTX parity error", -1, 1 }, 4151 { 0 } 4152 }; 4153 4154 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info)) 4155 t4_fatal_err(adapter); 4156 } 4157 4158 /* 4159 * PM TX interrupt handler. 4160 */ 4161 static void pmtx_intr_handler(struct adapter *adapter) 4162 { 4163 static const struct intr_info pmtx_intr_info[] = { 4164 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 4165 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 4166 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 4167 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 4168 { 0xffffff0, "PMTX framing error", -1, 1 }, 4169 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 4170 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 4171 1 }, 4172 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 4173 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 4174 { 0 } 4175 }; 4176 4177 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info)) 4178 t4_fatal_err(adapter); 4179 } 4180 4181 /* 4182 * PM RX interrupt handler. 
4183 */ 4184 static void pmrx_intr_handler(struct adapter *adapter) 4185 { 4186 static const struct intr_info pmrx_intr_info[] = { 4187 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 4188 { 0x3ffff0, "PMRX framing error", -1, 1 }, 4189 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 4190 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 4191 1 }, 4192 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 4193 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 4194 { 0 } 4195 }; 4196 4197 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info)) 4198 t4_fatal_err(adapter); 4199 } 4200 4201 /* 4202 * CPL switch interrupt handler. 4203 */ 4204 static void cplsw_intr_handler(struct adapter *adapter) 4205 { 4206 static const struct intr_info cplsw_intr_info[] = { 4207 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 4208 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 4209 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 4210 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 4211 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 4212 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 4213 { 0 } 4214 }; 4215 4216 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info)) 4217 t4_fatal_err(adapter); 4218 } 4219 4220 /* 4221 * LE interrupt handler. 
/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	unsigned int chip_ver = chip_id(adap);
	static const struct intr_info le_intr_info[] = {
		{ F_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_PARITYERR, "LE parity error", -1, 1 },
		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	/* T6 moved these conditions to different bits. */
	static const struct intr_info t6_le_intr_info[] = {
		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
				  (chip_ver <= CHELSIO_T5) ?
				  le_intr_info : t6_le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
		      F_ECC_UE_INT_CAUSE)
4325 */ 4326 static void mem_intr_handler(struct adapter *adapter, int idx) 4327 { 4328 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" }; 4329 4330 unsigned int addr, cnt_addr, v; 4331 4332 if (idx <= MEM_EDC1) { 4333 addr = EDC_REG(A_EDC_INT_CAUSE, idx); 4334 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx); 4335 } else if (idx == MEM_MC) { 4336 if (is_t4(adapter)) { 4337 addr = A_MC_INT_CAUSE; 4338 cnt_addr = A_MC_ECC_STATUS; 4339 } else { 4340 addr = A_MC_P_INT_CAUSE; 4341 cnt_addr = A_MC_P_ECC_STATUS; 4342 } 4343 } else { 4344 addr = MC_REG(A_MC_P_INT_CAUSE, 1); 4345 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1); 4346 } 4347 4348 v = t4_read_reg(adapter, addr) & MEM_INT_MASK; 4349 if (v & F_PERR_INT_CAUSE) 4350 CH_ALERT(adapter, "%s FIFO parity error\n", 4351 name[idx]); 4352 if (v & F_ECC_CE_INT_CAUSE) { 4353 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr)); 4354 4355 if (idx <= MEM_EDC1) 4356 t4_edc_err_read(adapter, idx); 4357 4358 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT)); 4359 CH_WARN_RATELIMIT(adapter, 4360 "%u %s correctable ECC data error%s\n", 4361 cnt, name[idx], cnt > 1 ? "s" : ""); 4362 } 4363 if (v & F_ECC_UE_INT_CAUSE) 4364 CH_ALERT(adapter, 4365 "%s uncorrectable ECC data error\n", name[idx]); 4366 4367 t4_write_reg(adapter, addr, v); 4368 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE)) 4369 t4_fatal_err(adapter); 4370 } 4371 4372 /* 4373 * MA interrupt handler. 
4374 */ 4375 static void ma_intr_handler(struct adapter *adapter) 4376 { 4377 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE); 4378 4379 if (status & F_MEM_PERR_INT_CAUSE) { 4380 CH_ALERT(adapter, 4381 "MA parity error, parity status %#x\n", 4382 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1)); 4383 if (is_t5(adapter)) 4384 CH_ALERT(adapter, 4385 "MA parity error, parity status %#x\n", 4386 t4_read_reg(adapter, 4387 A_MA_PARITY_ERROR_STATUS2)); 4388 } 4389 if (status & F_MEM_WRAP_INT_CAUSE) { 4390 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS); 4391 CH_ALERT(adapter, "MA address wrap-around error by " 4392 "client %u to address %#x\n", 4393 G_MEM_WRAP_CLIENT_NUM(v), 4394 G_MEM_WRAP_ADDRESS(v) << 4); 4395 } 4396 t4_write_reg(adapter, A_MA_INT_CAUSE, status); 4397 t4_fatal_err(adapter); 4398 } 4399 4400 /* 4401 * SMB interrupt handler. 4402 */ 4403 static void smb_intr_handler(struct adapter *adap) 4404 { 4405 static const struct intr_info smb_intr_info[] = { 4406 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 4407 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 4408 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 4409 { 0 } 4410 }; 4411 4412 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info)) 4413 t4_fatal_err(adap); 4414 } 4415 4416 /* 4417 * NC-SI interrupt handler. 4418 */ 4419 static void ncsi_intr_handler(struct adapter *adap) 4420 { 4421 static const struct intr_info ncsi_intr_info[] = { 4422 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 4423 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 4424 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 4425 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 4426 { 0 } 4427 }; 4428 4429 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info)) 4430 t4_fatal_err(adap); 4431 } 4432 4433 /* 4434 * XGMAC interrupt handler. 
4435 */ 4436 static void xgmac_intr_handler(struct adapter *adap, int port) 4437 { 4438 u32 v, int_cause_reg; 4439 4440 if (is_t4(adap)) 4441 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE); 4442 else 4443 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE); 4444 4445 v = t4_read_reg(adap, int_cause_reg); 4446 4447 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR); 4448 if (!v) 4449 return; 4450 4451 if (v & F_TXFIFO_PRTY_ERR) 4452 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", 4453 port); 4454 if (v & F_RXFIFO_PRTY_ERR) 4455 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", 4456 port); 4457 t4_write_reg(adap, int_cause_reg, v); 4458 t4_fatal_err(adap); 4459 } 4460 4461 /* 4462 * PL interrupt handler. 4463 */ 4464 static void pl_intr_handler(struct adapter *adap) 4465 { 4466 static const struct intr_info pl_intr_info[] = { 4467 { F_FATALPERR, "Fatal parity error", -1, 1 }, 4468 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 4469 { 0 } 4470 }; 4471 4472 static const struct intr_info t5_pl_intr_info[] = { 4473 { F_FATALPERR, "Fatal parity error", -1, 1 }, 4474 { 0 } 4475 }; 4476 4477 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, 4478 is_t4(adap) ? 4479 pl_intr_info : t5_pl_intr_info)) 4480 t4_fatal_err(adap); 4481 } 4482 4483 #define PF_INTR_MASK (F_PFSW | F_PFCIM) 4484 4485 /** 4486 * t4_slow_intr_handler - control path interrupt handler 4487 * @adapter: the adapter 4488 * 4489 * T4 interrupt handler for non-data global interrupt events, e.g., errors. 4490 * The designation 'slow' is because it involves register reads, while 4491 * data interrupts typically don't involve any MMIOs. 
/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 *
 *	Returns 0 if no relevant cause bit was set, 1 otherwise.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_MAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_MAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_MAC2)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_MAC3)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC0)
		mem_intr_handler(adapter, MEM_MC);
	if (is_t5(adapter) && (cause & F_MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master.
	 */
	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, A_PL_INT_CAUSE);	/* flush */
	return 1;
}

/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts.  Interrupts are already
 *	enabled at each module, here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.  Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	/* Chip-specific extra SGE interrupt enables. */
	if (chip_id(adapter) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The WHOAMI PF field moved in T6. */
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	/* Stop routing global interrupts to this PF. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
}

/**
 *	t4_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.  The caller must be a PCI function managing
 *	global interrupts.
 */
void t4_intr_clear(struct adapter *adapter)
{
	/* Cause registers common to all supported chips; each is cleared
	 * by writing all-ones (write-1-to-clear semantics). */
	static const unsigned int cause_reg[] = {
		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		MYPF_REG(A_PL_PF_INT_CAUSE),
		A_PL_PL_INT_CAUSE,
		A_LE_DB_INT_CAUSE,
	};

	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	/* The MC cause register moved between T4 and T5+. */
	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
		     A_MC_P_INT_CAUSE, 0xffffffff);

	if (is_t4(adapter)) {
		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
			     0xffffffff);
		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
			     0xffffffff);
	} else
		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);

	/* Finally clear the top-level concentrator and flush the write. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);	/* flush */
}

/**
 *	hash_mac_addr - return the hash value of a MAC address
 *	@addr: the 48-bit Ethernet MAC address
 *
 *	Hashes a MAC address according to the hash function used by HW inexact
 *	(hash) address matching.  Returns a 6-bit hash (0..63).
 */
static int hash_mac_addr(const u8 *addr)
{
	/* Fold the 48-bit address into 24 bits, then into 6. */
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}

/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	/* Cursor into @rspq; wraps back to the start when exhausted. */
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 *	t4_config_glbl_rss - configure the global RSS mode
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@mode: global RSS mode
 *	@flags: mode-specific flags
 *
 *	Sets the global RSS mode.  Only the MANUAL and BASICVIRTUAL modes
 *	are supported; any other @mode yields -EINVAL.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_keymode =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
	} else
		return -EINVAL;
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_config_vi_rss - configure per VI RSS settings
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: the VI id
 *	@flags: RSS flags
 *	@defq: id of the default RSS queue for the VI.
 *	@skeyidx: RSS secret key table index for non-global mode
 *	@skey: RSS vf_scramble key for VI.
 *
 *	Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
		     unsigned int skey)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);

	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/* Read an RSS table row.  Writing 0xfff00000 | row selects the row and
 * starts the lookup; completion is signalled by F_LKPTBLROWVLD. */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}

/**
 *	t4_read_rss - read the contents of the RSS mapping table
 *	@adapter: the adapter
 *	@map: holds the contents of the RSS mapping table
 *
 *	Reads the contents of the RSS hash->queue mapping table.
 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 val;
	int i, ret;

	/* Each row holds two queue entries, hence RSS_NENTRIES / 2 reads. */
	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
		ret = rd_rss_row(adapter, i, &val);
		if (ret)
			return ret;
		*map++ = G_LKPTBLQUEUE0(val);
		*map++ = G_LKPTBLQUEUE1(val);
	}
	return 0;
}

/**
 *	t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 *	@adap: the adapter
 *	@cmd: TP fw ldst address space type
 *	@vals: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: Read (1) or Write (0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Access TP indirect registers through LDST.  One firmware mailbox
 *	command is issued per register; returns 0 or the first mailbox error.
 **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}

/**
 *	t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 *	@adap: the adapter
 *	@reg_addr: Address Register
 *	@reg_data: Data register
 *	@buff: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: READ(1) or WRITE(0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read/Write TP indirect registers through LDST if possible.
 *	Else, use backdoor access
 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	/* Stays non-zero (so we fall back to backdoor access) unless the
	 * LDST path below runs and succeeds. */
	int rc = -EINVAL;
	int cmd;

	/* Map the indirect address register to its LDST address space. */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}

/**
 *	t4_tp_pio_read - Read TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep
 *	while awaiting command completion
 *
 *	Read TP PIO Registers
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 *	t4_tp_pio_write - Write TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are stored
 *	@nregs: how many indirect registers to write
 *	@start_index: index of first indirect register to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Write TP PIO Registers
 **/
void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	/* t4_tp_indirect_rw() takes a non-const buffer but does not modify
	 * it on the write path, hence the __DECONST. */
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
	    __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
}

/**
 *	t4_tp_tm_pio_read - Read TP TM PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP TM PIO Registers
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}

/**
 *	t4_tp_mib_read - Read TP MIB registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP MIB Registers
 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 *	t4_read_rss_key - read the global RSS key
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the global 320-bit RSS key.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
}

/**
 *	t4_write_rss_key - program one of the RSS keys
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@idx: which RSS key to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
 *	0..15 the corresponding entry in the RSS key table is written,
 *	otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/*
	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((chip_id(adap) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Stage the 320-bit key into the secret key registers first ... */
	t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);

	/* ... then commit it to the table entry selected by @idx (a
	 * negative or out-of-range @idx leaves only the global key set). */
	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDR(idx) | F_KEYWREN);
	}
}

/**
 *	t4_read_rss_pf_config - read PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to read
 *	@valp: where to store the returned value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Configuration Table at the specified index and returns
 *	the value found there.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
}

/**
 *	t4_write_rss_pf_config - write PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@val: the value to store
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the PF RSS Configuration Table at the specified index with the
 *	specified value.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
			sleep_ok);
}

/**
 *	t4_read_rss_vf_config - read VF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@vfl: where to store the returned VFL
 *	@vfh: where to store the returned VFH
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the VF RSS Configuration Table at the specified index and returns
 *	the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* The VF write-address field widened (and moved) in T6. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}
	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
}

/**
 *	t4_write_rss_vf_config - write VF RSS Configuration Table
 *
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to write
 *	@vfl: the VFL to store
 *	@vfh: the VFH to store
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the VF RSS Configuration Table at the specified index with the
 *	specified (VFL, VFH) values.
5152 */ 5153 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, 5154 u32 vfl, u32 vfh, bool sleep_ok) 5155 { 5156 u32 vrt, mask, data; 5157 5158 if (chip_id(adapter) <= CHELSIO_T5) { 5159 mask = V_VFWRADDR(M_VFWRADDR); 5160 data = V_VFWRADDR(index); 5161 } else { 5162 mask = V_T6_VFWRADDR(M_T6_VFWRADDR); 5163 data = V_T6_VFWRADDR(index); 5164 } 5165 5166 /* 5167 * Load up VFL/VFH with the values to be written ... 5168 */ 5169 t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok); 5170 t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok); 5171 5172 /* 5173 * Write the VFL/VFH into the VF Table at index'th location. 5174 */ 5175 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); 5176 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask); 5177 vrt |= data | F_VFRDEN; 5178 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); 5179 } 5180 5181 /** 5182 * t4_read_rss_pf_map - read PF RSS Map 5183 * @adapter: the adapter 5184 * @sleep_ok: if true we may sleep while awaiting command completion 5185 * 5186 * Reads the PF RSS Map register and returns its value. 5187 */ 5188 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok) 5189 { 5190 u32 pfmap; 5191 5192 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok); 5193 5194 return pfmap; 5195 } 5196 5197 /** 5198 * t4_write_rss_pf_map - write PF RSS Map 5199 * @adapter: the adapter 5200 * @pfmap: PF RSS Map value 5201 * 5202 * Writes the specified value to the PF RSS Map register. 5203 */ 5204 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok) 5205 { 5206 t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok); 5207 } 5208 5209 /** 5210 * t4_read_rss_pf_mask - read PF RSS Mask 5211 * @adapter: the adapter 5212 * @sleep_ok: if true we may sleep while awaiting command completion 5213 * 5214 * Reads the PF RSS Mask register and returns its value. 
 */
u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
{
	u32 pfmask;

	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);

	return pfmask;
}

/**
 *	t4_write_rss_pf_mask - write PF RSS Mask
 *	@adapter: the adapter
 *	@pfmask: PF RSS Mask value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the specified value to the PF RSS Mask register.
 */
void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
}

/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Scratch buffer spanning the contiguous TCP MIB counter range. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* Same layout, starting at the IPv6 counter base. */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}

/**
 *
 *	t4_tp_get_err_stats - read TP's error MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's error counters.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	/* ofld_no_neigh and ofld_cong_defer are adjacent in the MIB. */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}

/**
 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's proxy counters.
 */
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
			   bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
}

/**
 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's CPL counters.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);

	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
}

/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/* rqe_dfr_pkt and rqe_dfr_mod are adjacent in the MIB. */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);
}

/**
 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 *	@adap: the adapter
 *	@idx: the port index
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok)
{
	u32 val[2];

	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
		       sleep_ok);

	t4_tp_mib_read(adap, &st->frames_drop, 1,
		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);

	/* The byte counter is a HI/LO pair, two registers per port. */
	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
		       sleep_ok);

	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}

/**
 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's counters for non-TCP directly-placed packets.
 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok)
{
	u32 val[4];

	/* PKTS, DROPS, then the OCT HI/LO pair. */
	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);

	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}

/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/* An all-ones MTUINDEX puts the register in read mode; the
		 * table index to fetch is carried in the MTUVALUE field. */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}

/**
 *	t4_read_cong_tbl - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* An all-ones ROWINDEX selects read mode; the row is
			 * addressed as (mtu << 5) | window. */
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
			incr[mtu][w] = (u16)t4_read_reg(adap,
						A_TP_CCTRL_TABLE) & 0x1fff;
		}
}

/**
 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@adap: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
5463 */ 5464 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 5465 unsigned int mask, unsigned int val) 5466 { 5467 t4_write_reg(adap, A_TP_PIO_ADDR, addr); 5468 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; 5469 t4_write_reg(adap, A_TP_PIO_DATA, val); 5470 } 5471 5472 /** 5473 * init_cong_ctrl - initialize congestion control parameters 5474 * @a: the alpha values for congestion control 5475 * @b: the beta values for congestion control 5476 * 5477 * Initialize the congestion control parameters. 5478 */ 5479 static void init_cong_ctrl(unsigned short *a, unsigned short *b) 5480 { 5481 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 5482 a[9] = 2; 5483 a[10] = 3; 5484 a[11] = 4; 5485 a[12] = 5; 5486 a[13] = 6; 5487 a[14] = 7; 5488 a[15] = 8; 5489 a[16] = 9; 5490 a[17] = 10; 5491 a[18] = 14; 5492 a[19] = 17; 5493 a[20] = 21; 5494 a[21] = 25; 5495 a[22] = 30; 5496 a[23] = 35; 5497 a[24] = 45; 5498 a[25] = 60; 5499 a[26] = 80; 5500 a[27] = 100; 5501 a[28] = 200; 5502 a[29] = 300; 5503 a[30] = 400; 5504 a[31] = 500; 5505 5506 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 5507 b[9] = b[10] = 1; 5508 b[11] = b[12] = 2; 5509 b[13] = b[14] = b[15] = b[16] = 3; 5510 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 5511 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 5512 b[28] = b[29] = 6; 5513 b[30] = b[31] = 7; 5514 } 5515 5516 /* The minimum additive increment value for the congestion control table */ 5517 #define CC_MIN_INCR 2U 5518 5519 /** 5520 * t4_load_mtus - write the MTU and congestion control HW tables 5521 * @adap: the adapter 5522 * @mtus: the values for the MTU table 5523 * @alpha: the values for the congestion control alpha parameter 5524 * @beta: the values for the congestion control beta parameter 5525 * 5526 * Write the HW MTU table with the supplied MTUs and the high-speed 5527 * congestion control table with the supplied alpha, beta, and MTUs. 
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* Round the base-2 log of the MTU to the nearest power. */
		if (!(mtu & ((1 << log2) >> 2)))
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * NOTE(review): 40 is presumably the TCP/IP header
			 * overhead subtracted from the MTU — confirm.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/*
			 * Entry layout (from the shifts): MTU index at bit 21,
			 * window at bit 16, beta at bit 13, additive increment
			 * in the low 13 bits.
			 */
			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/**
 *	t4_set_pace_tbl - set the pace table
 *	@adap: the adapter
 *	@pace_vals: the pace values in microseconds
 *	@start: index of the first entry in the HW pace table to set
 *	@n: how many entries to set
 *
 *	Sets (a subset of the) HW pace table.
 */
int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
		    unsigned int start, unsigned int n)
{
	unsigned int vals[NTX_SCHED], i;
	/* dack_ticks_to_usec(adap, 1000) yields nanoseconds per DACK tick */
	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);

	if (n > NTX_SCHED)
		return -ERANGE;

	/* convert values from us to dack ticks, rounding to closest value */
	for (i = 0; i < n; i++, pace_vals++) {
		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
		if (vals[i] > 0x7ff)	/* value must fit in 11 bits */
			return -ERANGE;
		if (*pace_vals && vals[i] == 0)	/* nonzero value rounded to 0 */
			return -ERANGE;
	}
	for (i = 0; i < n; i++, start++)
		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
	return 0;
}

/**
 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a Tx HW scheduler for the target rate.
 */
int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes (1 Kbps == 125 bytes/s) */
		/*
		 * Search all clocks-per-tick values for the (cpt, bpt) pair
		 * whose achieved rate bpt * (clk / cpt) is closest to the
		 * target rate.
		 */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;		/* ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* bytes per tick */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta < mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/*
	 * Each TM PIO register holds the parameters for two schedulers:
	 * odd index in the upper half, even index in the lower half.
	 */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}

/**
 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *
 *	Set the interpacket delay for a HW packet rate scheduler.
 */
int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
{
	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;

	/* convert ipg to nearest number of core clocks */
	ipg *= core_ticks_per_usec(adap);
	ipg = (ipg + 5000) / 10000;	/* tenths-of-ns -> us, rounded */
	if (ipg > M_TXTIMERSEPQ0)
		return -EINVAL;

	/* Each register holds the separators of two queues. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
	else
		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	/* read back; presumably flushes the posted write — confirm */
	t4_read_reg(adap, A_TP_TM_PIO_DATA);
	return 0;
}

/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 * clocks.  The formula is
 *
 *	bytes/s = bytes256 * 256 * ClkFreq / 4096
 *
 * which is equivalent to
 *
 *	bytes/s = 62.5 * bytes256 * ClkFreq_ms
 */
static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
{
	/*
	 * NOTE(review): the multiply below is evaluated in 32-bit
	 * arithmetic before widening to u64.
	 */
	u64 v = bytes256 * adap->params.vpd.cclk;

	/* 62.5 * v, kept in integer arithmetic */
	return v * 62 + v / 2;
}

/**
 *	t4_get_chan_txrate - get the current per channel Tx rates
 *	@adap: the adapter
 *	@nic_rate: rates for NIC traffic
 *	@ofld_rate: rates for offloaded traffic
 *
 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
 *	for each channel.
 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	if (adap->chip_params->nchan > 2) {
		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
	}

	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	if (adap->chip_params->nchan > 2) {
		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
	}
}

/**
 *	t4_set_trace_filter - configure one of the tracing filters
 *	@adap: the adapter
 *	@tp: the desired trace filter parameters
 *	@idx: which filter to configure
 *	@enable: whether to enable or disable the filter
 *
 *	Configures one of the tracing filters available in HW.  If @tp is %NULL
 *	it indicates that the filter is already written in the register and it
 *	just needs to be enabled or disabled.
 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
			int idx, int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;
	/* NOTE(review): multitrc is assigned below but never read. */
	u32 multitrc = F_TRCMULTIFILTER;
	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;

	if (idx < 0 || idx >= NTRACE)
		return -EINVAL;

	if (tp == NULL || !enable) {
		/* Only toggle the enable bit; leave the filter itself as is. */
		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
				 enable ? en : 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	} else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		multitrc = 0;
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Range-check all remaining filter parameters. */
	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);

	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	/* The HW stores a "don't care" mask: the inverse of tp->mask. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/* Write CTL_A last; this also re-enables the tracer (en). */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
		     (is_t4(adap) ?
		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}

/**
 *	t4_get_trace_filter - query one of the tracing filters
 *	@adap: the adapter
 *	@tp: the current trace filter parameters
 *	@idx: which trace filter to query
 *	@enabled: non-zero if the filter is enabled
 *
 *	Returns the current settings of one of the HW tracing filters.
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	/* T4 and T5+ lay out the enable/port/invert fields differently. */
	if (is_t4(adap)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/*
	 * The HW holds a "don't care" mask; invert it back into the
	 * caller's mask convention (see t4_set_trace_filter).
	 */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}

/**
 *	t4_pmtx_get_stats - returns the HW stats from PMTX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMTX.
 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select stat i (registers are 1-based), then read it. */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap))
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		else {
			/* T5+: cycle count comes out of the debug
			 * interface as two 32-bit halves, MSB first. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_pmrx_get_stats - returns the HW stats from PMRX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMRX.
 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select stat i (registers are 1-based), then read it. */
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (is_t4(adap)) {
			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		} else {
			/* T5+: cycle count comes out of the debug
			 * interface as two 32-bit halves, MSB first. */
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
					 A_PM_RX_DBG_DATA, data, 2,
					 A_PM_RX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_get_mps_bg_map - return the buffer groups associated with a port
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given port.  Bit i is set if buffer group i is used by the
 *	port.
 */
static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n;

	/* Prefer the firmware-provided map (8 bits per port) if present. */
	if (adap->params.mps_bg_map)
		return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);

	/* Otherwise derive the map from the number of ports in use. */
	n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
	if (n == 0)
		return idx == 0 ? 0xf : 0;	/* all 4 BGs to port 0 */
	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
		return idx < 2 ? (3 << (2 * idx)) : 0;	/* 2 BGs per port */
	return 1 << idx;			/* 1 BG per port */
}

/*
 * TP RX e-channels associated with the port.
 */
static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
{
	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));

	/* Same mapping scheme as t4_get_mps_bg_map's fallback. */
	if (n == 0)
		return idx == 0 ? 0xf : 0;
	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

/**
 *	t4_get_port_type_description - return Port Type string description
 *	@port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Indexed by the fw_port_type enumeration value. */
	static const char *const port_type_description[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
		"KR4_100G",
		"CR4_QSFP",
		"CR_QSFP",
		"CR2_QSFP",
		"SFP28",
		"KR_SFP28",
	};

	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
	return "UNKNOWN";
}

/**
 *	t4_get_port_stats_offset - collect port stats relative to a previous
 *				   snapshot
 *	@adap: The adapter
 *	@idx: The port
 *	@stats: Current stats to fill
 *	@offset: Previous stats snapshot
 */
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset)
{
	u64 *s, *o;
	int i;

	t4_get_port_stats(adap, idx, stats);
	/* Subtract the snapshot field by field; both structs are all u64s. */
	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
	     i < (sizeof(struct port_stats)/sizeof(u64)) ;
	     i++, s++, o++)
		*s -= *o;
}

/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

/* Per-port MPS statistic; register layout differs between T4 and T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
/* Chip-wide (common) MPS statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	/*
	 * On T5+ the HW may be configured to include pause frames in the
	 * frame/byte/mcast counters; back them out so the counters reflect
	 * data traffic only.
	 */
	if (chip_id(adap) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX) {
			p->tx_frames -= p->tx_pause;
			p->tx_octets -= p->tx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment as on the Tx side. */
	if (chip_id(adap) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX) {
			p->rx_frames -= p->rx_pause;
			p->rx_octets -= p->rx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Per-buffer-group drop/truncate counts, only for BGs this port owns. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;

/* Per-loopback-port MPS statistic; layout differs between T4 and T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
/* Chip-wide (common) MPS statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets = GET_STAT(BYTES);
	p->frames = GET_STAT(FRAMES);
	p->bcast_frames = GET_STAT(BCAST);
	p->mcast_frames = GET_STAT(MCAST);
	p->ucast_frames = GET_STAT(UCAST);
	p->error_frames = GET_STAT(ERROR);

	p->frames_64 = GET_STAT(64B);
	p->frames_65_127 = GET_STAT(65B_127B);
	p->frames_128_255 = GET_STAT(128B_255B);
	p->frames_256_511 = GET_STAT(256B_511B);
	p->frames_512_1023 = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max = GET_STAT(1519B_MAX);
	p->drop = GET_STAT(DROP_FRAMES);

	/* Per-buffer-group counters, only for BGs this port owns. */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 *	t4_wol_magic_enable - enable/disable magic packet WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@addr: MAC address expected in magic packets, %NULL to disable
 *
 *	Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	/* The magic-packet MAC ID registers moved between T4 and T5+. */
	if (is_t4(adap)) {
		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	} else {
		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	}

	if (addr) {
		/* addr[0] is the most significant byte of the MAC address. */
		t4_write_reg(adap, mag_id_reg_l,
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, mag_id_reg_h,
			     (addr[0] << 8) | addr[1]);
	}
	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
			 V_MAGICEN(addr != NULL));
}

/**
 *	t4_wol_pat_enable - enable/disable pattern-based WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@map: bitmap of which HW pattern filters to set
 *	@mask0: byte mask for bytes 0-63 of a packet
 *	@mask1: byte mask for bytes 64-127 of a packet
 *	@crc: Ethernet CRC for selected bytes
 *	@enable: enable/disable switch
 *
 *	Sets the pattern filters indicated in @map to mask out the bytes
 *	specified in @mask0/@mask1 in received packets and compare the CRC of
 *	the resulting packet against @crc.  If @enable is %true pattern-based
 *	WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	if (is_t4(adap))
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	else
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);

	if (!enable) {
		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
		return 0;
	}
	if (map > 0xff)		/* the bitmap covers at most 8 patterns */
		return -EINVAL;

/* EPIO (pattern-filter) register; location differs between T4 and T5+. */
#define EPIO_REG(name) \
	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))

	/* DATA1-3 are shared by all patterns; DATA0 is loaded per write. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));	/* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;

		/* write CRC */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));	/* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
	return 0;
}

/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* NOREPLY suppresses the delete notification when qid < 0. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
			cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/* Fill in the common header of a FW_<cmd>_CMD for a READ or WRITE request. */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)

/*
 * Write a 32-bit value into firmware address space via a FW LDST command
 * issued on the given mailbox.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int *valp)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	/* The command is echoed back with the register value filled in. */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = be16_to_cpu(c.u.mdio.rval);
	return ret;
}

/**
 *	t4_mdio_wr - write a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to write
 *	@val: value to write
 *
 *	Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	c.u.mdio.rval = cpu_to_be16(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_sge_decode_idma_state - decode the idma state
 *	@adap: the adapter
 *	@state: the state idma is stuck in
 *
 *	Logs the symbolic name of the given SGE IDMA state (per chip
 *	generation) plus a few SGE debug registers for diagnosis.
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* State names indexed by the raw state value, per chip generation. */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* Debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char * const *sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = chip_id(adapter);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char * const *)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char * const *)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
		sge_idma_decode = (const char * const *)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
		return;
	}

	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}

/**
 *	t4_sge_ctxt_flush - flush the SGE context cache
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a FW command through the given mailbox to flush the
 *	SGE context cache.
 */
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* Target the SGE egress context address space; the CTXTFLUSH flag in
	 * the payload asks the firmware to flush the context cache.
	 */
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	return ret;
}

/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

/**
 *	t4_fw_bye - end communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_reset - issue a reset to FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@reset: specifies the type of reset to perform
 *
 *	Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	M_PCIE_FW_MASTER).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for any FW RESET command (if valid)
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for the HALT flag to
		 * clear, indicating the firmware has started back up.
		 */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int reset, ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/* A bootstrap image is written without halting or restarting the
	 * running firmware.
	 */
	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			return ret;
	}

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0 || bootstrap)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return t4_fw_restart(adap, mbox, reset);
}

/*
 * Card doesn't have a firmware, install one.
 */
int t4_fw_forceinstall(struct adapter *adap, const u8 *fw_data,
    unsigned int size)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int ret;

	if (!t4_fw_matches_chip(adap, fw_hdr) || bootstrap)
		return -EINVAL;

	/* Put the uP into RESET and wipe whatever state the (absent or hung)
	 * firmware left behind before writing the new image.
	 */
	t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
	t4_write_reg(adap, A_PCIE_FW, 0);	/* Clobber internal state */
	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0)
		return ret;
	t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
	msleep(1000);

	return (0);
}

/**
 *	t4_fw_initialize - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_query_params_rw - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@rw: Write and read flag
 *
 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
 *	queried at once.
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	/* The command carries at most 7 (mnemonic, value) pairs. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Pack mnemonics; when @rw is set also seed each value slot with the
	 * caller-supplied value.
	 */
	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = be32_to_cpu(*p);
	return ret;
}

/* Convenience wrapper: query parameters without pre-writing values. */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}

/**
 *	t4_set_params_timeout - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@timeout: the timeout time
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
 */
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	/* The command carries at most 7 (mnemonic, value) pairs. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}

/**
 *	t4_set_params - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}

/**
 *	t4_cfg_pfvf - configure PF/VF resource limits
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF being configured
 *	@vf: the VF being configured
 *	@txq: the max number of egress queues
 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
 *	@rxqi: the max number of interrupt-capable ingress queues
 *	@rxq: the max number of interruptless ingress queues
 *	@tc: the PCI traffic class
 *	@vi: the max number of virtual interfaces
 *	@cmask: the channel access rights mask for the PF/VF
 *	@pmask: the port access rights mask for the PF/VF
 *	@nexact: the maximum number of exact MPS filters
 *	@rcaps: read capabilities
 *	@wxcaps: write/execute capabilities
 *
 *	Configures resource limits and capabilities for a physical or virtual
 *	function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
				  V_FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
				     V_FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
				    V_FW_PFVF_CMD_PMASK(pmask) |
				    V_FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
				      V_FW_PFVF_CMD_NVI(vi) |
				      V_FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
					   V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
					   V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_alloc_vi_func - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *	@portfunc: which Port Application Function MAC Address is desired
 *	@idstype: Intrusion Detection Type
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, u16 *rss_size,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* NOTE(review): firmware appears to take the MAC count minus one in
	 * this field — confirm against the fw interface definitions.
	 */
	c.nmac = nmac - 1;
	if (!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* Copy as many additional MAC addresses as were requested;
		 * each case deliberately falls through to the next.
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}

/**
 *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Backwards-compatible convenience routine to allocate a Virtual
 *	Interface with an Ethernet Port Application Function and Intrusion
 *	Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		u16 *rss_size)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
				FW_VI_FUNC_ETH, 0);
}

/**
 *	t4_free_vi - free a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@viid: virtual interface identifier
 *
 *	Free a previously allocated virtual interface.
 */
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int viid)
{
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
				  F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) |
				  V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}

/**
 *	t4_set_rxmode - set Rx properties of a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@mtu: the new MTU or -1
 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values; -1 (no change) maps to each field's mask */
	if (mtu < 0)
		mtu = M_FW_VI_RXMODE_CMD_MTU;
	if (promisc < 0)
		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
	if (all_multi < 0)
		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
	if (bcast < 0)
		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
	if (vlanex < 0)
		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.mtu_to_vlanexen =
	    cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
			V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
			V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
			V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
			V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.
 *	If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* A single command carries at most ARRAY_SIZE(c.u.exact) addresses,
	 * so large requests are issued in batches.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
			    cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
			    be16_to_cpu(p->valid_to_idx));

			/* An index >= max_naddr means this address did not
			 * get an exact-match filter.
			 */
			if (idx)
				idx[offset+i] = (index >= max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only the first command in a batch frees existing MACs. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Partial success (-FW_ENOMEM) still reports how many we got. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@add_smt: if true also add the address to the HW SMT
 *
 *	Modifies an exact-match filter and sets it to the new MAC address if
 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 *	latter case the address is added persistently if @persist is %true.
 *
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/* An out-of-range index in the reply means the TCAM is full. */
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}

/**
 *	t4_set_addr_hash - program the MAC inexact-match hash filter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	u32 val;

	memset(&c, 0, sizeof(c));
	/* NOTE(review): this is a FW_VI_MAC_CMD but the VIID is packed with
	 * the FW_VI_ENABLE_CMD macro; presumably the two field layouts
	 * coincide — confirm against the firmware interface definitions.
	 */
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
	c.freemacs_to_len16 = cpu_to_be32(val);
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_enable_vi_params - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *	@dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 *	Enables/disables a virtual interface.  Note that setting DCB Enable
 *	only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
				     FW_LEN16(c));
	/* Uses the non-sleeping mailbox variant — presumably so this can be
	 * issued from contexts that may not sleep; confirm with callers.
	 */
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_enable_vi - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *
 *	Enables/disables a virtual interface.  Note that setting DCB Enable
 *	only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
}

/**
 *	t4_identify_port - identify a VI's port by blinking its LED
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@nblinks: how many times to blink LED at 2.5 Hz
 *
 *	Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = cpu_to_be16(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_iq_stop - stop an ingress queue and its FLs
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queues
 *	@vf: the VF owning the queues
 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 *	@iqid: ingress queue id
 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 *	Stops an ingress queue and its associated FLs, if any.  This causes
 *	any current or future data/messages destined for these queues to be
 *	tossed.
7494 */ 7495 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 7496 unsigned int vf, unsigned int iqtype, unsigned int iqid, 7497 unsigned int fl0id, unsigned int fl1id) 7498 { 7499 struct fw_iq_cmd c; 7500 7501 memset(&c, 0, sizeof(c)); 7502 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 7503 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 7504 V_FW_IQ_CMD_VFN(vf)); 7505 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c)); 7506 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 7507 c.iqid = cpu_to_be16(iqid); 7508 c.fl0id = cpu_to_be16(fl0id); 7509 c.fl1id = cpu_to_be16(fl1id); 7510 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7511 } 7512 7513 /** 7514 * t4_iq_free - free an ingress queue and its FLs 7515 * @adap: the adapter 7516 * @mbox: mailbox to use for the FW command 7517 * @pf: the PF owning the queues 7518 * @vf: the VF owning the queues 7519 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 7520 * @iqid: ingress queue id 7521 * @fl0id: FL0 queue id or 0xffff if no attached FL0 7522 * @fl1id: FL1 queue id or 0xffff if no attached FL1 7523 * 7524 * Frees an ingress queue and its associated FLs, if any. 
7525 */ 7526 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 7527 unsigned int vf, unsigned int iqtype, unsigned int iqid, 7528 unsigned int fl0id, unsigned int fl1id) 7529 { 7530 struct fw_iq_cmd c; 7531 7532 memset(&c, 0, sizeof(c)); 7533 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 7534 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 7535 V_FW_IQ_CMD_VFN(vf)); 7536 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); 7537 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 7538 c.iqid = cpu_to_be16(iqid); 7539 c.fl0id = cpu_to_be16(fl0id); 7540 c.fl1id = cpu_to_be16(fl1id); 7541 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7542 } 7543 7544 /** 7545 * t4_eth_eq_free - free an Ethernet egress queue 7546 * @adap: the adapter 7547 * @mbox: mailbox to use for the FW command 7548 * @pf: the PF owning the queue 7549 * @vf: the VF owning the queue 7550 * @eqid: egress queue id 7551 * 7552 * Frees an Ethernet egress queue. 7553 */ 7554 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 7555 unsigned int vf, unsigned int eqid) 7556 { 7557 struct fw_eq_eth_cmd c; 7558 7559 memset(&c, 0, sizeof(c)); 7560 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | 7561 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7562 V_FW_EQ_ETH_CMD_PFN(pf) | 7563 V_FW_EQ_ETH_CMD_VFN(vf)); 7564 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); 7565 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); 7566 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7567 } 7568 7569 /** 7570 * t4_ctrl_eq_free - free a control egress queue 7571 * @adap: the adapter 7572 * @mbox: mailbox to use for the FW command 7573 * @pf: the PF owning the queue 7574 * @vf: the VF owning the queue 7575 * @eqid: egress queue id 7576 * 7577 * Frees a control egress queue. 
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_CTRL_CMD_PFN(pf) |
				  V_FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_OFLD_CMD_PFN(pf) |
				  V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed by the firmware's Link Down Reason Code values. */
	static const char *reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved3",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved7",
	};

	/* Out-of-range codes get a catch-all string rather than a crash. */
	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}

/*
 * Updates all fields owned by the common code in port_info and link_config
 * based on information provided by the firmware.  Does not touch any
 * requested_* field.
 */
static void handle_port_info(struct port_info *pi, const struct fw_port_info *p)
{
	struct link_config *lc = &pi->link_cfg;
	int speed;
	unsigned char fc, fec;
	u32 stat = be32_to_cpu(p->lstatus_to_modtype);

	pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
	pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
	/* -1 marks "no MDIO address" when the capability bit is clear. */
	pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
	    G_FW_PORT_CMD_MDIOADDR(stat) : -1;

	lc->supported = be16_to_cpu(p->pcap);
	lc->advertising = be16_to_cpu(p->acap);
	lc->lp_advertising = be16_to_cpu(p->lpacap);
	lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
	lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);

	/*
	 * Decode the highest speed bit set in the link status word; 0 if
	 * none of the known speed bits are set.
	 */
	speed = 0;
	if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
		speed = 100;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
		speed = 1000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
		speed = 10000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
		speed = 25000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
		speed = 40000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
		speed = 100000;
	lc->speed = speed;

	fc = 0;
	if (stat & F_FW_PORT_CMD_RXPAUSE)
		fc |= PAUSE_RX;
	if (stat & F_FW_PORT_CMD_TXPAUSE)
		fc |= PAUSE_TX;
	lc->fc = fc;

	/*
	 * NOTE(review): FEC is derived from the advertised capabilities,
	 * not from a separate firmware-reported "FEC in use" field.
	 */
	fec = 0;
	if (lc->advertising & FW_PORT_CAP_FEC_RS)
		fec |= FEC_RS;
	if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS)
		fec |= FEC_BASER_RS;
	if (lc->advertising & FW_PORT_CAP_FEC_RESERVED)
		fec |= FEC_RESERVED;
	lc->fec = fec;
}

/**
 *	t4_update_port_info - retrieve and update port information if changed
 *	@pi: the port_info
 *
 *	We issue a Get Port Information Command to the Firmware and, if
 *	successful, we check to see if anything is different from what we
 *	last recorded and update things accordingly.
 */
int t4_update_port_info(struct port_info *pi)
{
	struct fw_port_cmd port_cmd;
	int ret;

	memset(&port_cmd, 0, sizeof port_cmd);
	port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ |
	    V_FW_PORT_CMD_PORTID(pi->tx_chan));
	port_cmd.action_to_len16 = cpu_to_be32(
	    V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
	    FW_LEN16(port_cmd));
	ret = t4_wr_mbox_ns(pi->adapter, pi->adapter->mbox,
	    &port_cmd, sizeof(port_cmd), &port_cmd);
	if (ret)
		return ret;

	handle_port_info(pi, &port_cmd.u.info);
	return 0;
}

/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
	    G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* link/module state change message */
		int i, old_ptype, old_mtype;
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc, *old_lc;

		/*
		 * NOTE(review): if no port's tx_chan matches @chan, pi is
		 * left pointing at the last port; this assumes the firmware
		 * only reports channels belonging to this adapter's ports.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}

		lc = &pi->link_cfg;
		old_lc = &pi->old_link_cfg;
		old_ptype = pi->port_type;
		old_mtype = pi->mod_type;

		handle_port_info(pi, &p->u.info);
		if (old_ptype != pi->port_type || old_mtype != pi->mod_type) {
			t4_os_portmod_changed(pi);
		}
		/* Only notify the OS when something it cares about changed. */
		if (old_lc->link_ok != lc->link_ok ||
		    old_lc->speed != lc->speed ||
		    old_lc->fec != lc->fec ||
		    old_lc->fc != lc->fc) {
			t4_os_link_changed(pi);
			*old_lc = *lc;
		}

	} else {
		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}

/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.  Leaves @p untouched if no PCIe capability is found.
 */
static void get_pci_mode(struct adapter *adapter,
			 struct pci_params *p)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/* Maps a serial flash Read ID result to the part's size in bytes. */
struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;	/* NOTE(review): value is in bytes despite the name */
};

int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size;

	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: {	/* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */

		default:
			CH_ERR(adapter, "Micron Flash Part has bad size, "
			       "ID = %#x, Density code = %#x\n",
			       flashid, density);
			return -EINVAL;
		}
		break;
	}

	case 0xef: {	/* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*  8MB */
		case 0x18: size = 1 << 24; break; /* 16MB */

		default:
			CH_ERR(adapter, "Winbond Flash Part has bad size, "
			       "ID = %#x, Density code = %#x\n",
			       flashid, density);
			return -EINVAL;
		}
		break;
	}

	default:
		CH_ERR(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
		return -EINVAL;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}

/* Program the PCIe Completion Timeout range field in Device Control 2. */
static void set_pcie_completion_timeout(struct adapter *adapter,
					u8 range)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
		/* Replace the low 4 bits (the timeout range) with @range. */
		val &= 0xfff0;
		val |= range;
		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}

/*
 * Returns the per-generation constants for the given chip id, or NULL for
 * an unknown chip.
 */
const struct chip_params *t4_get_chip_params(int chipid)
{
	/* Indexed by chipid - CHELSIO_T4: T4, T5, T6. */
	static const struct chip_params chip_params[] = {
		{
			/* T4 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 15,
			.cim_num_obq = CIM_NUM_OBQ,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO,
			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
		},
		{
			/* T5 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO | F_DBTYPE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
		},
		{
			/* T6 */
			.nchan = T6_NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 3,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
		},
	};

	chipid -= CHELSIO_T4;
	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
		return NULL;

	return &chip_params[chipid];
}

/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *	@buf: temporary space of at least VPD_LEN size provided by the caller.
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter, u8 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = CIMLA_SIZE;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}

/**
 *	t4_shutdown_adapter - shut down adapter, host & wire
 *	@adapter: the adapter
 *
 *	Perform an emergency shutdown of the adapter and stop it from
 *	continuing any further communication on the ports or DMA to the
 *	host.  This is typically used when the adapter and/or firmware
 *	have crashed and we want to prevent any further accidental
 *	communication with the rest of the world.  This will also force
 *	the port Link Status to go down -- if register writes work --
 *	which should help our peers figure out that we're down.
 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;

	t4_intr_disable(adapter);
	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
	for_each_port(adapter, port) {
		/* The MAC config register moved between T4 and T5+. */
		u32 a_port_cfg = is_t4(adapter) ?
				 PORT_REG(port, A_XGMAC_PORT_CFG) :
				 T5_PORT_REG(port, A_MAC_PORT_CFG);

		/* Clear SIGNAL_DET to force the link down. */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~V_SIGNAL_DET(1));
	}
	/* Stop all SGE DMA to/from the host. */
	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);

	return 0;
}

/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
	    t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* Register holds (entries/128 - 1); expand back to entries. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
	    be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}

/**
 *	t4_init_sge_params - initialize adap->params.sge
 *	@adapter: the adapter
 *
 *	Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;
	unsigned i, tscale = 1;

	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* T6+ can scale the SGE timers; 0 in the register means 1x. */
	if (chip_id(adapter) >= CHELSIO_T6) {
		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
		tscale = G_TSCALE(r);
		if (tscale == 0)
			tscale = 1;
		else
			tscale += 2;
	}

	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;

	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
	else
		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	/* Field encodes log2(page size) - 10. */
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->sge_control = r;
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	/* The pad boundary encoding shifted between T5 and T6. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_INGPADBOUNDARY_SHIFT);
	} else {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_T6_INGPADBOUNDARY_SHIFT);
	}
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		r = t4_read_reg(adapter, A_SGE_CONTROL2);
		if (G_INGPACKBOUNDARY(r) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}
	for (i = 0; i < SGE_FLBUF_SIZES; i++)
		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));

	return 0;
}

/*
 * Read and cache the adapter's compressed filter mode and ingress config.
 */
static void read_filter_mode_and_ingress_config(struct adapter *adap,
    bool sleep_ok)
{
	struct tp_params *tpp = &adap->params.tp;

	t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
	    sleep_ok);
	t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,
	    sleep_ok);

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
	tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
	tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
	tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
	tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
	tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);

	/*
	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((tpp->ingress_config & F_VNIC) == 0)
		tpp->vnic_shift = -1;
}

/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *
 *	Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	int chan;
	u32 v;
	struct tp_params *tpp = &adap->params.tp;

	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	tpp->tre = G_TIMERRESOLUTION(v);
	tpp->dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < MAX_NCHAN; chan++)
		tpp->tx_modq[chan] = chan;

	read_filter_mode_and_ingress_config(adap, sleep_ok);

	/*
	 * Cache a mask of the bits that represent the error vector portion of
	 * rx_pkt.err_vec.  T6+ can use a compressed error vector to make room
	 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
	 */
	tpp->err_vec_mask = htobe16(0xffff);
	if (chip_id(adap) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		if (v & F_CRXPKTENC) {
			tpp->err_vec_mask =
			    htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
		}
	}

	return 0;
}

/**
 *	t4_filter_field_shift - calculate filter field shift
 *	@adap: the adapter
 *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 *	Return the shift position of a filter field within the Compressed
 *	Filter Tuple.  The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	/* -1 means the requested field is not part of the filter mode. */
	if ((filter_mode & filter_sel) == 0)
		return -1;

	/* Sum the widths of all enabled fields below the requested one. */
	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE:
			field_shift += W_FT_FCOE;
			break;
		case F_PORT:
			field_shift += W_FT_PORT;
			break;
		case F_VNIC_ID:
			field_shift += W_FT_VNIC_ID;
			break;
		case F_VLAN:
			field_shift += W_FT_VLAN;
			break;
		case F_TOS:
			field_shift += W_FT_TOS;
			break;
		case F_PROTOCOL:
			field_shift += W_FT_PROTOCOL;
			break;
		case F_ETHERTYPE:
			field_shift += W_FT_ETHERTYPE;
			break;
		case F_MACMATCH:
			field_shift += W_FT_MACMATCH;
			break;
		case F_MPSHITTYPE:
			field_shift += W_FT_MPSHITTYPE;
			break;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
			break;
		}
	}
	return field_shift;
}

int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
{
	u8 addr[6];
	int ret, i, j;
	u16 rss_size;
	struct port_info *p = adap2pinfo(adap, port_id);
	u32 param, val;

	/* Find the (port_id + 1)'th set bit in portvec; j is the channel. */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	if (!(adap->flags & IS_VF) ||
	    adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
		t4_update_port_info(p);
	}

	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return ret;

	p->vi[0].viid = ret;
	/* SMT index derivation differs between T4/T5 and T6. */
	if (chip_id(adap) <= CHELSIO_T5)
		p->vi[0].smt_idx = (ret & 0x7f) << 1;
	else
		p->vi[0].smt_idx = (ret & 0x7f);
	p->tx_chan = j;
	p->mps_bg_map = t4_get_mps_bg_map(adap, j);
	p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
	p->lport = j;
	p->vi[0].rss_size = rss_size;
	t4_os_set_hw_addr(p, addr);

	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
	if (ret)
		p->vi[0].rss_base = 0xffff;
	else {
		/* MPASS((val >> 16) == rss_size); */
		p->vi[0].rss_base = val & 0xffff;
	}

	return 0;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	/* Inbound queues: select each IBQ and read back base/size/threshold. */
	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
		*thres++ = G_QUEFULLTHRSH(v) * 8;	/* 8-byte unit */
	}
	/* Outbound queues follow; they have no full threshold. */
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	/* IBQs are laid out back-to-back in the debug address space. */
	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	/* Disable debug access on the way out. */
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* OBQ base/size must be fetched from the queue configuration. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;	/* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;	/* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	/* Disable debug access on the way out. */
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

/* Base addresses of the regions within the CIM internal address space. */
enum {
	CIM_QCTL_BASE = 0,
	CIM_CTL_BASE = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	/* The host access interface handles one request at a time. */
	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Stage the data word before triggering the write. */
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

/* Convenience wrapper: write a single word into the CIM address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}

/**
 *	t4_cim_ctl_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Select the next entry and trigger a read. */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		/* The read-enable bit clears once the entry is available. */
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap))
			while ((idx & 0xf) > 9)
				idx = (idx + 1) % M_UPDBGLARDPTR;
	}
restart:
	/* If the LA was running when we arrived, restart it on the way out. */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)	/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* In capture modes >= 2 the last entry may be only half filled. */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Keep the mode bits, clear the read pointer, keep the LA mask. */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)	/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}

/*
 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300

/**
 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *
 *	Initialize the state of an SGE Ingress DMA Monitor.
 */
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
{
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang.  The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick.  The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds.  For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s.  So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
}

/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers.  These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock.  The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}

/**
 *	t4_read_pace_tbl - read the pace table
 *	@adap: the adapter
 *	@pace_vals: holds the returned values
 *
 *	Returns the values of TP's pace table in microseconds.
 */
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
{
	unsigned int i, v;

	for (i = 0; i < NTX_SCHED; i++) {
		/* Select entry i (upper half-word is the read command). */
		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
		v = t4_read_reg(adap, A_TP_PACE_TABLE);
		pace_vals[i] = dack_ticks_to_usec(adap, v);
	}
}

/**
 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: the byte rate in Kbps
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Return the current configuration of a HW Tx scheduler.
 */
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
		     unsigned int *ipg, bool sleep_ok)
{
	unsigned int v, addr, bpt, cpt;

	if (kbps) {
		/* Two schedulers share each rate-limit register. */
		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		bpt = (v >> 8) & 0xff;
		cpt = v & 0xff;
		if (!cpt)
			*kbps = 0;	/* scheduler disabled */
		else {
			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
			*kbps = (v * bpt) / 125;
		}
	}
	if (ipg) {
		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		v &= 0xffff;
		*ipg = (10000 * v) / core_ticks_per_usec(adap);
	}
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t5_fw_init_extern_mem - initialize the external memory
 *	@adap: the adapter
 *
 *	Initializes the external memory on T5.
 */
int t5_fw_init_extern_mem(struct adapter *adap)
{
	u32 params[1], val[1];
	int ret;

	/* Only T5 has firmware-initialized external memory. */
	if (!is_t5(adap))
		return 0;

	val[0] = 0xff;		/* Initialize all MCs */
	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
			FW_CMD_MAX_TIMEOUT);

	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;		/* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];	// ROM signature. The value 0xaa55
	u8	initialization_size[2];	/* Units 512. Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8];	/* Reserved */
	u8	efi_image_header_offset[2];	/* Offset to EFI Image */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8	signature[4];	/* Signature. The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification */
	u8	device_id[2];	/* Device Identification */
	u8	vital_product[2];	/* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2];	/* Image Length. Multiple of 512B */
	u8	code_revision[2];	/* Revision Level of Code/Data */
	u8	code_type;	/* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator;	/* Indicator. Identifies the last image in the ROM */
	u8	reserved[2];	/* Reserved */
} pcir_data_t;	/* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,	   /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
	VENDOR_ID = 0x1425,	   /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write.
 *	@boot_data: the boot image to modify.
 *
 *	Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
			      le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot data
			 * (offset 7 is the cksum field of the legacy header)
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 * NOTE(review): assumes size512 != 0 for every chained image;
		 * a zero-sized image would loop forever — confirm callers
		 * validate the ROM first.
		 */
		cur_header += header->size512 * 512;
	}
}

/*
 *	t4_load_boot - download boot flash
 *	@adapter: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = (boot_addr * 1024);
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
	 * and Boot configuration data sections. These 3 boot sections span
	 * sectors 0 to 7 in flash and live right before the FW image location.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors. We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

#ifndef CHELSIO_T4_DIAGS
	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}
#endif

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = device_id & 0xf0ff;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 * NOTE(review): the loop assumes @size is a multiple of SF_PAGE_SIZE
	 * — confirm for any new caller.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/* Finally commit the header page, making the image valid. */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/*
 *	t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the OptionROM Configuration
 *	is stored, or an error if the device FLASH is too small to contain
 *	a OptionROM Configuration.
 */
static int t4_flash_bootcfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_BOOTCFG_START;
}

/*
 * Write the supplied OptionROM configuration to the card's serial flash,
 * or (when @size == 0) erase the region that holds it.
 */
int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
		       FLASH_BOOTCFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter OptionROM Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "boot config data %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t4_set_filter_mode - configure the optional components of filter tuples
 *	@adap: the adapter
 *	@mode_map: a bitmap selecting which optional filter components to enable
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets the filter mode by selecting the optional components to enable
 *	in filter tuples. Returns 0 on success and a negative error if the
 *	requested mode needs more bits than are available for optional
 *	components.
 */
int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
		       bool sleep_ok)
{
	/* Width in bits of each optional filter field, indexed by its
	 * selection-bit position (S_FCOE .. S_FRAGMENTATION).
	 */
	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

	int i, nbits = 0;

	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
		if (mode_map & (1 << i))
			nbits += width[i];
	if (nbits > FILTER_OPT_LEN)
		return -EINVAL;
	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
	read_filter_mode_and_ingress_config(adap, sleep_ok);

	return 0;
}

/**
 *	t4_clr_port_stats - clear port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Clear HW statistics for the given port.
 */
void t4_clr_port_stats(struct adapter *adap, int idx)
{
	unsigned int i;
	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
	u32 port_base_addr;

	if (is_t4(adap))
		port_base_addr = PORT_BASE(idx);
	else
		port_base_addr = T5_PORT_BASE(idx);

	/* Zero the TX and RX port statistics register ranges. */
	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	/* Clear the drop/truncate counters of each buffer group this port
	 * maps to.
	 */
	for (i = 0; i < 4; i++)
		if (bgmap & (1 << i)) {
			t4_write_reg(adap,
			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
			t4_write_reg(adap,
			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
		}
}

/**
 *	t4_i2c_rd - read I2C data from adapter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: Port number if per-port device; <0 if not
 *	@devid: per-port device ID or absolute device ID
 *	@offset: byte offset into device I2C space
 *	@len: byte length of I2C space data
 *	@buf: buffer in which to return I2C data
 *
 *	Reads the I2C data from the indicated device and location.
 */
int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd ldst;
	int ret;

	/* Bounds-check the request against what the FW LDST command holds. */
	if (port >= 4 ||
	    devid >= 256 ||
	    offset >= 256 ||
	    len > sizeof ldst.u.i2c.data)
		return -EINVAL;

	memset(&ldst, 0, sizeof ldst);
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
	ldst.op_to_addrspace =
		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			    F_FW_CMD_REQUEST |
			    F_FW_CMD_READ |
			    ldst_addrspace);
	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
	/* 0xff selects "no specific port". */
	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
	ldst.u.i2c.did = devid;
	ldst.u.i2c.boffset = offset;
	ldst.u.i2c.blen = len;
	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
	if (!ret)
		memcpy(buf, ldst.u.i2c.data, len);
	return ret;
}

/**
 *	t4_i2c_wr - write I2C data to adapter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: Port number if per-port device; <0 if not
 *	@devid: per-port device ID or absolute device ID
 *	@offset: byte offset into device I2C space
 *	@len: byte length of I2C space data
 *	@buf: buffer containing new I2C data
 *
 *	Write the I2C data to the indicated device and location.
 */
int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd ldst;

	/* Bounds-check the request against what the FW LDST command holds. */
	if (port >= 4 ||
	    devid >= 256 ||
	    offset >= 256 ||
	    len > sizeof ldst.u.i2c.data)
		return -EINVAL;

	memset(&ldst, 0, sizeof ldst);
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
	ldst.op_to_addrspace =
		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			    F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE |
			    ldst_addrspace);
	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
	/* 0xff selects "no specific port". */
	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
	ldst.u.i2c.did = devid;
	ldst.u.i2c.boffset = offset;
	ldst.u.i2c.blen = len;
	memcpy(ldst.u.i2c.data, buf, len);
	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
}

/**
 *	t4_sge_ctxt_rd - read an SGE context through FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@cid: the context id
 *	@ctype: the context type
 *	@data: where to store the context data
 *
 *	Issues a FW command through the given mailbox to read an SGE context.
9521 */ 9522 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, 9523 enum ctxt_type ctype, u32 *data) 9524 { 9525 int ret; 9526 struct fw_ldst_cmd c; 9527 9528 if (ctype == CTXT_EGRESS) 9529 ret = FW_LDST_ADDRSPC_SGE_EGRC; 9530 else if (ctype == CTXT_INGRESS) 9531 ret = FW_LDST_ADDRSPC_SGE_INGC; 9532 else if (ctype == CTXT_FLM) 9533 ret = FW_LDST_ADDRSPC_SGE_FLMC; 9534 else 9535 ret = FW_LDST_ADDRSPC_SGE_CONMC; 9536 9537 memset(&c, 0, sizeof(c)); 9538 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 9539 F_FW_CMD_REQUEST | F_FW_CMD_READ | 9540 V_FW_LDST_CMD_ADDRSPACE(ret)); 9541 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 9542 c.u.idctxt.physid = cpu_to_be32(cid); 9543 9544 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 9545 if (ret == 0) { 9546 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0); 9547 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1); 9548 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2); 9549 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3); 9550 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4); 9551 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5); 9552 } 9553 return ret; 9554 } 9555 9556 /** 9557 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW 9558 * @adap: the adapter 9559 * @cid: the context id 9560 * @ctype: the context type 9561 * @data: where to store the context data 9562 * 9563 * Reads an SGE context directly, bypassing FW. This is only for 9564 * debugging when FW is unavailable. 
 */
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
		      u32 *data)
{
	int i, ret;

	/* Kick off the read, then poll for BUSY to clear (3 tries, 1us apart). */
	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
	if (!ret)
		/* Copy out the six context data registers. */
		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
			*data++ = t4_read_reg(adap, i);
	return ret;
}

/**
 * t4_sched_config - global TX scheduler configuration
 * @adapter: the adapter
 * @type: scheduler type
 * @minmaxen: presumably enables min/max rate enforcement -- TODO confirm
 *	against the FW_SCHED_CMD documentation
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Issues a FW_SCHED_CMD with the CONFIG sub-command. Returns the mailbox
 * command's result.
 */
int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
		    int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
	cmd.u.config.type = type;
	cmd.u.config.minmaxen = minmaxen;

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/**
 * t4_sched_params - set scheduler parameters at an arbitrary level
 * @adapter: the adapter
 * @type: scheduler type
 * @level: scheduler hierarchy level
 * @mode: scheduling mode
 * @rateunit: unit for @minrate/@maxrate
 * @ratemode: relative or absolute rate interpretation
 * @channel: the TX channel
 * @cl: the scheduling class
 * @minrate: minimum rate
 * @maxrate: maximum rate
 * @weight: WRR weight
 * @pktsize: packet size used for rate computation
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Issues a FW_SCHED_CMD with the PARAMS sub-command carrying all
 * parameters verbatim; the FW interprets them according to @level.
 */
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
		    int rateunit, int ratemode, int channel, int cl,
		    int minrate, int maxrate, int weight, int pktsize,
		    int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = type;
	cmd.u.params.level = level;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cl;
	cmd.u.params.unit = rateunit;
	cmd.u.params.rate = ratemode;
	cmd.u.params.min = cpu_to_be32(minrate);
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.weight = cpu_to_be16(weight);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/**
 * t4_sched_params_ch_rl - set channel-level rate limiting
 * @adapter: the adapter
 * @channel: the TX channel
 * @ratemode: relative (%) or absolute (kbps) interpretation of @maxrate
 * @maxrate: maximum rate, in the units selected by @ratemode
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Convenience wrapper: issues a PARAMS sub-command at the CH_RL level.
 */
int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
			  unsigned int maxrate, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
	cmd.u.params.ch = channel;
	cmd.u.params.rate = ratemode;	/* REL or ABS */
	cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/**
 * t4_sched_params_cl_wrr - set class-level weighted-round-robin weight
 * @adapter: the adapter
 * @channel: the TX channel
 * @cl: the scheduling class
 * @weight: WRR weight; must be in [0, 100]
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Issues a PARAMS sub-command at the CL_WRR level. Returns -EINVAL if
 * @weight is outside [0, 100].
 */
int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
			   int weight, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	if (weight < 0 || weight > 100)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cl;
	cmd.u.params.weight = cpu_to_be16(weight);

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/**
 * t4_sched_params_cl_rl_kbps - set class-level absolute rate limit in kbps
 * @adapter: the adapter
 * @channel: the TX channel
 * @cl: the scheduling class
 * @mode: scheduling mode
 * @maxrate: maximum rate in kbps
 * @pktsize: packet size used for rate computation
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Issues a PARAMS sub-command at the CL_RL level with an absolute
 * bitrate limit.
 */
int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
			       int mode, unsigned int maxrate, int pktsize, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cl;
	/* Absolute bitrate limit: @maxrate is taken as kbps. */
	cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
	cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/*
 * t4_config_watchdog - configure (enable/disable) a watchdog timer
 * @adapter: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @timeout: watchdog timeout in ms
 * @action: watchdog timer / action
 *
 * There are separate watchdog timers for each possible watchdog
 * action. Configure one of the watchdog timers by setting a non-zero
 * timeout. Disable a watchdog timer by using a timeout of zero.
 */
int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
		       unsigned int pf, unsigned int vf,
		       unsigned int timeout, unsigned int action)
{
	struct fw_watchdog_cmd wdog;
	unsigned int ticks;

	/*
	 * The watchdog command expects a timeout in units of 10ms so we need
	 * to convert it here (via rounding) and force a minimum of one 10ms
	 * "tick" if the timeout is non-zero but the conversion results in 0
	 * ticks.
	 */
	ticks = (timeout + 5)/10;
	if (timeout && !ticks)
		ticks = 1;

	memset(&wdog, 0, sizeof wdog);
	/*
	 * NOTE(review): the PF/VF fields are packed with the FW_PARAMS_CMD
	 * macros; presumably the watchdog command shares that field layout --
	 * confirm against the FW interface definitions.
	 */
	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
				     F_FW_CMD_REQUEST |
				     F_FW_CMD_WRITE |
				     V_FW_PARAMS_CMD_PFN(pf) |
				     V_FW_PARAMS_CMD_VFN(vf));
	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
	wdog.timeout = cpu_to_be32(ticks);
	wdog.action = cpu_to_be32(action);

	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
}

/**
 * t4_get_devlog_level - query the firmware device-log level
 * @adapter: the adapter
 * @level: where to store the current devlog level
 *
 * Issues a FW_DEVLOG_CMD read and returns the reported level in @level.
 * Returns 0 on success or the mailbox command's error.
 */
int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
{
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
			 sizeof(devlog_cmd), &devlog_cmd);
	if (ret)
		return ret;

	*level = devlog_cmd.level;
	return 0;
}

/**
 * t4_set_devlog_level - set the firmware device-log level
 * @adapter: the adapter
 * @level: the new devlog level
 *
 * Issues a FW_DEVLOG_CMD write carrying @level. Returns the mailbox
 * command's result.
 */
int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
{
	struct fw_devlog_cmd devlog_cmd;

	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST |
					     F_FW_CMD_WRITE);
	devlog_cmd.level = level;
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
			  sizeof(devlog_cmd), &devlog_cmd);
}