1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2012, 2016, 2025 Chelsio Communications. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/eventhandler.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

/*
 * Override the shared-code msleep() with a FreeBSD-native version: while the
 * kernel is still cold (scheduler not running, so pause(9) is unavailable)
 * busy-wait with DELAY(), otherwise sleep via pause(9).  @x is milliseconds.
 */
#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
61 */ 62 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 63 int polarity, int attempts, int delay, u32 *valp) 64 { 65 while (1) { 66 u32 val = t4_read_reg(adapter, reg); 67 68 if (!!(val & mask) == polarity) { 69 if (valp) 70 *valp = val; 71 return 0; 72 } 73 if (--attempts == 0) 74 return -EAGAIN; 75 if (delay) 76 udelay(delay); 77 } 78 } 79 80 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, 81 int polarity, int attempts, int delay) 82 { 83 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, 84 delay, NULL); 85 } 86 87 /** 88 * t4_set_reg_field - set a register field to a value 89 * @adapter: the adapter to program 90 * @addr: the register address 91 * @mask: specifies the portion of the register to modify 92 * @val: the new value for the register field 93 * 94 * Sets a register field specified by the supplied mask to the 95 * given value. 96 */ 97 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, 98 u32 val) 99 { 100 u32 v = t4_read_reg(adapter, addr) & ~mask; 101 102 t4_write_reg(adapter, addr, v | val); 103 (void) t4_read_reg(adapter, addr); /* flush */ 104 } 105 106 /** 107 * t4_read_indirect - read indirectly addressed registers 108 * @adap: the adapter 109 * @addr_reg: register holding the indirect address 110 * @data_reg: register holding the value of the indirect register 111 * @vals: where the read register values are stored 112 * @nregs: how many indirect registers to read 113 * @start_idx: index of first indirect register to read 114 * 115 * Reads registers that are accessed indirectly through an address/data 116 * register pair. 
117 */ 118 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 119 unsigned int data_reg, u32 *vals, 120 unsigned int nregs, unsigned int start_idx) 121 { 122 while (nregs--) { 123 t4_write_reg(adap, addr_reg, start_idx); 124 *vals++ = t4_read_reg(adap, data_reg); 125 start_idx++; 126 } 127 } 128 129 /** 130 * t4_write_indirect - write indirectly addressed registers 131 * @adap: the adapter 132 * @addr_reg: register holding the indirect addresses 133 * @data_reg: register holding the value for the indirect registers 134 * @vals: values to write 135 * @nregs: how many indirect registers to write 136 * @start_idx: address of first indirect register to write 137 * 138 * Writes a sequential block of registers that are accessed indirectly 139 * through an address/data register pair. 140 */ 141 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 142 unsigned int data_reg, const u32 *vals, 143 unsigned int nregs, unsigned int start_idx) 144 { 145 while (nregs--) { 146 t4_write_reg(adap, addr_reg, start_idx++); 147 t4_write_reg(adap, data_reg, *vals++); 148 } 149 } 150 151 /* 152 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor 153 * mechanism. This guarantees that we get the real value even if we're 154 * operating within a Virtual Machine and the Hypervisor is trapping our 155 * Configuration Space accesses. 156 * 157 * N.B. This routine should only be used as a last resort: the firmware uses 158 * the backdoor registers on a regular basis and we can end up 159 * conflicting with it's uses! 
160 */ 161 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg) 162 { 163 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg); 164 u32 val; 165 166 if (chip_id(adap) <= CHELSIO_T5) 167 req |= F_ENABLE; 168 else 169 req |= F_T6_ENABLE; 170 171 if (is_t4(adap)) 172 req |= F_LOCALCFG; 173 174 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req); 175 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA); 176 177 /* 178 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a 179 * Configuration Space read. (None of the other fields matter when 180 * F_ENABLE is 0 so a simple register write is easier than a 181 * read-modify-write via t4_set_reg_field().) 182 */ 183 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0); 184 185 return val; 186 } 187 188 /* 189 * t4_report_fw_error - report firmware error 190 * @adap: the adapter 191 * 192 * The adapter firmware can indicate error conditions to the host. 193 * If the firmware has indicated an error, print out the reason for 194 * the firmware error. 195 */ 196 void t4_report_fw_error(struct adapter *adap) 197 { 198 static const char *const reason[] = { 199 "Crash", /* PCIE_FW_EVAL_CRASH */ 200 "During Device Preparation", /* PCIE_FW_EVAL_PREP */ 201 "During Device Configuration", /* PCIE_FW_EVAL_CONF */ 202 "During Device Initialization", /* PCIE_FW_EVAL_INIT */ 203 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ 204 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ 205 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ 206 "Reserved", /* reserved */ 207 }; 208 u32 pcie_fw; 209 210 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 211 if (pcie_fw & F_PCIE_FW_ERR) { 212 CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n", 213 reason[G_PCIE_FW_EVAL(pcie_fw)], pcie_fw); 214 } 215 } 216 217 /* 218 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	/* Copy @nflit 8-byte flits out of the mailbox data registers. */
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox: log the file, line, and the
 * two argument values carried in the FW_DEBUG_CMD payload.
 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	CH_ALERT(adap,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt->u.assert.filename_0_7,
		  be32_to_cpu(asrt->u.assert.line),
		  be32_to_cpu(asrt->u.assert.x),
		  be32_to_cpu(asrt->u.assert.y));
}

/*
 * Per-channel snapshot of the MPS TX statistics used by check_tx_state() to
 * detect a TX path that has stopped making progress.
 */
struct port_tx_state {
	uint64_t rx_pause;	/* from A_MPS_PORT_STAT_RX_PORT_PAUSE_L */
	uint64_t tx_frames;	/* from A_MPS_PORT_STAT_TX_PORT_FRAMES_L */
};

/*
 * Return the address of per-port register @reg for @port, accounting for the
 * different per-port register strides of the T4, T5/T6, and T7 families.
 */
u32
t4_port_reg(struct adapter *adap, u8 port, u32 reg)
{
	if (chip_id(adap) > CHELSIO_T6)
		return T7_PORT_REG(port, reg);
	if (chip_id(adap) > CHELSIO_T4)
		return T5_PORT_REG(port, reg);
	return PORT_REG(port, reg);
}

/* Snapshot the RX pause and TX frame counters for one channel. */
static void
read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state)
{
	uint32_t rx_pause_reg, tx_frames_reg;

	rx_pause_reg = t4_port_reg(sc, i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
	tx_frames_reg = t4_port_reg(sc, i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);

	tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg);
	tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg);
}

/* Snapshot the TX state of every active channel. */
static void
read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
	int i;

	for (i = 0; i < MAX_NCHAN; i++) {
		/* 0xff appears to mark a channel with no associated port. */
		if (sc->chan_map[i] != 0xff)
			read_tx_state_one(sc, i, &tx_state[i]);
	}
}

/*
 * Compare each active channel's TX counters against the snapshot in
 * @tx_state (refreshing the snapshot as a side effect).  A port that is
 * enabled for TX, has received new pause frames, but has transmitted no new
 * frames since the last snapshot gets its TX enable toggled off and back on.
 */
static void
check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
	uint32_t port_ctl_reg;
	uint64_t tx_frames, rx_pause;
	int i;

	for (i = 0; i < MAX_NCHAN; i++) {
		if (sc->chan_map[i] == 0xff)
			continue;
		rx_pause = tx_state[i].rx_pause;
		tx_frames = tx_state[i].tx_frames;
		read_tx_state_one(sc, i, &tx_state[i]);	/* update */

		port_ctl_reg
		    = t4_port_reg(sc, i, A_MPS_PORT_CTL);
		if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN &&
		    rx_pause != tx_state[i].rx_pause &&
		    tx_frames == tx_state[i].tx_frames) {
			/*
			 * TX is enabled and pause frames keep arriving but
			 * nothing has been transmitted: bounce PORTTXEN.
			 */
			t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0);
			mdelay(1);
			t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN);
		}
	}
}

/*
 * Mailbox control register value treated as "no access" by the reply-wait
 * loop in t4_wr_mbox_meat_timeout().
 */
#define X_CIM_PF_NOACCESS 0xeeeeeeee

/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *		(negative implies @sleep_ok=false)
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *	Note that passing in a negative @timeout is an alternate mechanism
 *	for specifying @sleep_ok=false.  This is useful when a higher level
 *	interface allows for specification of @timeout but not @sleep_ok ...
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {	/* milliseconds */
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret, next_tx_check;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;
	struct port_tx_state tx_state[MAX_NPORTS];

	if (adap->flags & CHK_MBOX_ACCESS)
		ASSERT_SYNCHRONIZED_OP(adap);

	/* Commands must be a positive multiple of 16 bytes within MBOX_LEN. */
	if (size <= 0 || (size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/* VFs use a different set of mailbox registers. */
	if (adap->flags & IS_VF) {
		if (chip_id(adap) >= CHELSIO_T6)
			data_reg = FW_T6VF_MBDATA_BASE_ADDR;
		else
			data_reg = FW_T4VF_MBDATA_BASE_ADDR;
		ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
	}

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	pcie_fw = 0;
	if (!(adap->flags & IS_VF)) {
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (pcie_fw & F_PCIE_FW_ERR)
			goto failed;
	}
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				goto failed;
		}
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true);
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	memset(cmd_rpl, 0, sizeof(cmd_rpl));
	memcpy(cmd_rpl, cmd, size);
	CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false);
	for (i = 0; i < ARRAY_SIZE(cmd_rpl); i++)
		t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i]));

	if (adap->flags & IS_VF) {
		/*
		 * For the VFs, the Mailbox Data "registers" are
		 * actually backed by T4's "MA" interface rather than
		 * PL Registers (as is the case for the PFs).  Because
		 * these are in different coherency domains, the write
		 * to the VF's PL-register-backed Mailbox Control can
		 * race in front of the writes to the MA-backed VF
		 * Mailbox Data "registers".  So we need to do a
		 * read-back on at least one byte of the VF Mailbox
		 * Data registers before doing the write to the VF
		 * Mailbox Control register.
		 */
		t4_read_reg(adap, data_reg);
	}

	/* Hand the mailbox (and the command in it) over to the firmware. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	read_tx_state(adap, &tx_state[0]);	/* also flushes the write_reg */
	next_tx_check = 1000;
	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	for (i = 0; i < timeout; i += ms) {
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				break;
		}

		/* Check for stalled TX paths roughly once a second. */
		if (i >= next_tx_check) {
			check_tx_state(adap, &tx_state[0]);
			next_tx_check = i + 1000;
		}

		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				/* Ownership without a message: release it. */
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
			CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				/* The "reply" is a firmware assertion. */
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n",
	       *(const u8 *)cmd, mbox, pcie_fw);
	CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true);
	CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true);
failed:
	adap->flags &= ~FW_OK;
	ret = pcie_fw & F_PCIE_FW_ERR ? -ENXIO : -ETIMEDOUT;
	t4_fatal_err(adap, true);
	return ret;
}

/* Send a mailbox command using the default firmware command timeout. */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
				       sleep_ok, FW_CMD_MAX_TIMEOUT);
}

/*
 * Log the ECC error address and BIST status registers of EDC @idx.
 * Supported on T5 and later only; @idx must be MEM_EDC0 or MEM_EDC1.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing
 *	the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	/* T4 uses a single, non-indexed MC register block. */
	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	/* Bail if a backdoor BIST read is already in flight. */
	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	/* Registers are read from high index to low into the output buffer. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first
 *	byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
		/*
		 * These macros are missing in t4_regs.h file.
		 * Added temporarily for testing.
		 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	/* Bail if a backdoor BIST read is already in flight. */
	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	/* Registers are read from high index to low into the output buffer. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks: both the address and the length must be
	 * 32-bit aligned.
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = rounddown2(addr, 64);
	end = roundup2(addr + len, 64);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}

/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if @drv_fw_attach is
 * not 0) since we prefer to let the firmware own all of these registers, but
 * if that fails we go for it directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{

	/*
	 * If drv_fw_attach != 0, construct and send the Firmware LDST Command
	 * to retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
			"Configuration Space register %d, err = %d\n",
			reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	return t4_hw_pci_read_cfg4(adap, reg);
}

/**
 *	t4_get_regs_len - return the size of the chips register set
 *	@adapter: the adapter
 *
 *	Returns the size of the chip's BAR0 register space.
793 */ 794 unsigned int t4_get_regs_len(struct adapter *adapter) 795 { 796 unsigned int chip_version = chip_id(adapter); 797 798 switch (chip_version) { 799 case CHELSIO_T4: 800 if (adapter->flags & IS_VF) 801 return FW_T4VF_REGMAP_SIZE; 802 return T4_REGMAP_SIZE; 803 804 case CHELSIO_T5: 805 case CHELSIO_T6: 806 case CHELSIO_T7: 807 if (adapter->flags & IS_VF) 808 return FW_T4VF_REGMAP_SIZE; 809 return T5_REGMAP_SIZE; 810 } 811 812 CH_ERR(adapter, 813 "Unsupported chip version %d\n", chip_version); 814 return 0; 815 } 816 817 /** 818 * t4_get_regs - read chip registers into provided buffer 819 * @adap: the adapter 820 * @buf: register buffer 821 * @buf_size: size (in bytes) of register buffer 822 * 823 * If the provided register buffer isn't large enough for the chip's 824 * full register range, the register dump will be truncated to the 825 * register buffer's size. 826 */ 827 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size) 828 { 829 static const unsigned int t4_reg_ranges[] = { 830 0x1008, 0x1108, 831 0x1180, 0x1184, 832 0x1190, 0x1194, 833 0x11a0, 0x11a4, 834 0x11b0, 0x11b4, 835 0x11fc, 0x123c, 836 0x1300, 0x173c, 837 0x1800, 0x18fc, 838 0x3000, 0x30d8, 839 0x30e0, 0x30e4, 840 0x30ec, 0x5910, 841 0x5920, 0x5924, 842 0x5960, 0x5960, 843 0x5968, 0x5968, 844 0x5970, 0x5970, 845 0x5978, 0x5978, 846 0x5980, 0x5980, 847 0x5988, 0x5988, 848 0x5990, 0x5990, 849 0x5998, 0x5998, 850 0x59a0, 0x59d4, 851 0x5a00, 0x5ae0, 852 0x5ae8, 0x5ae8, 853 0x5af0, 0x5af0, 854 0x5af8, 0x5af8, 855 0x6000, 0x6098, 856 0x6100, 0x6150, 857 0x6200, 0x6208, 858 0x6240, 0x6248, 859 0x6280, 0x62b0, 860 0x62c0, 0x6338, 861 0x6370, 0x638c, 862 0x6400, 0x643c, 863 0x6500, 0x6524, 864 0x6a00, 0x6a04, 865 0x6a14, 0x6a38, 866 0x6a60, 0x6a70, 867 0x6a78, 0x6a78, 868 0x6b00, 0x6b0c, 869 0x6b1c, 0x6b84, 870 0x6bf0, 0x6bf8, 871 0x6c00, 0x6c0c, 872 0x6c1c, 0x6c84, 873 0x6cf0, 0x6cf8, 874 0x6d00, 0x6d0c, 875 0x6d1c, 0x6d84, 876 0x6df0, 0x6df8, 877 0x6e00, 0x6e0c, 878 0x6e1c, 0x6e84, 879 
0x6ef0, 0x6ef8, 880 0x6f00, 0x6f0c, 881 0x6f1c, 0x6f84, 882 0x6ff0, 0x6ff8, 883 0x7000, 0x700c, 884 0x701c, 0x7084, 885 0x70f0, 0x70f8, 886 0x7100, 0x710c, 887 0x711c, 0x7184, 888 0x71f0, 0x71f8, 889 0x7200, 0x720c, 890 0x721c, 0x7284, 891 0x72f0, 0x72f8, 892 0x7300, 0x730c, 893 0x731c, 0x7384, 894 0x73f0, 0x73f8, 895 0x7400, 0x7450, 896 0x7500, 0x7530, 897 0x7600, 0x760c, 898 0x7614, 0x761c, 899 0x7680, 0x76cc, 900 0x7700, 0x7798, 901 0x77c0, 0x77fc, 902 0x7900, 0x79fc, 903 0x7b00, 0x7b58, 904 0x7b60, 0x7b84, 905 0x7b8c, 0x7c38, 906 0x7d00, 0x7d38, 907 0x7d40, 0x7d80, 908 0x7d8c, 0x7ddc, 909 0x7de4, 0x7e04, 910 0x7e10, 0x7e1c, 911 0x7e24, 0x7e38, 912 0x7e40, 0x7e44, 913 0x7e4c, 0x7e78, 914 0x7e80, 0x7ea4, 915 0x7eac, 0x7edc, 916 0x7ee8, 0x7efc, 917 0x8dc0, 0x8e04, 918 0x8e10, 0x8e1c, 919 0x8e30, 0x8e78, 920 0x8ea0, 0x8eb8, 921 0x8ec0, 0x8f6c, 922 0x8fc0, 0x9008, 923 0x9010, 0x9058, 924 0x9060, 0x9060, 925 0x9068, 0x9074, 926 0x90fc, 0x90fc, 927 0x9400, 0x9408, 928 0x9410, 0x9458, 929 0x9600, 0x9600, 930 0x9608, 0x9638, 931 0x9640, 0x96bc, 932 0x9800, 0x9808, 933 0x9820, 0x983c, 934 0x9850, 0x9864, 935 0x9c00, 0x9c6c, 936 0x9c80, 0x9cec, 937 0x9d00, 0x9d6c, 938 0x9d80, 0x9dec, 939 0x9e00, 0x9e6c, 940 0x9e80, 0x9eec, 941 0x9f00, 0x9f6c, 942 0x9f80, 0x9fec, 943 0xd004, 0xd004, 944 0xd010, 0xd03c, 945 0xdfc0, 0xdfe0, 946 0xe000, 0xea7c, 947 0xf000, 0x11110, 948 0x11118, 0x11190, 949 0x19040, 0x1906c, 950 0x19078, 0x19080, 951 0x1908c, 0x190e4, 952 0x190f0, 0x190f8, 953 0x19100, 0x19110, 954 0x19120, 0x19124, 955 0x19150, 0x19194, 956 0x1919c, 0x191b0, 957 0x191d0, 0x191e8, 958 0x19238, 0x1924c, 959 0x193f8, 0x1943c, 960 0x1944c, 0x19474, 961 0x19490, 0x194e0, 962 0x194f0, 0x194f8, 963 0x19800, 0x19c08, 964 0x19c10, 0x19c90, 965 0x19ca0, 0x19ce4, 966 0x19cf0, 0x19d40, 967 0x19d50, 0x19d94, 968 0x19da0, 0x19de8, 969 0x19df0, 0x19e40, 970 0x19e50, 0x19e90, 971 0x19ea0, 0x19f4c, 972 0x1a000, 0x1a004, 973 0x1a010, 0x1a06c, 974 0x1a0b0, 0x1a0e4, 975 0x1a0ec, 0x1a0f4, 976 
0x1a100, 0x1a108, 977 0x1a114, 0x1a120, 978 0x1a128, 0x1a130, 979 0x1a138, 0x1a138, 980 0x1a190, 0x1a1c4, 981 0x1a1fc, 0x1a1fc, 982 0x1e040, 0x1e04c, 983 0x1e284, 0x1e28c, 984 0x1e2c0, 0x1e2c0, 985 0x1e2e0, 0x1e2e0, 986 0x1e300, 0x1e384, 987 0x1e3c0, 0x1e3c8, 988 0x1e440, 0x1e44c, 989 0x1e684, 0x1e68c, 990 0x1e6c0, 0x1e6c0, 991 0x1e6e0, 0x1e6e0, 992 0x1e700, 0x1e784, 993 0x1e7c0, 0x1e7c8, 994 0x1e840, 0x1e84c, 995 0x1ea84, 0x1ea8c, 996 0x1eac0, 0x1eac0, 997 0x1eae0, 0x1eae0, 998 0x1eb00, 0x1eb84, 999 0x1ebc0, 0x1ebc8, 1000 0x1ec40, 0x1ec4c, 1001 0x1ee84, 0x1ee8c, 1002 0x1eec0, 0x1eec0, 1003 0x1eee0, 0x1eee0, 1004 0x1ef00, 0x1ef84, 1005 0x1efc0, 0x1efc8, 1006 0x1f040, 0x1f04c, 1007 0x1f284, 0x1f28c, 1008 0x1f2c0, 0x1f2c0, 1009 0x1f2e0, 0x1f2e0, 1010 0x1f300, 0x1f384, 1011 0x1f3c0, 0x1f3c8, 1012 0x1f440, 0x1f44c, 1013 0x1f684, 0x1f68c, 1014 0x1f6c0, 0x1f6c0, 1015 0x1f6e0, 0x1f6e0, 1016 0x1f700, 0x1f784, 1017 0x1f7c0, 0x1f7c8, 1018 0x1f840, 0x1f84c, 1019 0x1fa84, 0x1fa8c, 1020 0x1fac0, 0x1fac0, 1021 0x1fae0, 0x1fae0, 1022 0x1fb00, 0x1fb84, 1023 0x1fbc0, 0x1fbc8, 1024 0x1fc40, 0x1fc4c, 1025 0x1fe84, 0x1fe8c, 1026 0x1fec0, 0x1fec0, 1027 0x1fee0, 0x1fee0, 1028 0x1ff00, 0x1ff84, 1029 0x1ffc0, 0x1ffc8, 1030 0x20000, 0x2002c, 1031 0x20100, 0x2013c, 1032 0x20190, 0x201a0, 1033 0x201a8, 0x201b8, 1034 0x201c4, 0x201c8, 1035 0x20200, 0x20318, 1036 0x20400, 0x204b4, 1037 0x204c0, 0x20528, 1038 0x20540, 0x20614, 1039 0x21000, 0x21040, 1040 0x2104c, 0x21060, 1041 0x210c0, 0x210ec, 1042 0x21200, 0x21268, 1043 0x21270, 0x21284, 1044 0x212fc, 0x21388, 1045 0x21400, 0x21404, 1046 0x21500, 0x21500, 1047 0x21510, 0x21518, 1048 0x2152c, 0x21530, 1049 0x2153c, 0x2153c, 1050 0x21550, 0x21554, 1051 0x21600, 0x21600, 1052 0x21608, 0x2161c, 1053 0x21624, 0x21628, 1054 0x21630, 0x21634, 1055 0x2163c, 0x2163c, 1056 0x21700, 0x2171c, 1057 0x21780, 0x2178c, 1058 0x21800, 0x21818, 1059 0x21820, 0x21828, 1060 0x21830, 0x21848, 1061 0x21850, 0x21854, 1062 0x21860, 0x21868, 1063 0x21870, 0x21870, 
1064 0x21878, 0x21898, 1065 0x218a0, 0x218a8, 1066 0x218b0, 0x218c8, 1067 0x218d0, 0x218d4, 1068 0x218e0, 0x218e8, 1069 0x218f0, 0x218f0, 1070 0x218f8, 0x21a18, 1071 0x21a20, 0x21a28, 1072 0x21a30, 0x21a48, 1073 0x21a50, 0x21a54, 1074 0x21a60, 0x21a68, 1075 0x21a70, 0x21a70, 1076 0x21a78, 0x21a98, 1077 0x21aa0, 0x21aa8, 1078 0x21ab0, 0x21ac8, 1079 0x21ad0, 0x21ad4, 1080 0x21ae0, 0x21ae8, 1081 0x21af0, 0x21af0, 1082 0x21af8, 0x21c18, 1083 0x21c20, 0x21c20, 1084 0x21c28, 0x21c30, 1085 0x21c38, 0x21c38, 1086 0x21c80, 0x21c98, 1087 0x21ca0, 0x21ca8, 1088 0x21cb0, 0x21cc8, 1089 0x21cd0, 0x21cd4, 1090 0x21ce0, 0x21ce8, 1091 0x21cf0, 0x21cf0, 1092 0x21cf8, 0x21d7c, 1093 0x21e00, 0x21e04, 1094 0x22000, 0x2202c, 1095 0x22100, 0x2213c, 1096 0x22190, 0x221a0, 1097 0x221a8, 0x221b8, 1098 0x221c4, 0x221c8, 1099 0x22200, 0x22318, 1100 0x22400, 0x224b4, 1101 0x224c0, 0x22528, 1102 0x22540, 0x22614, 1103 0x23000, 0x23040, 1104 0x2304c, 0x23060, 1105 0x230c0, 0x230ec, 1106 0x23200, 0x23268, 1107 0x23270, 0x23284, 1108 0x232fc, 0x23388, 1109 0x23400, 0x23404, 1110 0x23500, 0x23500, 1111 0x23510, 0x23518, 1112 0x2352c, 0x23530, 1113 0x2353c, 0x2353c, 1114 0x23550, 0x23554, 1115 0x23600, 0x23600, 1116 0x23608, 0x2361c, 1117 0x23624, 0x23628, 1118 0x23630, 0x23634, 1119 0x2363c, 0x2363c, 1120 0x23700, 0x2371c, 1121 0x23780, 0x2378c, 1122 0x23800, 0x23818, 1123 0x23820, 0x23828, 1124 0x23830, 0x23848, 1125 0x23850, 0x23854, 1126 0x23860, 0x23868, 1127 0x23870, 0x23870, 1128 0x23878, 0x23898, 1129 0x238a0, 0x238a8, 1130 0x238b0, 0x238c8, 1131 0x238d0, 0x238d4, 1132 0x238e0, 0x238e8, 1133 0x238f0, 0x238f0, 1134 0x238f8, 0x23a18, 1135 0x23a20, 0x23a28, 1136 0x23a30, 0x23a48, 1137 0x23a50, 0x23a54, 1138 0x23a60, 0x23a68, 1139 0x23a70, 0x23a70, 1140 0x23a78, 0x23a98, 1141 0x23aa0, 0x23aa8, 1142 0x23ab0, 0x23ac8, 1143 0x23ad0, 0x23ad4, 1144 0x23ae0, 0x23ae8, 1145 0x23af0, 0x23af0, 1146 0x23af8, 0x23c18, 1147 0x23c20, 0x23c20, 1148 0x23c28, 0x23c30, 1149 0x23c38, 0x23c38, 1150 0x23c80, 
0x23c98, 1151 0x23ca0, 0x23ca8, 1152 0x23cb0, 0x23cc8, 1153 0x23cd0, 0x23cd4, 1154 0x23ce0, 0x23ce8, 1155 0x23cf0, 0x23cf0, 1156 0x23cf8, 0x23d7c, 1157 0x23e00, 0x23e04, 1158 0x24000, 0x2402c, 1159 0x24100, 0x2413c, 1160 0x24190, 0x241a0, 1161 0x241a8, 0x241b8, 1162 0x241c4, 0x241c8, 1163 0x24200, 0x24318, 1164 0x24400, 0x244b4, 1165 0x244c0, 0x24528, 1166 0x24540, 0x24614, 1167 0x25000, 0x25040, 1168 0x2504c, 0x25060, 1169 0x250c0, 0x250ec, 1170 0x25200, 0x25268, 1171 0x25270, 0x25284, 1172 0x252fc, 0x25388, 1173 0x25400, 0x25404, 1174 0x25500, 0x25500, 1175 0x25510, 0x25518, 1176 0x2552c, 0x25530, 1177 0x2553c, 0x2553c, 1178 0x25550, 0x25554, 1179 0x25600, 0x25600, 1180 0x25608, 0x2561c, 1181 0x25624, 0x25628, 1182 0x25630, 0x25634, 1183 0x2563c, 0x2563c, 1184 0x25700, 0x2571c, 1185 0x25780, 0x2578c, 1186 0x25800, 0x25818, 1187 0x25820, 0x25828, 1188 0x25830, 0x25848, 1189 0x25850, 0x25854, 1190 0x25860, 0x25868, 1191 0x25870, 0x25870, 1192 0x25878, 0x25898, 1193 0x258a0, 0x258a8, 1194 0x258b0, 0x258c8, 1195 0x258d0, 0x258d4, 1196 0x258e0, 0x258e8, 1197 0x258f0, 0x258f0, 1198 0x258f8, 0x25a18, 1199 0x25a20, 0x25a28, 1200 0x25a30, 0x25a48, 1201 0x25a50, 0x25a54, 1202 0x25a60, 0x25a68, 1203 0x25a70, 0x25a70, 1204 0x25a78, 0x25a98, 1205 0x25aa0, 0x25aa8, 1206 0x25ab0, 0x25ac8, 1207 0x25ad0, 0x25ad4, 1208 0x25ae0, 0x25ae8, 1209 0x25af0, 0x25af0, 1210 0x25af8, 0x25c18, 1211 0x25c20, 0x25c20, 1212 0x25c28, 0x25c30, 1213 0x25c38, 0x25c38, 1214 0x25c80, 0x25c98, 1215 0x25ca0, 0x25ca8, 1216 0x25cb0, 0x25cc8, 1217 0x25cd0, 0x25cd4, 1218 0x25ce0, 0x25ce8, 1219 0x25cf0, 0x25cf0, 1220 0x25cf8, 0x25d7c, 1221 0x25e00, 0x25e04, 1222 0x26000, 0x2602c, 1223 0x26100, 0x2613c, 1224 0x26190, 0x261a0, 1225 0x261a8, 0x261b8, 1226 0x261c4, 0x261c8, 1227 0x26200, 0x26318, 1228 0x26400, 0x264b4, 1229 0x264c0, 0x26528, 1230 0x26540, 0x26614, 1231 0x27000, 0x27040, 1232 0x2704c, 0x27060, 1233 0x270c0, 0x270ec, 1234 0x27200, 0x27268, 1235 0x27270, 0x27284, 1236 0x272fc, 0x27388, 1237 
0x27400, 0x27404, 1238 0x27500, 0x27500, 1239 0x27510, 0x27518, 1240 0x2752c, 0x27530, 1241 0x2753c, 0x2753c, 1242 0x27550, 0x27554, 1243 0x27600, 0x27600, 1244 0x27608, 0x2761c, 1245 0x27624, 0x27628, 1246 0x27630, 0x27634, 1247 0x2763c, 0x2763c, 1248 0x27700, 0x2771c, 1249 0x27780, 0x2778c, 1250 0x27800, 0x27818, 1251 0x27820, 0x27828, 1252 0x27830, 0x27848, 1253 0x27850, 0x27854, 1254 0x27860, 0x27868, 1255 0x27870, 0x27870, 1256 0x27878, 0x27898, 1257 0x278a0, 0x278a8, 1258 0x278b0, 0x278c8, 1259 0x278d0, 0x278d4, 1260 0x278e0, 0x278e8, 1261 0x278f0, 0x278f0, 1262 0x278f8, 0x27a18, 1263 0x27a20, 0x27a28, 1264 0x27a30, 0x27a48, 1265 0x27a50, 0x27a54, 1266 0x27a60, 0x27a68, 1267 0x27a70, 0x27a70, 1268 0x27a78, 0x27a98, 1269 0x27aa0, 0x27aa8, 1270 0x27ab0, 0x27ac8, 1271 0x27ad0, 0x27ad4, 1272 0x27ae0, 0x27ae8, 1273 0x27af0, 0x27af0, 1274 0x27af8, 0x27c18, 1275 0x27c20, 0x27c20, 1276 0x27c28, 0x27c30, 1277 0x27c38, 0x27c38, 1278 0x27c80, 0x27c98, 1279 0x27ca0, 0x27ca8, 1280 0x27cb0, 0x27cc8, 1281 0x27cd0, 0x27cd4, 1282 0x27ce0, 0x27ce8, 1283 0x27cf0, 0x27cf0, 1284 0x27cf8, 0x27d7c, 1285 0x27e00, 0x27e04, 1286 }; 1287 1288 static const unsigned int t4vf_reg_ranges[] = { 1289 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 1290 VF_MPS_REG(A_MPS_VF_CTL), 1291 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 1292 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI), 1293 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 1294 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 1295 FW_T4VF_MBDATA_BASE_ADDR, 1296 FW_T4VF_MBDATA_BASE_ADDR + 1297 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 1298 }; 1299 1300 static const unsigned int t5_reg_ranges[] = { 1301 0x1008, 0x10c0, 1302 0x10cc, 0x10f8, 1303 0x1100, 0x1100, 1304 0x110c, 0x1148, 1305 0x1180, 0x1184, 1306 0x1190, 0x1194, 1307 0x11a0, 0x11a4, 1308 0x11b0, 0x11b4, 1309 0x11fc, 0x123c, 1310 0x1280, 0x173c, 1311 0x1800, 0x18fc, 1312 0x3000, 0x3028, 1313 0x3060, 0x30b0, 1314 0x30b8, 0x30d8, 1315 0x30e0, 0x30fc, 1316 0x3140, 0x357c, 
1317 0x35a8, 0x35cc, 1318 0x35ec, 0x35ec, 1319 0x3600, 0x5624, 1320 0x56cc, 0x56ec, 1321 0x56f4, 0x5720, 1322 0x5728, 0x575c, 1323 0x580c, 0x5814, 1324 0x5890, 0x589c, 1325 0x58a4, 0x58ac, 1326 0x58b8, 0x58bc, 1327 0x5940, 0x59c8, 1328 0x59d0, 0x59dc, 1329 0x59fc, 0x5a18, 1330 0x5a60, 0x5a70, 1331 0x5a80, 0x5a9c, 1332 0x5b94, 0x5bfc, 1333 0x6000, 0x6020, 1334 0x6028, 0x6040, 1335 0x6058, 0x609c, 1336 0x60a8, 0x614c, 1337 0x7700, 0x7798, 1338 0x77c0, 0x78fc, 1339 0x7b00, 0x7b58, 1340 0x7b60, 0x7b84, 1341 0x7b8c, 0x7c54, 1342 0x7d00, 0x7d38, 1343 0x7d40, 0x7d80, 1344 0x7d8c, 0x7ddc, 1345 0x7de4, 0x7e04, 1346 0x7e10, 0x7e1c, 1347 0x7e24, 0x7e38, 1348 0x7e40, 0x7e44, 1349 0x7e4c, 0x7e78, 1350 0x7e80, 0x7edc, 1351 0x7ee8, 0x7efc, 1352 0x8dc0, 0x8de0, 1353 0x8df8, 0x8e04, 1354 0x8e10, 0x8e84, 1355 0x8ea0, 0x8f84, 1356 0x8fc0, 0x9058, 1357 0x9060, 0x9060, 1358 0x9068, 0x90f8, 1359 0x9400, 0x9408, 1360 0x9410, 0x9470, 1361 0x9600, 0x9600, 1362 0x9608, 0x9638, 1363 0x9640, 0x96f4, 1364 0x9800, 0x9808, 1365 0x9810, 0x9864, 1366 0x9c00, 0x9c6c, 1367 0x9c80, 0x9cec, 1368 0x9d00, 0x9d6c, 1369 0x9d80, 0x9dec, 1370 0x9e00, 0x9e6c, 1371 0x9e80, 0x9eec, 1372 0x9f00, 0x9f6c, 1373 0x9f80, 0xa020, 1374 0xd000, 0xd004, 1375 0xd010, 0xd03c, 1376 0xdfc0, 0xdfe0, 1377 0xe000, 0x1106c, 1378 0x11074, 0x11088, 1379 0x1109c, 0x11110, 1380 0x11118, 0x1117c, 1381 0x11190, 0x11204, 1382 0x19040, 0x1906c, 1383 0x19078, 0x19080, 1384 0x1908c, 0x190e8, 1385 0x190f0, 0x190f8, 1386 0x19100, 0x19110, 1387 0x19120, 0x19124, 1388 0x19150, 0x19194, 1389 0x1919c, 0x191b0, 1390 0x191d0, 0x191e8, 1391 0x19238, 0x19290, 1392 0x193f8, 0x19428, 1393 0x19430, 0x19444, 1394 0x1944c, 0x1946c, 1395 0x19474, 0x19474, 1396 0x19490, 0x194cc, 1397 0x194f0, 0x194f8, 1398 0x19c00, 0x19c08, 1399 0x19c10, 0x19c60, 1400 0x19c94, 0x19ce4, 1401 0x19cf0, 0x19d40, 1402 0x19d50, 0x19d94, 1403 0x19da0, 0x19de8, 1404 0x19df0, 0x19e10, 1405 0x19e50, 0x19e90, 1406 0x19ea0, 0x19f24, 1407 0x19f34, 0x19f34, 1408 0x19f40, 0x19f50, 1409 
0x19f90, 0x19fb4, 1410 0x19fc4, 0x19fe4, 1411 0x1a000, 0x1a004, 1412 0x1a010, 0x1a06c, 1413 0x1a0b0, 0x1a0e4, 1414 0x1a0ec, 0x1a0f8, 1415 0x1a100, 0x1a108, 1416 0x1a114, 0x1a130, 1417 0x1a138, 0x1a1c4, 1418 0x1a1fc, 0x1a1fc, 1419 0x1e008, 0x1e00c, 1420 0x1e040, 0x1e044, 1421 0x1e04c, 0x1e04c, 1422 0x1e284, 0x1e290, 1423 0x1e2c0, 0x1e2c0, 1424 0x1e2e0, 0x1e2e0, 1425 0x1e300, 0x1e384, 1426 0x1e3c0, 0x1e3c8, 1427 0x1e408, 0x1e40c, 1428 0x1e440, 0x1e444, 1429 0x1e44c, 0x1e44c, 1430 0x1e684, 0x1e690, 1431 0x1e6c0, 0x1e6c0, 1432 0x1e6e0, 0x1e6e0, 1433 0x1e700, 0x1e784, 1434 0x1e7c0, 0x1e7c8, 1435 0x1e808, 0x1e80c, 1436 0x1e840, 0x1e844, 1437 0x1e84c, 0x1e84c, 1438 0x1ea84, 0x1ea90, 1439 0x1eac0, 0x1eac0, 1440 0x1eae0, 0x1eae0, 1441 0x1eb00, 0x1eb84, 1442 0x1ebc0, 0x1ebc8, 1443 0x1ec08, 0x1ec0c, 1444 0x1ec40, 0x1ec44, 1445 0x1ec4c, 0x1ec4c, 1446 0x1ee84, 0x1ee90, 1447 0x1eec0, 0x1eec0, 1448 0x1eee0, 0x1eee0, 1449 0x1ef00, 0x1ef84, 1450 0x1efc0, 0x1efc8, 1451 0x1f008, 0x1f00c, 1452 0x1f040, 0x1f044, 1453 0x1f04c, 0x1f04c, 1454 0x1f284, 0x1f290, 1455 0x1f2c0, 0x1f2c0, 1456 0x1f2e0, 0x1f2e0, 1457 0x1f300, 0x1f384, 1458 0x1f3c0, 0x1f3c8, 1459 0x1f408, 0x1f40c, 1460 0x1f440, 0x1f444, 1461 0x1f44c, 0x1f44c, 1462 0x1f684, 0x1f690, 1463 0x1f6c0, 0x1f6c0, 1464 0x1f6e0, 0x1f6e0, 1465 0x1f700, 0x1f784, 1466 0x1f7c0, 0x1f7c8, 1467 0x1f808, 0x1f80c, 1468 0x1f840, 0x1f844, 1469 0x1f84c, 0x1f84c, 1470 0x1fa84, 0x1fa90, 1471 0x1fac0, 0x1fac0, 1472 0x1fae0, 0x1fae0, 1473 0x1fb00, 0x1fb84, 1474 0x1fbc0, 0x1fbc8, 1475 0x1fc08, 0x1fc0c, 1476 0x1fc40, 0x1fc44, 1477 0x1fc4c, 0x1fc4c, 1478 0x1fe84, 0x1fe90, 1479 0x1fec0, 0x1fec0, 1480 0x1fee0, 0x1fee0, 1481 0x1ff00, 0x1ff84, 1482 0x1ffc0, 0x1ffc8, 1483 0x30000, 0x30030, 1484 0x30100, 0x30144, 1485 0x30190, 0x301a0, 1486 0x301a8, 0x301b8, 1487 0x301c4, 0x301c8, 1488 0x301d0, 0x301d0, 1489 0x30200, 0x30318, 1490 0x30400, 0x304b4, 1491 0x304c0, 0x3052c, 1492 0x30540, 0x3061c, 1493 0x30800, 0x30828, 1494 0x30834, 0x30834, 1495 0x308c0, 0x30908, 
1496 0x30910, 0x309ac, 1497 0x30a00, 0x30a14, 1498 0x30a1c, 0x30a2c, 1499 0x30a44, 0x30a50, 1500 0x30a74, 0x30a74, 1501 0x30a7c, 0x30afc, 1502 0x30b08, 0x30c24, 1503 0x30d00, 0x30d00, 1504 0x30d08, 0x30d14, 1505 0x30d1c, 0x30d20, 1506 0x30d3c, 0x30d3c, 1507 0x30d48, 0x30d50, 1508 0x31200, 0x3120c, 1509 0x31220, 0x31220, 1510 0x31240, 0x31240, 1511 0x31600, 0x3160c, 1512 0x31a00, 0x31a1c, 1513 0x31e00, 0x31e20, 1514 0x31e38, 0x31e3c, 1515 0x31e80, 0x31e80, 1516 0x31e88, 0x31ea8, 1517 0x31eb0, 0x31eb4, 1518 0x31ec8, 0x31ed4, 1519 0x31fb8, 0x32004, 1520 0x32200, 0x32200, 1521 0x32208, 0x32240, 1522 0x32248, 0x32280, 1523 0x32288, 0x322c0, 1524 0x322c8, 0x322fc, 1525 0x32600, 0x32630, 1526 0x32a00, 0x32abc, 1527 0x32b00, 0x32b10, 1528 0x32b20, 0x32b30, 1529 0x32b40, 0x32b50, 1530 0x32b60, 0x32b70, 1531 0x33000, 0x33028, 1532 0x33030, 0x33048, 1533 0x33060, 0x33068, 1534 0x33070, 0x3309c, 1535 0x330f0, 0x33128, 1536 0x33130, 0x33148, 1537 0x33160, 0x33168, 1538 0x33170, 0x3319c, 1539 0x331f0, 0x33238, 1540 0x33240, 0x33240, 1541 0x33248, 0x33250, 1542 0x3325c, 0x33264, 1543 0x33270, 0x332b8, 1544 0x332c0, 0x332e4, 1545 0x332f8, 0x33338, 1546 0x33340, 0x33340, 1547 0x33348, 0x33350, 1548 0x3335c, 0x33364, 1549 0x33370, 0x333b8, 1550 0x333c0, 0x333e4, 1551 0x333f8, 0x33428, 1552 0x33430, 0x33448, 1553 0x33460, 0x33468, 1554 0x33470, 0x3349c, 1555 0x334f0, 0x33528, 1556 0x33530, 0x33548, 1557 0x33560, 0x33568, 1558 0x33570, 0x3359c, 1559 0x335f0, 0x33638, 1560 0x33640, 0x33640, 1561 0x33648, 0x33650, 1562 0x3365c, 0x33664, 1563 0x33670, 0x336b8, 1564 0x336c0, 0x336e4, 1565 0x336f8, 0x33738, 1566 0x33740, 0x33740, 1567 0x33748, 0x33750, 1568 0x3375c, 0x33764, 1569 0x33770, 0x337b8, 1570 0x337c0, 0x337e4, 1571 0x337f8, 0x337fc, 1572 0x33814, 0x33814, 1573 0x3382c, 0x3382c, 1574 0x33880, 0x3388c, 1575 0x338e8, 0x338ec, 1576 0x33900, 0x33928, 1577 0x33930, 0x33948, 1578 0x33960, 0x33968, 1579 0x33970, 0x3399c, 1580 0x339f0, 0x33a38, 1581 0x33a40, 0x33a40, 1582 0x33a48, 
0x33a50, 1583 0x33a5c, 0x33a64, 1584 0x33a70, 0x33ab8, 1585 0x33ac0, 0x33ae4, 1586 0x33af8, 0x33b10, 1587 0x33b28, 0x33b28, 1588 0x33b3c, 0x33b50, 1589 0x33bf0, 0x33c10, 1590 0x33c28, 0x33c28, 1591 0x33c3c, 0x33c50, 1592 0x33cf0, 0x33cfc, 1593 0x34000, 0x34030, 1594 0x34100, 0x34144, 1595 0x34190, 0x341a0, 1596 0x341a8, 0x341b8, 1597 0x341c4, 0x341c8, 1598 0x341d0, 0x341d0, 1599 0x34200, 0x34318, 1600 0x34400, 0x344b4, 1601 0x344c0, 0x3452c, 1602 0x34540, 0x3461c, 1603 0x34800, 0x34828, 1604 0x34834, 0x34834, 1605 0x348c0, 0x34908, 1606 0x34910, 0x349ac, 1607 0x34a00, 0x34a14, 1608 0x34a1c, 0x34a2c, 1609 0x34a44, 0x34a50, 1610 0x34a74, 0x34a74, 1611 0x34a7c, 0x34afc, 1612 0x34b08, 0x34c24, 1613 0x34d00, 0x34d00, 1614 0x34d08, 0x34d14, 1615 0x34d1c, 0x34d20, 1616 0x34d3c, 0x34d3c, 1617 0x34d48, 0x34d50, 1618 0x35200, 0x3520c, 1619 0x35220, 0x35220, 1620 0x35240, 0x35240, 1621 0x35600, 0x3560c, 1622 0x35a00, 0x35a1c, 1623 0x35e00, 0x35e20, 1624 0x35e38, 0x35e3c, 1625 0x35e80, 0x35e80, 1626 0x35e88, 0x35ea8, 1627 0x35eb0, 0x35eb4, 1628 0x35ec8, 0x35ed4, 1629 0x35fb8, 0x36004, 1630 0x36200, 0x36200, 1631 0x36208, 0x36240, 1632 0x36248, 0x36280, 1633 0x36288, 0x362c0, 1634 0x362c8, 0x362fc, 1635 0x36600, 0x36630, 1636 0x36a00, 0x36abc, 1637 0x36b00, 0x36b10, 1638 0x36b20, 0x36b30, 1639 0x36b40, 0x36b50, 1640 0x36b60, 0x36b70, 1641 0x37000, 0x37028, 1642 0x37030, 0x37048, 1643 0x37060, 0x37068, 1644 0x37070, 0x3709c, 1645 0x370f0, 0x37128, 1646 0x37130, 0x37148, 1647 0x37160, 0x37168, 1648 0x37170, 0x3719c, 1649 0x371f0, 0x37238, 1650 0x37240, 0x37240, 1651 0x37248, 0x37250, 1652 0x3725c, 0x37264, 1653 0x37270, 0x372b8, 1654 0x372c0, 0x372e4, 1655 0x372f8, 0x37338, 1656 0x37340, 0x37340, 1657 0x37348, 0x37350, 1658 0x3735c, 0x37364, 1659 0x37370, 0x373b8, 1660 0x373c0, 0x373e4, 1661 0x373f8, 0x37428, 1662 0x37430, 0x37448, 1663 0x37460, 0x37468, 1664 0x37470, 0x3749c, 1665 0x374f0, 0x37528, 1666 0x37530, 0x37548, 1667 0x37560, 0x37568, 1668 0x37570, 0x3759c, 1669 
0x375f0, 0x37638, 1670 0x37640, 0x37640, 1671 0x37648, 0x37650, 1672 0x3765c, 0x37664, 1673 0x37670, 0x376b8, 1674 0x376c0, 0x376e4, 1675 0x376f8, 0x37738, 1676 0x37740, 0x37740, 1677 0x37748, 0x37750, 1678 0x3775c, 0x37764, 1679 0x37770, 0x377b8, 1680 0x377c0, 0x377e4, 1681 0x377f8, 0x377fc, 1682 0x37814, 0x37814, 1683 0x3782c, 0x3782c, 1684 0x37880, 0x3788c, 1685 0x378e8, 0x378ec, 1686 0x37900, 0x37928, 1687 0x37930, 0x37948, 1688 0x37960, 0x37968, 1689 0x37970, 0x3799c, 1690 0x379f0, 0x37a38, 1691 0x37a40, 0x37a40, 1692 0x37a48, 0x37a50, 1693 0x37a5c, 0x37a64, 1694 0x37a70, 0x37ab8, 1695 0x37ac0, 0x37ae4, 1696 0x37af8, 0x37b10, 1697 0x37b28, 0x37b28, 1698 0x37b3c, 0x37b50, 1699 0x37bf0, 0x37c10, 1700 0x37c28, 0x37c28, 1701 0x37c3c, 0x37c50, 1702 0x37cf0, 0x37cfc, 1703 0x38000, 0x38030, 1704 0x38100, 0x38144, 1705 0x38190, 0x381a0, 1706 0x381a8, 0x381b8, 1707 0x381c4, 0x381c8, 1708 0x381d0, 0x381d0, 1709 0x38200, 0x38318, 1710 0x38400, 0x384b4, 1711 0x384c0, 0x3852c, 1712 0x38540, 0x3861c, 1713 0x38800, 0x38828, 1714 0x38834, 0x38834, 1715 0x388c0, 0x38908, 1716 0x38910, 0x389ac, 1717 0x38a00, 0x38a14, 1718 0x38a1c, 0x38a2c, 1719 0x38a44, 0x38a50, 1720 0x38a74, 0x38a74, 1721 0x38a7c, 0x38afc, 1722 0x38b08, 0x38c24, 1723 0x38d00, 0x38d00, 1724 0x38d08, 0x38d14, 1725 0x38d1c, 0x38d20, 1726 0x38d3c, 0x38d3c, 1727 0x38d48, 0x38d50, 1728 0x39200, 0x3920c, 1729 0x39220, 0x39220, 1730 0x39240, 0x39240, 1731 0x39600, 0x3960c, 1732 0x39a00, 0x39a1c, 1733 0x39e00, 0x39e20, 1734 0x39e38, 0x39e3c, 1735 0x39e80, 0x39e80, 1736 0x39e88, 0x39ea8, 1737 0x39eb0, 0x39eb4, 1738 0x39ec8, 0x39ed4, 1739 0x39fb8, 0x3a004, 1740 0x3a200, 0x3a200, 1741 0x3a208, 0x3a240, 1742 0x3a248, 0x3a280, 1743 0x3a288, 0x3a2c0, 1744 0x3a2c8, 0x3a2fc, 1745 0x3a600, 0x3a630, 1746 0x3aa00, 0x3aabc, 1747 0x3ab00, 0x3ab10, 1748 0x3ab20, 0x3ab30, 1749 0x3ab40, 0x3ab50, 1750 0x3ab60, 0x3ab70, 1751 0x3b000, 0x3b028, 1752 0x3b030, 0x3b048, 1753 0x3b060, 0x3b068, 1754 0x3b070, 0x3b09c, 1755 0x3b0f0, 0x3b128, 
1756 0x3b130, 0x3b148, 1757 0x3b160, 0x3b168, 1758 0x3b170, 0x3b19c, 1759 0x3b1f0, 0x3b238, 1760 0x3b240, 0x3b240, 1761 0x3b248, 0x3b250, 1762 0x3b25c, 0x3b264, 1763 0x3b270, 0x3b2b8, 1764 0x3b2c0, 0x3b2e4, 1765 0x3b2f8, 0x3b338, 1766 0x3b340, 0x3b340, 1767 0x3b348, 0x3b350, 1768 0x3b35c, 0x3b364, 1769 0x3b370, 0x3b3b8, 1770 0x3b3c0, 0x3b3e4, 1771 0x3b3f8, 0x3b428, 1772 0x3b430, 0x3b448, 1773 0x3b460, 0x3b468, 1774 0x3b470, 0x3b49c, 1775 0x3b4f0, 0x3b528, 1776 0x3b530, 0x3b548, 1777 0x3b560, 0x3b568, 1778 0x3b570, 0x3b59c, 1779 0x3b5f0, 0x3b638, 1780 0x3b640, 0x3b640, 1781 0x3b648, 0x3b650, 1782 0x3b65c, 0x3b664, 1783 0x3b670, 0x3b6b8, 1784 0x3b6c0, 0x3b6e4, 1785 0x3b6f8, 0x3b738, 1786 0x3b740, 0x3b740, 1787 0x3b748, 0x3b750, 1788 0x3b75c, 0x3b764, 1789 0x3b770, 0x3b7b8, 1790 0x3b7c0, 0x3b7e4, 1791 0x3b7f8, 0x3b7fc, 1792 0x3b814, 0x3b814, 1793 0x3b82c, 0x3b82c, 1794 0x3b880, 0x3b88c, 1795 0x3b8e8, 0x3b8ec, 1796 0x3b900, 0x3b928, 1797 0x3b930, 0x3b948, 1798 0x3b960, 0x3b968, 1799 0x3b970, 0x3b99c, 1800 0x3b9f0, 0x3ba38, 1801 0x3ba40, 0x3ba40, 1802 0x3ba48, 0x3ba50, 1803 0x3ba5c, 0x3ba64, 1804 0x3ba70, 0x3bab8, 1805 0x3bac0, 0x3bae4, 1806 0x3baf8, 0x3bb10, 1807 0x3bb28, 0x3bb28, 1808 0x3bb3c, 0x3bb50, 1809 0x3bbf0, 0x3bc10, 1810 0x3bc28, 0x3bc28, 1811 0x3bc3c, 0x3bc50, 1812 0x3bcf0, 0x3bcfc, 1813 0x3c000, 0x3c030, 1814 0x3c100, 0x3c144, 1815 0x3c190, 0x3c1a0, 1816 0x3c1a8, 0x3c1b8, 1817 0x3c1c4, 0x3c1c8, 1818 0x3c1d0, 0x3c1d0, 1819 0x3c200, 0x3c318, 1820 0x3c400, 0x3c4b4, 1821 0x3c4c0, 0x3c52c, 1822 0x3c540, 0x3c61c, 1823 0x3c800, 0x3c828, 1824 0x3c834, 0x3c834, 1825 0x3c8c0, 0x3c908, 1826 0x3c910, 0x3c9ac, 1827 0x3ca00, 0x3ca14, 1828 0x3ca1c, 0x3ca2c, 1829 0x3ca44, 0x3ca50, 1830 0x3ca74, 0x3ca74, 1831 0x3ca7c, 0x3cafc, 1832 0x3cb08, 0x3cc24, 1833 0x3cd00, 0x3cd00, 1834 0x3cd08, 0x3cd14, 1835 0x3cd1c, 0x3cd20, 1836 0x3cd3c, 0x3cd3c, 1837 0x3cd48, 0x3cd50, 1838 0x3d200, 0x3d20c, 1839 0x3d220, 0x3d220, 1840 0x3d240, 0x3d240, 1841 0x3d600, 0x3d60c, 1842 0x3da00, 
0x3da1c, 1843 0x3de00, 0x3de20, 1844 0x3de38, 0x3de3c, 1845 0x3de80, 0x3de80, 1846 0x3de88, 0x3dea8, 1847 0x3deb0, 0x3deb4, 1848 0x3dec8, 0x3ded4, 1849 0x3dfb8, 0x3e004, 1850 0x3e200, 0x3e200, 1851 0x3e208, 0x3e240, 1852 0x3e248, 0x3e280, 1853 0x3e288, 0x3e2c0, 1854 0x3e2c8, 0x3e2fc, 1855 0x3e600, 0x3e630, 1856 0x3ea00, 0x3eabc, 1857 0x3eb00, 0x3eb10, 1858 0x3eb20, 0x3eb30, 1859 0x3eb40, 0x3eb50, 1860 0x3eb60, 0x3eb70, 1861 0x3f000, 0x3f028, 1862 0x3f030, 0x3f048, 1863 0x3f060, 0x3f068, 1864 0x3f070, 0x3f09c, 1865 0x3f0f0, 0x3f128, 1866 0x3f130, 0x3f148, 1867 0x3f160, 0x3f168, 1868 0x3f170, 0x3f19c, 1869 0x3f1f0, 0x3f238, 1870 0x3f240, 0x3f240, 1871 0x3f248, 0x3f250, 1872 0x3f25c, 0x3f264, 1873 0x3f270, 0x3f2b8, 1874 0x3f2c0, 0x3f2e4, 1875 0x3f2f8, 0x3f338, 1876 0x3f340, 0x3f340, 1877 0x3f348, 0x3f350, 1878 0x3f35c, 0x3f364, 1879 0x3f370, 0x3f3b8, 1880 0x3f3c0, 0x3f3e4, 1881 0x3f3f8, 0x3f428, 1882 0x3f430, 0x3f448, 1883 0x3f460, 0x3f468, 1884 0x3f470, 0x3f49c, 1885 0x3f4f0, 0x3f528, 1886 0x3f530, 0x3f548, 1887 0x3f560, 0x3f568, 1888 0x3f570, 0x3f59c, 1889 0x3f5f0, 0x3f638, 1890 0x3f640, 0x3f640, 1891 0x3f648, 0x3f650, 1892 0x3f65c, 0x3f664, 1893 0x3f670, 0x3f6b8, 1894 0x3f6c0, 0x3f6e4, 1895 0x3f6f8, 0x3f738, 1896 0x3f740, 0x3f740, 1897 0x3f748, 0x3f750, 1898 0x3f75c, 0x3f764, 1899 0x3f770, 0x3f7b8, 1900 0x3f7c0, 0x3f7e4, 1901 0x3f7f8, 0x3f7fc, 1902 0x3f814, 0x3f814, 1903 0x3f82c, 0x3f82c, 1904 0x3f880, 0x3f88c, 1905 0x3f8e8, 0x3f8ec, 1906 0x3f900, 0x3f928, 1907 0x3f930, 0x3f948, 1908 0x3f960, 0x3f968, 1909 0x3f970, 0x3f99c, 1910 0x3f9f0, 0x3fa38, 1911 0x3fa40, 0x3fa40, 1912 0x3fa48, 0x3fa50, 1913 0x3fa5c, 0x3fa64, 1914 0x3fa70, 0x3fab8, 1915 0x3fac0, 0x3fae4, 1916 0x3faf8, 0x3fb10, 1917 0x3fb28, 0x3fb28, 1918 0x3fb3c, 0x3fb50, 1919 0x3fbf0, 0x3fc10, 1920 0x3fc28, 0x3fc28, 1921 0x3fc3c, 0x3fc50, 1922 0x3fcf0, 0x3fcfc, 1923 0x40000, 0x4000c, 1924 0x40040, 0x40050, 1925 0x40060, 0x40068, 1926 0x4007c, 0x4008c, 1927 0x40094, 0x400b0, 1928 0x400c0, 0x40144, 1929 
0x40180, 0x4018c, 1930 0x40200, 0x40254, 1931 0x40260, 0x40264, 1932 0x40270, 0x40288, 1933 0x40290, 0x40298, 1934 0x402ac, 0x402c8, 1935 0x402d0, 0x402e0, 1936 0x402f0, 0x402f0, 1937 0x40300, 0x4033c, 1938 0x403f8, 0x403fc, 1939 0x41304, 0x413c4, 1940 0x41400, 0x4140c, 1941 0x41414, 0x4141c, 1942 0x41480, 0x414d0, 1943 0x44000, 0x44054, 1944 0x4405c, 0x44078, 1945 0x440c0, 0x44174, 1946 0x44180, 0x441ac, 1947 0x441b4, 0x441b8, 1948 0x441c0, 0x44254, 1949 0x4425c, 0x44278, 1950 0x442c0, 0x44374, 1951 0x44380, 0x443ac, 1952 0x443b4, 0x443b8, 1953 0x443c0, 0x44454, 1954 0x4445c, 0x44478, 1955 0x444c0, 0x44574, 1956 0x44580, 0x445ac, 1957 0x445b4, 0x445b8, 1958 0x445c0, 0x44654, 1959 0x4465c, 0x44678, 1960 0x446c0, 0x44774, 1961 0x44780, 0x447ac, 1962 0x447b4, 0x447b8, 1963 0x447c0, 0x44854, 1964 0x4485c, 0x44878, 1965 0x448c0, 0x44974, 1966 0x44980, 0x449ac, 1967 0x449b4, 0x449b8, 1968 0x449c0, 0x449fc, 1969 0x45000, 0x45004, 1970 0x45010, 0x45030, 1971 0x45040, 0x45060, 1972 0x45068, 0x45068, 1973 0x45080, 0x45084, 1974 0x450a0, 0x450b0, 1975 0x45200, 0x45204, 1976 0x45210, 0x45230, 1977 0x45240, 0x45260, 1978 0x45268, 0x45268, 1979 0x45280, 0x45284, 1980 0x452a0, 0x452b0, 1981 0x460c0, 0x460e4, 1982 0x47000, 0x4703c, 1983 0x47044, 0x4708c, 1984 0x47200, 0x47250, 1985 0x47400, 0x47408, 1986 0x47414, 0x47420, 1987 0x47600, 0x47618, 1988 0x47800, 0x47814, 1989 0x48000, 0x4800c, 1990 0x48040, 0x48050, 1991 0x48060, 0x48068, 1992 0x4807c, 0x4808c, 1993 0x48094, 0x480b0, 1994 0x480c0, 0x48144, 1995 0x48180, 0x4818c, 1996 0x48200, 0x48254, 1997 0x48260, 0x48264, 1998 0x48270, 0x48288, 1999 0x48290, 0x48298, 2000 0x482ac, 0x482c8, 2001 0x482d0, 0x482e0, 2002 0x482f0, 0x482f0, 2003 0x48300, 0x4833c, 2004 0x483f8, 0x483fc, 2005 0x49304, 0x493c4, 2006 0x49400, 0x4940c, 2007 0x49414, 0x4941c, 2008 0x49480, 0x494d0, 2009 0x4c000, 0x4c054, 2010 0x4c05c, 0x4c078, 2011 0x4c0c0, 0x4c174, 2012 0x4c180, 0x4c1ac, 2013 0x4c1b4, 0x4c1b8, 2014 0x4c1c0, 0x4c254, 2015 0x4c25c, 0x4c278, 
2016 0x4c2c0, 0x4c374, 2017 0x4c380, 0x4c3ac, 2018 0x4c3b4, 0x4c3b8, 2019 0x4c3c0, 0x4c454, 2020 0x4c45c, 0x4c478, 2021 0x4c4c0, 0x4c574, 2022 0x4c580, 0x4c5ac, 2023 0x4c5b4, 0x4c5b8, 2024 0x4c5c0, 0x4c654, 2025 0x4c65c, 0x4c678, 2026 0x4c6c0, 0x4c774, 2027 0x4c780, 0x4c7ac, 2028 0x4c7b4, 0x4c7b8, 2029 0x4c7c0, 0x4c854, 2030 0x4c85c, 0x4c878, 2031 0x4c8c0, 0x4c974, 2032 0x4c980, 0x4c9ac, 2033 0x4c9b4, 0x4c9b8, 2034 0x4c9c0, 0x4c9fc, 2035 0x4d000, 0x4d004, 2036 0x4d010, 0x4d030, 2037 0x4d040, 0x4d060, 2038 0x4d068, 0x4d068, 2039 0x4d080, 0x4d084, 2040 0x4d0a0, 0x4d0b0, 2041 0x4d200, 0x4d204, 2042 0x4d210, 0x4d230, 2043 0x4d240, 0x4d260, 2044 0x4d268, 0x4d268, 2045 0x4d280, 0x4d284, 2046 0x4d2a0, 0x4d2b0, 2047 0x4e0c0, 0x4e0e4, 2048 0x4f000, 0x4f03c, 2049 0x4f044, 0x4f08c, 2050 0x4f200, 0x4f250, 2051 0x4f400, 0x4f408, 2052 0x4f414, 0x4f420, 2053 0x4f600, 0x4f618, 2054 0x4f800, 0x4f814, 2055 0x50000, 0x50084, 2056 0x50090, 0x500cc, 2057 0x50400, 0x50400, 2058 0x50800, 0x50884, 2059 0x50890, 0x508cc, 2060 0x50c00, 0x50c00, 2061 0x51000, 0x5101c, 2062 0x51300, 0x51308, 2063 }; 2064 2065 static const unsigned int t5vf_reg_ranges[] = { 2066 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 2067 VF_MPS_REG(A_MPS_VF_CTL), 2068 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 2069 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), 2070 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 2071 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 2072 FW_T4VF_MBDATA_BASE_ADDR, 2073 FW_T4VF_MBDATA_BASE_ADDR + 2074 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 2075 }; 2076 2077 static const unsigned int t6_reg_ranges[] = { 2078 0x1008, 0x101c, 2079 0x1024, 0x10a8, 2080 0x10b4, 0x10f8, 2081 0x1100, 0x1114, 2082 0x111c, 0x112c, 2083 0x1138, 0x113c, 2084 0x1144, 0x114c, 2085 0x1180, 0x1184, 2086 0x1190, 0x1194, 2087 0x11a0, 0x11a4, 2088 0x11b0, 0x11c4, 2089 0x11fc, 0x123c, 2090 0x1254, 0x1274, 2091 0x1280, 0x133c, 2092 0x1800, 0x18fc, 2093 0x3000, 0x302c, 2094 0x3060, 0x30b0, 2095 0x30b8, 0x30d8, 
2096 0x30e0, 0x30fc, 2097 0x3140, 0x357c, 2098 0x35a8, 0x35cc, 2099 0x35ec, 0x35ec, 2100 0x3600, 0x5624, 2101 0x56cc, 0x56ec, 2102 0x56f4, 0x5720, 2103 0x5728, 0x575c, 2104 0x580c, 0x5814, 2105 0x5890, 0x589c, 2106 0x58a4, 0x58ac, 2107 0x58b8, 0x58bc, 2108 0x5940, 0x595c, 2109 0x5980, 0x598c, 2110 0x59b0, 0x59c8, 2111 0x59d0, 0x59dc, 2112 0x59fc, 0x5a18, 2113 0x5a60, 0x5a6c, 2114 0x5a80, 0x5a8c, 2115 0x5a94, 0x5a9c, 2116 0x5b94, 0x5bfc, 2117 0x5c10, 0x5e48, 2118 0x5e50, 0x5e94, 2119 0x5ea0, 0x5eb0, 2120 0x5ec0, 0x5ec0, 2121 0x5ec8, 0x5ed0, 2122 0x5ee0, 0x5ee0, 2123 0x5ef0, 0x5ef0, 2124 0x5f00, 0x5f00, 2125 0x6000, 0x6020, 2126 0x6028, 0x6040, 2127 0x6058, 0x609c, 2128 0x60a8, 0x619c, 2129 0x7700, 0x7798, 2130 0x77c0, 0x7880, 2131 0x78cc, 0x78fc, 2132 0x7b00, 0x7b58, 2133 0x7b60, 0x7b84, 2134 0x7b8c, 0x7c54, 2135 0x7d00, 0x7d38, 2136 0x7d40, 0x7d84, 2137 0x7d8c, 0x7ddc, 2138 0x7de4, 0x7e04, 2139 0x7e10, 0x7e1c, 2140 0x7e24, 0x7e38, 2141 0x7e40, 0x7e44, 2142 0x7e4c, 0x7e78, 2143 0x7e80, 0x7edc, 2144 0x7ee8, 0x7efc, 2145 0x8dc0, 0x8de0, 2146 0x8df8, 0x8e04, 2147 0x8e10, 0x8e84, 2148 0x8ea0, 0x8f88, 2149 0x8fb8, 0x9058, 2150 0x9060, 0x9060, 2151 0x9068, 0x90f8, 2152 0x9100, 0x9124, 2153 0x9400, 0x9470, 2154 0x9600, 0x9600, 2155 0x9608, 0x9638, 2156 0x9640, 0x9704, 2157 0x9710, 0x971c, 2158 0x9800, 0x9808, 2159 0x9810, 0x9864, 2160 0x9c00, 0x9c6c, 2161 0x9c80, 0x9cec, 2162 0x9d00, 0x9d6c, 2163 0x9d80, 0x9dec, 2164 0x9e00, 0x9e6c, 2165 0x9e80, 0x9eec, 2166 0x9f00, 0x9f6c, 2167 0x9f80, 0xa020, 2168 0xd000, 0xd03c, 2169 0xd100, 0xd118, 2170 0xd200, 0xd214, 2171 0xd220, 0xd234, 2172 0xd240, 0xd254, 2173 0xd260, 0xd274, 2174 0xd280, 0xd294, 2175 0xd2a0, 0xd2b4, 2176 0xd2c0, 0xd2d4, 2177 0xd2e0, 0xd2f4, 2178 0xd300, 0xd31c, 2179 0xdfc0, 0xdfe0, 2180 0xe000, 0xf008, 2181 0xf010, 0xf018, 2182 0xf020, 0xf028, 2183 0x11000, 0x11014, 2184 0x11048, 0x1106c, 2185 0x11074, 0x11088, 2186 0x11098, 0x11120, 2187 0x1112c, 0x1117c, 2188 0x11190, 0x112e0, 2189 0x11300, 0x1130c, 2190 
0x12000, 0x1206c, 2191 0x19040, 0x1906c, 2192 0x19078, 0x19080, 2193 0x1908c, 0x190e8, 2194 0x190f0, 0x190f8, 2195 0x19100, 0x19110, 2196 0x19120, 0x19124, 2197 0x19150, 0x19194, 2198 0x1919c, 0x191b0, 2199 0x191d0, 0x191e8, 2200 0x19238, 0x19290, 2201 0x192a4, 0x192b0, 2202 0x19348, 0x1934c, 2203 0x193f8, 0x19418, 2204 0x19420, 0x19428, 2205 0x19430, 0x19444, 2206 0x1944c, 0x1946c, 2207 0x19474, 0x19474, 2208 0x19490, 0x194cc, 2209 0x194f0, 0x194f8, 2210 0x19c00, 0x19c48, 2211 0x19c50, 0x19c80, 2212 0x19c94, 0x19c98, 2213 0x19ca0, 0x19cbc, 2214 0x19ce4, 0x19ce4, 2215 0x19cf0, 0x19cf8, 2216 0x19d00, 0x19d28, 2217 0x19d50, 0x19d78, 2218 0x19d94, 0x19d98, 2219 0x19da0, 0x19de0, 2220 0x19df0, 0x19e10, 2221 0x19e50, 0x19e6c, 2222 0x19ea0, 0x19ebc, 2223 0x19ec4, 0x19ef4, 2224 0x19f04, 0x19f2c, 2225 0x19f34, 0x19f34, 2226 0x19f40, 0x19f50, 2227 0x19f90, 0x19fac, 2228 0x19fc4, 0x19fc8, 2229 0x19fd0, 0x19fe4, 2230 0x1a000, 0x1a004, 2231 0x1a010, 0x1a06c, 2232 0x1a0b0, 0x1a0e4, 2233 0x1a0ec, 0x1a0f8, 2234 0x1a100, 0x1a108, 2235 0x1a114, 0x1a130, 2236 0x1a138, 0x1a1c4, 2237 0x1a1fc, 0x1a1fc, 2238 0x1e008, 0x1e00c, 2239 0x1e040, 0x1e044, 2240 0x1e04c, 0x1e04c, 2241 0x1e284, 0x1e290, 2242 0x1e2c0, 0x1e2c0, 2243 0x1e2e0, 0x1e2e0, 2244 0x1e300, 0x1e384, 2245 0x1e3c0, 0x1e3c8, 2246 0x1e408, 0x1e40c, 2247 0x1e440, 0x1e444, 2248 0x1e44c, 0x1e44c, 2249 0x1e684, 0x1e690, 2250 0x1e6c0, 0x1e6c0, 2251 0x1e6e0, 0x1e6e0, 2252 0x1e700, 0x1e784, 2253 0x1e7c0, 0x1e7c8, 2254 0x1e808, 0x1e80c, 2255 0x1e840, 0x1e844, 2256 0x1e84c, 0x1e84c, 2257 0x1ea84, 0x1ea90, 2258 0x1eac0, 0x1eac0, 2259 0x1eae0, 0x1eae0, 2260 0x1eb00, 0x1eb84, 2261 0x1ebc0, 0x1ebc8, 2262 0x1ec08, 0x1ec0c, 2263 0x1ec40, 0x1ec44, 2264 0x1ec4c, 0x1ec4c, 2265 0x1ee84, 0x1ee90, 2266 0x1eec0, 0x1eec0, 2267 0x1eee0, 0x1eee0, 2268 0x1ef00, 0x1ef84, 2269 0x1efc0, 0x1efc8, 2270 0x1f008, 0x1f00c, 2271 0x1f040, 0x1f044, 2272 0x1f04c, 0x1f04c, 2273 0x1f284, 0x1f290, 2274 0x1f2c0, 0x1f2c0, 2275 0x1f2e0, 0x1f2e0, 2276 0x1f300, 0x1f384, 
2277 0x1f3c0, 0x1f3c8, 2278 0x1f408, 0x1f40c, 2279 0x1f440, 0x1f444, 2280 0x1f44c, 0x1f44c, 2281 0x1f684, 0x1f690, 2282 0x1f6c0, 0x1f6c0, 2283 0x1f6e0, 0x1f6e0, 2284 0x1f700, 0x1f784, 2285 0x1f7c0, 0x1f7c8, 2286 0x1f808, 0x1f80c, 2287 0x1f840, 0x1f844, 2288 0x1f84c, 0x1f84c, 2289 0x1fa84, 0x1fa90, 2290 0x1fac0, 0x1fac0, 2291 0x1fae0, 0x1fae0, 2292 0x1fb00, 0x1fb84, 2293 0x1fbc0, 0x1fbc8, 2294 0x1fc08, 0x1fc0c, 2295 0x1fc40, 0x1fc44, 2296 0x1fc4c, 0x1fc4c, 2297 0x1fe84, 0x1fe90, 2298 0x1fec0, 0x1fec0, 2299 0x1fee0, 0x1fee0, 2300 0x1ff00, 0x1ff84, 2301 0x1ffc0, 0x1ffc8, 2302 0x30000, 0x30030, 2303 0x30100, 0x30168, 2304 0x30190, 0x301a0, 2305 0x301a8, 0x301b8, 2306 0x301c4, 0x301c8, 2307 0x301d0, 0x301d0, 2308 0x30200, 0x30320, 2309 0x30400, 0x304b4, 2310 0x304c0, 0x3052c, 2311 0x30540, 0x3061c, 2312 0x30800, 0x308a0, 2313 0x308c0, 0x30908, 2314 0x30910, 0x309b8, 2315 0x30a00, 0x30a04, 2316 0x30a0c, 0x30a14, 2317 0x30a1c, 0x30a2c, 2318 0x30a44, 0x30a50, 2319 0x30a74, 0x30a74, 2320 0x30a7c, 0x30afc, 2321 0x30b08, 0x30c24, 2322 0x30d00, 0x30d14, 2323 0x30d1c, 0x30d3c, 2324 0x30d44, 0x30d4c, 2325 0x30d54, 0x30d74, 2326 0x30d7c, 0x30d7c, 2327 0x30de0, 0x30de0, 2328 0x30e00, 0x30ed4, 2329 0x30f00, 0x30fa4, 2330 0x30fc0, 0x30fc4, 2331 0x31000, 0x31004, 2332 0x31080, 0x310fc, 2333 0x31208, 0x31220, 2334 0x3123c, 0x31254, 2335 0x31300, 0x31300, 2336 0x31308, 0x3131c, 2337 0x31338, 0x3133c, 2338 0x31380, 0x31380, 2339 0x31388, 0x313a8, 2340 0x313b4, 0x313b4, 2341 0x31400, 0x31420, 2342 0x31438, 0x3143c, 2343 0x31480, 0x31480, 2344 0x314a8, 0x314a8, 2345 0x314b0, 0x314b4, 2346 0x314c8, 0x314d4, 2347 0x31a40, 0x31a4c, 2348 0x31af0, 0x31b20, 2349 0x31b38, 0x31b3c, 2350 0x31b80, 0x31b80, 2351 0x31ba8, 0x31ba8, 2352 0x31bb0, 0x31bb4, 2353 0x31bc8, 0x31bd4, 2354 0x32140, 0x3218c, 2355 0x321f0, 0x321f4, 2356 0x32200, 0x32200, 2357 0x32218, 0x32218, 2358 0x32400, 0x32400, 2359 0x32408, 0x3241c, 2360 0x32618, 0x32620, 2361 0x32664, 0x32664, 2362 0x326a8, 0x326a8, 2363 0x326ec, 
0x326ec, 2364 0x32a00, 0x32abc, 2365 0x32b00, 0x32b18, 2366 0x32b20, 0x32b38, 2367 0x32b40, 0x32b58, 2368 0x32b60, 0x32b78, 2369 0x32c00, 0x32c00, 2370 0x32c08, 0x32c3c, 2371 0x33000, 0x3302c, 2372 0x33034, 0x33050, 2373 0x33058, 0x33058, 2374 0x33060, 0x3308c, 2375 0x3309c, 0x330ac, 2376 0x330c0, 0x330c0, 2377 0x330c8, 0x330d0, 2378 0x330d8, 0x330e0, 2379 0x330ec, 0x3312c, 2380 0x33134, 0x33150, 2381 0x33158, 0x33158, 2382 0x33160, 0x3318c, 2383 0x3319c, 0x331ac, 2384 0x331c0, 0x331c0, 2385 0x331c8, 0x331d0, 2386 0x331d8, 0x331e0, 2387 0x331ec, 0x33290, 2388 0x33298, 0x332c4, 2389 0x332e4, 0x33390, 2390 0x33398, 0x333c4, 2391 0x333e4, 0x3342c, 2392 0x33434, 0x33450, 2393 0x33458, 0x33458, 2394 0x33460, 0x3348c, 2395 0x3349c, 0x334ac, 2396 0x334c0, 0x334c0, 2397 0x334c8, 0x334d0, 2398 0x334d8, 0x334e0, 2399 0x334ec, 0x3352c, 2400 0x33534, 0x33550, 2401 0x33558, 0x33558, 2402 0x33560, 0x3358c, 2403 0x3359c, 0x335ac, 2404 0x335c0, 0x335c0, 2405 0x335c8, 0x335d0, 2406 0x335d8, 0x335e0, 2407 0x335ec, 0x33690, 2408 0x33698, 0x336c4, 2409 0x336e4, 0x33790, 2410 0x33798, 0x337c4, 2411 0x337e4, 0x337fc, 2412 0x33814, 0x33814, 2413 0x33854, 0x33868, 2414 0x33880, 0x3388c, 2415 0x338c0, 0x338d0, 2416 0x338e8, 0x338ec, 2417 0x33900, 0x3392c, 2418 0x33934, 0x33950, 2419 0x33958, 0x33958, 2420 0x33960, 0x3398c, 2421 0x3399c, 0x339ac, 2422 0x339c0, 0x339c0, 2423 0x339c8, 0x339d0, 2424 0x339d8, 0x339e0, 2425 0x339ec, 0x33a90, 2426 0x33a98, 0x33ac4, 2427 0x33ae4, 0x33b10, 2428 0x33b24, 0x33b28, 2429 0x33b38, 0x33b50, 2430 0x33bf0, 0x33c10, 2431 0x33c24, 0x33c28, 2432 0x33c38, 0x33c50, 2433 0x33cf0, 0x33cfc, 2434 0x34000, 0x34030, 2435 0x34100, 0x34168, 2436 0x34190, 0x341a0, 2437 0x341a8, 0x341b8, 2438 0x341c4, 0x341c8, 2439 0x341d0, 0x341d0, 2440 0x34200, 0x34320, 2441 0x34400, 0x344b4, 2442 0x344c0, 0x3452c, 2443 0x34540, 0x3461c, 2444 0x34800, 0x348a0, 2445 0x348c0, 0x34908, 2446 0x34910, 0x349b8, 2447 0x34a00, 0x34a04, 2448 0x34a0c, 0x34a14, 2449 0x34a1c, 0x34a2c, 2450 
0x34a44, 0x34a50, 2451 0x34a74, 0x34a74, 2452 0x34a7c, 0x34afc, 2453 0x34b08, 0x34c24, 2454 0x34d00, 0x34d14, 2455 0x34d1c, 0x34d3c, 2456 0x34d44, 0x34d4c, 2457 0x34d54, 0x34d74, 2458 0x34d7c, 0x34d7c, 2459 0x34de0, 0x34de0, 2460 0x34e00, 0x34ed4, 2461 0x34f00, 0x34fa4, 2462 0x34fc0, 0x34fc4, 2463 0x35000, 0x35004, 2464 0x35080, 0x350fc, 2465 0x35208, 0x35220, 2466 0x3523c, 0x35254, 2467 0x35300, 0x35300, 2468 0x35308, 0x3531c, 2469 0x35338, 0x3533c, 2470 0x35380, 0x35380, 2471 0x35388, 0x353a8, 2472 0x353b4, 0x353b4, 2473 0x35400, 0x35420, 2474 0x35438, 0x3543c, 2475 0x35480, 0x35480, 2476 0x354a8, 0x354a8, 2477 0x354b0, 0x354b4, 2478 0x354c8, 0x354d4, 2479 0x35a40, 0x35a4c, 2480 0x35af0, 0x35b20, 2481 0x35b38, 0x35b3c, 2482 0x35b80, 0x35b80, 2483 0x35ba8, 0x35ba8, 2484 0x35bb0, 0x35bb4, 2485 0x35bc8, 0x35bd4, 2486 0x36140, 0x3618c, 2487 0x361f0, 0x361f4, 2488 0x36200, 0x36200, 2489 0x36218, 0x36218, 2490 0x36400, 0x36400, 2491 0x36408, 0x3641c, 2492 0x36618, 0x36620, 2493 0x36664, 0x36664, 2494 0x366a8, 0x366a8, 2495 0x366ec, 0x366ec, 2496 0x36a00, 0x36abc, 2497 0x36b00, 0x36b18, 2498 0x36b20, 0x36b38, 2499 0x36b40, 0x36b58, 2500 0x36b60, 0x36b78, 2501 0x36c00, 0x36c00, 2502 0x36c08, 0x36c3c, 2503 0x37000, 0x3702c, 2504 0x37034, 0x37050, 2505 0x37058, 0x37058, 2506 0x37060, 0x3708c, 2507 0x3709c, 0x370ac, 2508 0x370c0, 0x370c0, 2509 0x370c8, 0x370d0, 2510 0x370d8, 0x370e0, 2511 0x370ec, 0x3712c, 2512 0x37134, 0x37150, 2513 0x37158, 0x37158, 2514 0x37160, 0x3718c, 2515 0x3719c, 0x371ac, 2516 0x371c0, 0x371c0, 2517 0x371c8, 0x371d0, 2518 0x371d8, 0x371e0, 2519 0x371ec, 0x37290, 2520 0x37298, 0x372c4, 2521 0x372e4, 0x37390, 2522 0x37398, 0x373c4, 2523 0x373e4, 0x3742c, 2524 0x37434, 0x37450, 2525 0x37458, 0x37458, 2526 0x37460, 0x3748c, 2527 0x3749c, 0x374ac, 2528 0x374c0, 0x374c0, 2529 0x374c8, 0x374d0, 2530 0x374d8, 0x374e0, 2531 0x374ec, 0x3752c, 2532 0x37534, 0x37550, 2533 0x37558, 0x37558, 2534 0x37560, 0x3758c, 2535 0x3759c, 0x375ac, 2536 0x375c0, 0x375c0, 
2537 0x375c8, 0x375d0, 2538 0x375d8, 0x375e0, 2539 0x375ec, 0x37690, 2540 0x37698, 0x376c4, 2541 0x376e4, 0x37790, 2542 0x37798, 0x377c4, 2543 0x377e4, 0x377fc, 2544 0x37814, 0x37814, 2545 0x37854, 0x37868, 2546 0x37880, 0x3788c, 2547 0x378c0, 0x378d0, 2548 0x378e8, 0x378ec, 2549 0x37900, 0x3792c, 2550 0x37934, 0x37950, 2551 0x37958, 0x37958, 2552 0x37960, 0x3798c, 2553 0x3799c, 0x379ac, 2554 0x379c0, 0x379c0, 2555 0x379c8, 0x379d0, 2556 0x379d8, 0x379e0, 2557 0x379ec, 0x37a90, 2558 0x37a98, 0x37ac4, 2559 0x37ae4, 0x37b10, 2560 0x37b24, 0x37b28, 2561 0x37b38, 0x37b50, 2562 0x37bf0, 0x37c10, 2563 0x37c24, 0x37c28, 2564 0x37c38, 0x37c50, 2565 0x37cf0, 0x37cfc, 2566 0x40040, 0x40040, 2567 0x40080, 0x40084, 2568 0x40100, 0x40100, 2569 0x40140, 0x401bc, 2570 0x40200, 0x40214, 2571 0x40228, 0x40228, 2572 0x40240, 0x40258, 2573 0x40280, 0x40280, 2574 0x40304, 0x40304, 2575 0x40330, 0x4033c, 2576 0x41304, 0x413c8, 2577 0x413d0, 0x413dc, 2578 0x413f0, 0x413f0, 2579 0x41400, 0x4140c, 2580 0x41414, 0x4141c, 2581 0x41480, 0x414d0, 2582 0x44000, 0x4407c, 2583 0x440c0, 0x441ac, 2584 0x441b4, 0x4427c, 2585 0x442c0, 0x443ac, 2586 0x443b4, 0x4447c, 2587 0x444c0, 0x445ac, 2588 0x445b4, 0x4467c, 2589 0x446c0, 0x447ac, 2590 0x447b4, 0x4487c, 2591 0x448c0, 0x449ac, 2592 0x449b4, 0x44a7c, 2593 0x44ac0, 0x44bac, 2594 0x44bb4, 0x44c7c, 2595 0x44cc0, 0x44dac, 2596 0x44db4, 0x44e7c, 2597 0x44ec0, 0x44fac, 2598 0x44fb4, 0x4507c, 2599 0x450c0, 0x451ac, 2600 0x451b4, 0x451fc, 2601 0x45800, 0x45804, 2602 0x45810, 0x45830, 2603 0x45840, 0x45860, 2604 0x45868, 0x45868, 2605 0x45880, 0x45884, 2606 0x458a0, 0x458b0, 2607 0x45a00, 0x45a04, 2608 0x45a10, 0x45a30, 2609 0x45a40, 0x45a60, 2610 0x45a68, 0x45a68, 2611 0x45a80, 0x45a84, 2612 0x45aa0, 0x45ab0, 2613 0x460c0, 0x460e4, 2614 0x47000, 0x4703c, 2615 0x47044, 0x4708c, 2616 0x47200, 0x47250, 2617 0x47400, 0x47408, 2618 0x47414, 0x47420, 2619 0x47600, 0x47618, 2620 0x47800, 0x47814, 2621 0x47820, 0x4782c, 2622 0x50000, 0x50084, 2623 0x50090, 
0x500cc, 2624 0x50300, 0x50384, 2625 0x50400, 0x50400, 2626 0x50800, 0x50884, 2627 0x50890, 0x508cc, 2628 0x50b00, 0x50b84, 2629 0x50c00, 0x50c00, 2630 0x51000, 0x51020, 2631 0x51028, 0x510b0, 2632 0x51300, 0x51324, 2633 }; 2634 2635 static const unsigned int t6vf_reg_ranges[] = { 2636 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 2637 VF_MPS_REG(A_MPS_VF_CTL), 2638 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 2639 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), 2640 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 2641 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 2642 FW_T6VF_MBDATA_BASE_ADDR, 2643 FW_T6VF_MBDATA_BASE_ADDR + 2644 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 2645 }; 2646 2647 static const unsigned int t7_reg_ranges[] = { 2648 0x1008, 0x101c, 2649 0x1024, 0x10a8, 2650 0x10b4, 0x10f8, 2651 0x1100, 0x1114, 2652 0x111c, 0x112c, 2653 0x1138, 0x113c, 2654 0x1144, 0x115c, 2655 0x1180, 0x1184, 2656 0x1190, 0x1194, 2657 0x11a0, 0x11a4, 2658 0x11b0, 0x11d0, 2659 0x11fc, 0x1278, 2660 0x1280, 0x1368, 2661 0x1700, 0x172c, 2662 0x173c, 0x1760, 2663 0x1800, 0x18fc, 2664 0x3000, 0x3044, 2665 0x3060, 0x3064, 2666 0x30a4, 0x30b0, 2667 0x30b8, 0x30d8, 2668 0x30e0, 0x30fc, 2669 0x3140, 0x357c, 2670 0x35a8, 0x35cc, 2671 0x35e0, 0x35ec, 2672 0x3600, 0x37fc, 2673 0x3804, 0x3818, 2674 0x3880, 0x388c, 2675 0x3900, 0x3904, 2676 0x3910, 0x3978, 2677 0x3980, 0x399c, 2678 0x4700, 0x4720, 2679 0x4728, 0x475c, 2680 0x480c, 0x4814, 2681 0x4890, 0x489c, 2682 0x48a4, 0x48ac, 2683 0x48b8, 0x48c4, 2684 0x4900, 0x4924, 2685 0x4ffc, 0x4ffc, 2686 0x5500, 0x5624, 2687 0x56c4, 0x56ec, 2688 0x56f4, 0x5720, 2689 0x5728, 0x575c, 2690 0x580c, 0x5814, 2691 0x5890, 0x589c, 2692 0x58a4, 0x58ac, 2693 0x58b8, 0x58bc, 2694 0x5940, 0x598c, 2695 0x59b0, 0x59c8, 2696 0x59d0, 0x59dc, 2697 0x59fc, 0x5a18, 2698 0x5a60, 0x5a6c, 2699 0x5a80, 0x5a8c, 2700 0x5a94, 0x5a9c, 2701 0x5b94, 0x5bfc, 2702 0x5c10, 0x5e48, 2703 0x5e50, 0x5e94, 2704 0x5ea0, 0x5eb0, 2705 0x5ec0, 0x5ec0, 2706 0x5ec8, 0x5ed0, 
2707 0x5ee0, 0x5ee0, 2708 0x5ef0, 0x5ef0, 2709 0x5f00, 0x5f04, 2710 0x5f0c, 0x5f10, 2711 0x5f20, 0x5f88, 2712 0x5f90, 0x5fd8, 2713 0x6000, 0x6020, 2714 0x6028, 0x6030, 2715 0x6044, 0x609c, 2716 0x60a8, 0x60ac, 2717 0x60b8, 0x60ec, 2718 0x6100, 0x6104, 2719 0x6118, 0x611c, 2720 0x6150, 0x6150, 2721 0x6180, 0x61b8, 2722 0x7700, 0x77a8, 2723 0x77b0, 0x7888, 2724 0x78cc, 0x7970, 2725 0x7b00, 0x7b00, 2726 0x7b08, 0x7b0c, 2727 0x7b24, 0x7b84, 2728 0x7b8c, 0x7c2c, 2729 0x7c34, 0x7c40, 2730 0x7c48, 0x7c68, 2731 0x7c70, 0x7c7c, 2732 0x7d00, 0x7ddc, 2733 0x7de4, 0x7e38, 2734 0x7e40, 0x7e44, 2735 0x7e4c, 0x7e74, 2736 0x7e80, 0x7ee0, 2737 0x7ee8, 0x7f0c, 2738 0x7f20, 0x7f5c, 2739 0x8dc0, 0x8de8, 2740 0x8df8, 0x8e04, 2741 0x8e10, 0x8e30, 2742 0x8e7c, 0x8ee8, 2743 0x8f88, 0x8f88, 2744 0x8f90, 0x8fb0, 2745 0x8fb8, 0x9058, 2746 0x9074, 0x90f8, 2747 0x9100, 0x912c, 2748 0x9138, 0x9188, 2749 0x9400, 0x9414, 2750 0x9430, 0x9440, 2751 0x9454, 0x9454, 2752 0x945c, 0x947c, 2753 0x9498, 0x94b8, 2754 0x9600, 0x9600, 2755 0x9608, 0x9638, 2756 0x9640, 0x9704, 2757 0x9710, 0x971c, 2758 0x9800, 0x9804, 2759 0x9854, 0x9854, 2760 0x9c00, 0x9c6c, 2761 0x9c80, 0x9cec, 2762 0x9d00, 0x9d6c, 2763 0x9d80, 0x9dec, 2764 0x9e00, 0x9e6c, 2765 0x9e80, 0x9eec, 2766 0x9f00, 0x9f6c, 2767 0x9f80, 0x9fec, 2768 0xa000, 0xa06c, 2769 0xa080, 0xa0ec, 2770 0xa100, 0xa16c, 2771 0xa180, 0xa1ec, 2772 0xa200, 0xa26c, 2773 0xa280, 0xa2ec, 2774 0xa300, 0xa36c, 2775 0xa380, 0xa458, 2776 0xa460, 0xa4f8, 2777 0xd000, 0xd03c, 2778 0xd100, 0xd134, 2779 0xd200, 0xd214, 2780 0xd220, 0xd234, 2781 0xd240, 0xd254, 2782 0xd260, 0xd274, 2783 0xd280, 0xd294, 2784 0xd2a0, 0xd2b4, 2785 0xd2c0, 0xd2d4, 2786 0xd2e0, 0xd2f4, 2787 0xd300, 0xd31c, 2788 0xdfc0, 0xdfe0, 2789 0xe000, 0xe00c, 2790 0xf000, 0xf008, 2791 0xf010, 0xf06c, 2792 0x11000, 0x11014, 2793 0x11048, 0x11120, 2794 0x11130, 0x11144, 2795 0x11174, 0x11178, 2796 0x11190, 0x111a0, 2797 0x111e4, 0x112f0, 2798 0x11300, 0x1133c, 2799 0x11408, 0x1146c, 2800 0x12000, 0x12004, 2801 
0x12060, 0x122c4, 2802 0x19040, 0x1906c, 2803 0x19078, 0x19080, 2804 0x1908c, 0x190e8, 2805 0x190f0, 0x190f8, 2806 0x19100, 0x19110, 2807 0x19120, 0x19124, 2808 0x19150, 0x19194, 2809 0x1919c, 0x191a0, 2810 0x191ac, 0x191c8, 2811 0x191d0, 0x191e4, 2812 0x19250, 0x19250, 2813 0x19258, 0x19268, 2814 0x19278, 0x19278, 2815 0x19280, 0x192b0, 2816 0x192bc, 0x192f0, 2817 0x19300, 0x19308, 2818 0x19310, 0x19318, 2819 0x19320, 0x19328, 2820 0x19330, 0x19330, 2821 0x19348, 0x1934c, 2822 0x193f8, 0x19428, 2823 0x19430, 0x19444, 2824 0x1944c, 0x1946c, 2825 0x19474, 0x1947c, 2826 0x19488, 0x194cc, 2827 0x194f0, 0x194f8, 2828 0x19c00, 0x19c48, 2829 0x19c50, 0x19c80, 2830 0x19c94, 0x19c98, 2831 0x19ca0, 0x19cdc, 2832 0x19ce4, 0x19cf8, 2833 0x19d00, 0x19d30, 2834 0x19d50, 0x19d80, 2835 0x19d94, 0x19d98, 2836 0x19da0, 0x19de0, 2837 0x19df0, 0x19e10, 2838 0x19e50, 0x19e6c, 2839 0x19ea0, 0x19ebc, 2840 0x19ec4, 0x19ef4, 2841 0x19f04, 0x19f2c, 2842 0x19f34, 0x19f34, 2843 0x19f40, 0x19f50, 2844 0x19f90, 0x19fb4, 2845 0x19fbc, 0x19fbc, 2846 0x19fc4, 0x19fc8, 2847 0x19fd0, 0x19fe4, 2848 0x1a000, 0x1a004, 2849 0x1a010, 0x1a06c, 2850 0x1a0b0, 0x1a0e4, 2851 0x1a0ec, 0x1a108, 2852 0x1a114, 0x1a130, 2853 0x1a138, 0x1a1c4, 2854 0x1a1fc, 0x1a29c, 2855 0x1a2a8, 0x1a2b8, 2856 0x1a2c0, 0x1a388, 2857 0x1a398, 0x1a3ac, 2858 0x1e008, 0x1e00c, 2859 0x1e040, 0x1e044, 2860 0x1e04c, 0x1e04c, 2861 0x1e284, 0x1e290, 2862 0x1e2c0, 0x1e2c0, 2863 0x1e2e0, 0x1e2e4, 2864 0x1e300, 0x1e384, 2865 0x1e3c0, 0x1e3c8, 2866 0x1e408, 0x1e40c, 2867 0x1e440, 0x1e444, 2868 0x1e44c, 0x1e44c, 2869 0x1e684, 0x1e690, 2870 0x1e6c0, 0x1e6c0, 2871 0x1e6e0, 0x1e6e4, 2872 0x1e700, 0x1e784, 2873 0x1e7c0, 0x1e7c8, 2874 0x1e808, 0x1e80c, 2875 0x1e840, 0x1e844, 2876 0x1e84c, 0x1e84c, 2877 0x1ea84, 0x1ea90, 2878 0x1eac0, 0x1eac0, 2879 0x1eae0, 0x1eae4, 2880 0x1eb00, 0x1eb84, 2881 0x1ebc0, 0x1ebc8, 2882 0x1ec08, 0x1ec0c, 2883 0x1ec40, 0x1ec44, 2884 0x1ec4c, 0x1ec4c, 2885 0x1ee84, 0x1ee90, 2886 0x1eec0, 0x1eec0, 2887 0x1eee0, 0x1eee4, 
2888 0x1ef00, 0x1ef84, 2889 0x1efc0, 0x1efc8, 2890 0x1f008, 0x1f00c, 2891 0x1f040, 0x1f044, 2892 0x1f04c, 0x1f04c, 2893 0x1f284, 0x1f290, 2894 0x1f2c0, 0x1f2c0, 2895 0x1f2e0, 0x1f2e4, 2896 0x1f300, 0x1f384, 2897 0x1f3c0, 0x1f3c8, 2898 0x1f408, 0x1f40c, 2899 0x1f440, 0x1f444, 2900 0x1f44c, 0x1f44c, 2901 0x1f684, 0x1f690, 2902 0x1f6c0, 0x1f6c0, 2903 0x1f6e0, 0x1f6e4, 2904 0x1f700, 0x1f784, 2905 0x1f7c0, 0x1f7c8, 2906 0x1f808, 0x1f80c, 2907 0x1f840, 0x1f844, 2908 0x1f84c, 0x1f84c, 2909 0x1fa84, 0x1fa90, 2910 0x1fac0, 0x1fac0, 2911 0x1fae0, 0x1fae4, 2912 0x1fb00, 0x1fb84, 2913 0x1fbc0, 0x1fbc8, 2914 0x1fc08, 0x1fc0c, 2915 0x1fc40, 0x1fc44, 2916 0x1fc4c, 0x1fc4c, 2917 0x1fe84, 0x1fe90, 2918 0x1fec0, 0x1fec0, 2919 0x1fee0, 0x1fee4, 2920 0x1ff00, 0x1ff84, 2921 0x1ffc0, 0x1ffc8, 2922 0x30000, 0x30038, 2923 0x30100, 0x3017c, 2924 0x30190, 0x301a0, 2925 0x301a8, 0x301b8, 2926 0x301c4, 0x301c8, 2927 0x301d0, 0x301e0, 2928 0x30200, 0x30344, 2929 0x30400, 0x304b4, 2930 0x304c0, 0x3052c, 2931 0x30540, 0x3065c, 2932 0x30800, 0x30848, 2933 0x30850, 0x308a8, 2934 0x308b8, 0x308c0, 2935 0x308cc, 0x308dc, 2936 0x30900, 0x30904, 2937 0x3090c, 0x30914, 2938 0x3091c, 0x30928, 2939 0x30930, 0x3093c, 2940 0x30944, 0x30948, 2941 0x30954, 0x30974, 2942 0x3097c, 0x30980, 2943 0x30a00, 0x30a20, 2944 0x30a38, 0x30a3c, 2945 0x30a50, 0x30a50, 2946 0x30a80, 0x30a80, 2947 0x30a88, 0x30aa8, 2948 0x30ab0, 0x30ab4, 2949 0x30ac8, 0x30ad4, 2950 0x30b28, 0x30b84, 2951 0x30b98, 0x30bb8, 2952 0x30c98, 0x30d14, 2953 0x31000, 0x31020, 2954 0x31038, 0x3103c, 2955 0x31050, 0x31050, 2956 0x31080, 0x31080, 2957 0x31088, 0x310a8, 2958 0x310b0, 0x310b4, 2959 0x310c8, 0x310d4, 2960 0x31128, 0x31184, 2961 0x31198, 0x311b8, 2962 0x32000, 0x32038, 2963 0x32100, 0x3217c, 2964 0x32190, 0x321a0, 2965 0x321a8, 0x321b8, 2966 0x321c4, 0x321c8, 2967 0x321d0, 0x321e0, 2968 0x32200, 0x32344, 2969 0x32400, 0x324b4, 2970 0x324c0, 0x3252c, 2971 0x32540, 0x3265c, 2972 0x32800, 0x32848, 2973 0x32850, 0x328a8, 2974 0x328b8, 
0x328c0, 2975 0x328cc, 0x328dc, 2976 0x32900, 0x32904, 2977 0x3290c, 0x32914, 2978 0x3291c, 0x32928, 2979 0x32930, 0x3293c, 2980 0x32944, 0x32948, 2981 0x32954, 0x32974, 2982 0x3297c, 0x32980, 2983 0x32a00, 0x32a20, 2984 0x32a38, 0x32a3c, 2985 0x32a50, 0x32a50, 2986 0x32a80, 0x32a80, 2987 0x32a88, 0x32aa8, 2988 0x32ab0, 0x32ab4, 2989 0x32ac8, 0x32ad4, 2990 0x32b28, 0x32b84, 2991 0x32b98, 0x32bb8, 2992 0x32c98, 0x32d14, 2993 0x33000, 0x33020, 2994 0x33038, 0x3303c, 2995 0x33050, 0x33050, 2996 0x33080, 0x33080, 2997 0x33088, 0x330a8, 2998 0x330b0, 0x330b4, 2999 0x330c8, 0x330d4, 3000 0x33128, 0x33184, 3001 0x33198, 0x331b8, 3002 0x34000, 0x34038, 3003 0x34100, 0x3417c, 3004 0x34190, 0x341a0, 3005 0x341a8, 0x341b8, 3006 0x341c4, 0x341c8, 3007 0x341d0, 0x341e0, 3008 0x34200, 0x34344, 3009 0x34400, 0x344b4, 3010 0x344c0, 0x3452c, 3011 0x34540, 0x3465c, 3012 0x34800, 0x34848, 3013 0x34850, 0x348a8, 3014 0x348b8, 0x348c0, 3015 0x348cc, 0x348dc, 3016 0x34900, 0x34904, 3017 0x3490c, 0x34914, 3018 0x3491c, 0x34928, 3019 0x34930, 0x3493c, 3020 0x34944, 0x34948, 3021 0x34954, 0x34974, 3022 0x3497c, 0x34980, 3023 0x34a00, 0x34a20, 3024 0x34a38, 0x34a3c, 3025 0x34a50, 0x34a50, 3026 0x34a80, 0x34a80, 3027 0x34a88, 0x34aa8, 3028 0x34ab0, 0x34ab4, 3029 0x34ac8, 0x34ad4, 3030 0x34b28, 0x34b84, 3031 0x34b98, 0x34bb8, 3032 0x34c98, 0x34d14, 3033 0x35000, 0x35020, 3034 0x35038, 0x3503c, 3035 0x35050, 0x35050, 3036 0x35080, 0x35080, 3037 0x35088, 0x350a8, 3038 0x350b0, 0x350b4, 3039 0x350c8, 0x350d4, 3040 0x35128, 0x35184, 3041 0x35198, 0x351b8, 3042 0x36000, 0x36038, 3043 0x36100, 0x3617c, 3044 0x36190, 0x361a0, 3045 0x361a8, 0x361b8, 3046 0x361c4, 0x361c8, 3047 0x361d0, 0x361e0, 3048 0x36200, 0x36344, 3049 0x36400, 0x364b4, 3050 0x364c0, 0x3652c, 3051 0x36540, 0x3665c, 3052 0x36800, 0x36848, 3053 0x36850, 0x368a8, 3054 0x368b8, 0x368c0, 3055 0x368cc, 0x368dc, 3056 0x36900, 0x36904, 3057 0x3690c, 0x36914, 3058 0x3691c, 0x36928, 3059 0x36930, 0x3693c, 3060 0x36944, 0x36948, 3061 
0x36954, 0x36974, 3062 0x3697c, 0x36980, 3063 0x36a00, 0x36a20, 3064 0x36a38, 0x36a3c, 3065 0x36a50, 0x36a50, 3066 0x36a80, 0x36a80, 3067 0x36a88, 0x36aa8, 3068 0x36ab0, 0x36ab4, 3069 0x36ac8, 0x36ad4, 3070 0x36b28, 0x36b84, 3071 0x36b98, 0x36bb8, 3072 0x36c98, 0x36d14, 3073 0x37000, 0x37020, 3074 0x37038, 0x3703c, 3075 0x37050, 0x37050, 3076 0x37080, 0x37080, 3077 0x37088, 0x370a8, 3078 0x370b0, 0x370b4, 3079 0x370c8, 0x370d4, 3080 0x37128, 0x37184, 3081 0x37198, 0x371b8, 3082 0x38000, 0x380b0, 3083 0x380b8, 0x38130, 3084 0x38140, 0x38140, 3085 0x38150, 0x38154, 3086 0x38160, 0x381c4, 3087 0x381f0, 0x38204, 3088 0x3820c, 0x38214, 3089 0x3821c, 0x3822c, 3090 0x38244, 0x38244, 3091 0x38254, 0x38274, 3092 0x3827c, 0x38280, 3093 0x38300, 0x38304, 3094 0x3830c, 0x38314, 3095 0x3831c, 0x3832c, 3096 0x38344, 0x38344, 3097 0x38354, 0x38374, 3098 0x3837c, 0x38380, 3099 0x38400, 0x38424, 3100 0x38438, 0x3843c, 3101 0x38480, 0x38480, 3102 0x384a8, 0x384a8, 3103 0x384b0, 0x384b4, 3104 0x384c8, 0x38514, 3105 0x38600, 0x3860c, 3106 0x3861c, 0x38624, 3107 0x38900, 0x38924, 3108 0x38938, 0x3893c, 3109 0x38980, 0x38980, 3110 0x389a8, 0x389a8, 3111 0x389b0, 0x389b4, 3112 0x389c8, 0x38a14, 3113 0x38b00, 0x38b0c, 3114 0x38b1c, 0x38b24, 3115 0x38e00, 0x38e00, 3116 0x38e18, 0x38e20, 3117 0x38e38, 0x38e40, 3118 0x38e58, 0x38e60, 3119 0x38e78, 0x38e80, 3120 0x38e98, 0x38ea0, 3121 0x38eb8, 0x38ec0, 3122 0x38ed8, 0x38ee0, 3123 0x38ef8, 0x38f08, 3124 0x38f10, 0x38f2c, 3125 0x38f80, 0x38ffc, 3126 0x39080, 0x39080, 3127 0x39088, 0x39090, 3128 0x39100, 0x39108, 3129 0x39120, 0x39128, 3130 0x39140, 0x39148, 3131 0x39160, 0x39168, 3132 0x39180, 0x39188, 3133 0x391a0, 0x391a8, 3134 0x391c0, 0x391c8, 3135 0x391e0, 0x391e8, 3136 0x39200, 0x39200, 3137 0x39208, 0x39240, 3138 0x39300, 0x39300, 3139 0x39308, 0x39340, 3140 0x39400, 0x39400, 3141 0x39408, 0x39440, 3142 0x39500, 0x39500, 3143 0x39508, 0x39540, 3144 0x39600, 0x39600, 3145 0x39608, 0x39640, 3146 0x39700, 0x39700, 3147 0x39708, 0x39740, 
3148 0x39800, 0x39800, 3149 0x39808, 0x39840, 3150 0x39900, 0x39900, 3151 0x39908, 0x39940, 3152 0x39a00, 0x39a04, 3153 0x39a10, 0x39a14, 3154 0x39a1c, 0x39aa8, 3155 0x39b00, 0x39ecc, 3156 0x3a000, 0x3a004, 3157 0x3a050, 0x3a084, 3158 0x3a090, 0x3a09c, 3159 0x3e000, 0x3e020, 3160 0x3e03c, 0x3e05c, 3161 0x3e100, 0x3e120, 3162 0x3e13c, 0x3e15c, 3163 0x3e200, 0x3e220, 3164 0x3e23c, 0x3e25c, 3165 0x3e300, 0x3e320, 3166 0x3e33c, 0x3e35c, 3167 0x3f000, 0x3f034, 3168 0x3f100, 0x3f130, 3169 0x3f200, 0x3f218, 3170 0x44000, 0x44014, 3171 0x44020, 0x44028, 3172 0x44030, 0x44030, 3173 0x44100, 0x44114, 3174 0x44120, 0x44128, 3175 0x44130, 0x44130, 3176 0x44200, 0x44214, 3177 0x44220, 0x44228, 3178 0x44230, 0x44230, 3179 0x44300, 0x44314, 3180 0x44320, 0x44328, 3181 0x44330, 0x44330, 3182 0x44400, 0x44414, 3183 0x44420, 0x44428, 3184 0x44430, 0x44430, 3185 0x44500, 0x44514, 3186 0x44520, 0x44528, 3187 0x44530, 0x44530, 3188 0x44714, 0x44718, 3189 0x44730, 0x44730, 3190 0x447c0, 0x447c0, 3191 0x447f0, 0x447f0, 3192 0x447f8, 0x447fc, 3193 0x45000, 0x45014, 3194 0x45020, 0x45028, 3195 0x45030, 0x45030, 3196 0x45100, 0x45114, 3197 0x45120, 0x45128, 3198 0x45130, 0x45130, 3199 0x45200, 0x45214, 3200 0x45220, 0x45228, 3201 0x45230, 0x45230, 3202 0x45300, 0x45314, 3203 0x45320, 0x45328, 3204 0x45330, 0x45330, 3205 0x45400, 0x45414, 3206 0x45420, 0x45428, 3207 0x45430, 0x45430, 3208 0x45500, 0x45514, 3209 0x45520, 0x45528, 3210 0x45530, 0x45530, 3211 0x45714, 0x45718, 3212 0x45730, 0x45730, 3213 0x457c0, 0x457c0, 3214 0x457f0, 0x457f0, 3215 0x457f8, 0x457fc, 3216 0x46000, 0x46010, 3217 0x46020, 0x46034, 3218 0x46040, 0x46050, 3219 0x46060, 0x46088, 3220 0x47000, 0x4709c, 3221 0x470c0, 0x470d4, 3222 0x47100, 0x471a8, 3223 0x471b0, 0x471e8, 3224 0x47200, 0x47210, 3225 0x4721c, 0x47230, 3226 0x47238, 0x47238, 3227 0x47240, 0x472ac, 3228 0x472d0, 0x472f4, 3229 0x47300, 0x47310, 3230 0x47318, 0x47348, 3231 0x47350, 0x47354, 3232 0x47380, 0x47388, 3233 0x47390, 0x47394, 3234 0x47400, 
0x47448, 3235 0x47450, 0x47458, 3236 0x47500, 0x4751c, 3237 0x47530, 0x4754c, 3238 0x47560, 0x4757c, 3239 0x47590, 0x475ac, 3240 0x47600, 0x47630, 3241 0x47640, 0x47644, 3242 0x47660, 0x4769c, 3243 0x47700, 0x47710, 3244 0x47740, 0x47750, 3245 0x4775c, 0x4779c, 3246 0x477b0, 0x477bc, 3247 0x477c4, 0x477c8, 3248 0x477d4, 0x477fc, 3249 0x48000, 0x48004, 3250 0x48018, 0x4801c, 3251 0x49304, 0x493f0, 3252 0x49400, 0x49410, 3253 0x49460, 0x494f4, 3254 0x50000, 0x50084, 3255 0x50090, 0x500cc, 3256 0x50300, 0x50384, 3257 0x50400, 0x50404, 3258 0x50800, 0x50884, 3259 0x50890, 0x508cc, 3260 0x50b00, 0x50b84, 3261 0x50c00, 0x50c04, 3262 0x51000, 0x51020, 3263 0x51028, 0x510c4, 3264 0x51104, 0x51108, 3265 0x51200, 0x51274, 3266 0x51300, 0x51324, 3267 0x51400, 0x51548, 3268 0x51550, 0x51554, 3269 0x5155c, 0x51584, 3270 0x5158c, 0x515c8, 3271 0x515f0, 0x515f4, 3272 0x58000, 0x58004, 3273 0x58018, 0x5801c, 3274 0x59304, 0x593f0, 3275 0x59400, 0x59410, 3276 0x59460, 0x594f4, 3277 }; 3278 3279 u32 *buf_end = (u32 *)(buf + buf_size); 3280 const unsigned int *reg_ranges; 3281 int reg_ranges_size, range; 3282 unsigned int chip_version = chip_id(adap); 3283 3284 /* 3285 * Select the right set of register ranges to dump depending on the 3286 * adapter chip type. 
3287 */ 3288 switch (chip_version) { 3289 case CHELSIO_T4: 3290 if (adap->flags & IS_VF) { 3291 reg_ranges = t4vf_reg_ranges; 3292 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges); 3293 } else { 3294 reg_ranges = t4_reg_ranges; 3295 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges); 3296 } 3297 break; 3298 3299 case CHELSIO_T5: 3300 if (adap->flags & IS_VF) { 3301 reg_ranges = t5vf_reg_ranges; 3302 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges); 3303 } else { 3304 reg_ranges = t5_reg_ranges; 3305 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); 3306 } 3307 break; 3308 3309 case CHELSIO_T6: 3310 if (adap->flags & IS_VF) { 3311 reg_ranges = t6vf_reg_ranges; 3312 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges); 3313 } else { 3314 reg_ranges = t6_reg_ranges; 3315 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); 3316 } 3317 break; 3318 3319 case CHELSIO_T7: 3320 if (adap->flags & IS_VF) { 3321 reg_ranges = t6vf_reg_ranges; 3322 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges); 3323 } else { 3324 reg_ranges = t7_reg_ranges; 3325 reg_ranges_size = ARRAY_SIZE(t7_reg_ranges); 3326 } 3327 break; 3328 3329 default: 3330 CH_ERR(adap, 3331 "Unsupported chip version %d\n", chip_version); 3332 return; 3333 } 3334 3335 /* 3336 * Clear the register buffer and insert the appropriate register 3337 * values selected by the above register ranges. 3338 */ 3339 memset(buf, 0, buf_size); 3340 for (range = 0; range < reg_ranges_size; range += 2) { 3341 unsigned int reg = reg_ranges[range]; 3342 unsigned int last_reg = reg_ranges[range + 1]; 3343 u32 *bufp = (u32 *)(buf + reg); 3344 3345 /* 3346 * Iterate across the register range filling in the register 3347 * buffer but don't write past the end of the register buffer. 3348 */ 3349 while (reg <= last_reg && bufp < buf_end) { 3350 *bufp++ = t4_read_reg(adap, reg); 3351 reg += sizeof(u32); 3352 } 3353 } 3354 } 3355 3356 /* 3357 * Partial EEPROM Vital Product Data structure. 
The VPD starts with one ID 3358 * header followed by one or more VPD-R sections, each with its own header. 3359 */ 3360 struct t4_vpd_hdr { 3361 u8 id_tag; 3362 u8 id_len[2]; 3363 u8 id_data[ID_LEN]; 3364 }; 3365 3366 struct t4_vpdr_hdr { 3367 u8 vpdr_tag; 3368 u8 vpdr_len[2]; 3369 }; 3370 3371 /* 3372 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 3373 */ 3374 #define EEPROM_DELAY 10 /* 10us per poll spin */ 3375 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */ 3376 3377 #define EEPROM_STAT_ADDR 0x7bfc 3378 #define VPD_SIZE 0x800 3379 #define VPD_BASE 0x400 3380 #define VPD_BASE_OLD 0 3381 #define VPD_LEN 1024 3382 #define VPD_INFO_FLD_HDR_SIZE 3 3383 #define CHELSIO_VPD_UNIQUE_ID 0x82 3384 3385 /* 3386 * Small utility function to wait till any outstanding VPD Access is complete. 3387 * We have a per-adapter state variable "VPD Busy" to indicate when we have a 3388 * VPD Access in flight. This allows us to handle the problem of having a 3389 * previous VPD Access time out and prevent an attempt to inject a new VPD 3390 * Request before any in-flight VPD reguest has completed. 3391 */ 3392 static int t4_seeprom_wait(struct adapter *adapter) 3393 { 3394 unsigned int base = adapter->params.pci.vpd_cap_addr; 3395 int max_poll; 3396 3397 /* 3398 * If no VPD Access is in flight, we can just return success right 3399 * away. 3400 */ 3401 if (!adapter->vpd_busy) 3402 return 0; 3403 3404 /* 3405 * Poll the VPD Capability Address/Flag register waiting for it 3406 * to indicate that the operation is complete. 3407 */ 3408 max_poll = EEPROM_MAX_POLL; 3409 do { 3410 u16 val; 3411 3412 udelay(EEPROM_DELAY); 3413 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val); 3414 3415 /* 3416 * If the operation is complete, mark the VPD as no longer 3417 * busy and return success. 3418 */ 3419 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) { 3420 adapter->vpd_busy = 0; 3421 return 0; 3422 } 3423 } while (--max_poll); 3424 3425 /* 3426 * Failure! 
Note that we leave the VPD Busy status set in order to 3427 * avoid pushing a new VPD Access request into the VPD Capability till 3428 * the current operation eventually succeeds. It's a bug to issue a 3429 * new request when an existing request is in flight and will result 3430 * in corrupt hardware state. 3431 */ 3432 return -ETIMEDOUT; 3433 } 3434 3435 /** 3436 * t4_seeprom_read - read a serial EEPROM location 3437 * @adapter: adapter to read 3438 * @addr: EEPROM virtual address 3439 * @data: where to store the read data 3440 * 3441 * Read a 32-bit word from a location in serial EEPROM using the card's PCI 3442 * VPD capability. Note that this function must be called with a virtual 3443 * address. 3444 */ 3445 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data) 3446 { 3447 unsigned int base = adapter->params.pci.vpd_cap_addr; 3448 int ret; 3449 3450 /* 3451 * VPD Accesses must alway be 4-byte aligned! 3452 */ 3453 if (addr >= EEPROMVSIZE || (addr & 3)) 3454 return -EINVAL; 3455 3456 /* 3457 * Wait for any previous operation which may still be in flight to 3458 * complete. 3459 */ 3460 ret = t4_seeprom_wait(adapter); 3461 if (ret) { 3462 CH_ERR(adapter, "VPD still busy from previous operation\n"); 3463 return ret; 3464 } 3465 3466 /* 3467 * Issue our new VPD Read request, mark the VPD as being busy and wait 3468 * for our request to complete. If it doesn't complete, note the 3469 * error and return it to our caller. Note that we do not reset the 3470 * VPD Busy status! 3471 */ 3472 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr); 3473 adapter->vpd_busy = 1; 3474 adapter->vpd_flag = PCI_VPD_ADDR_F; 3475 ret = t4_seeprom_wait(adapter); 3476 if (ret) { 3477 CH_ERR(adapter, "VPD read of address %#x failed\n", addr); 3478 return ret; 3479 } 3480 3481 /* 3482 * Grab the returned data, swizzle it into our endianness and 3483 * return success. 
3484 */ 3485 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data); 3486 *data = le32_to_cpu(*data); 3487 return 0; 3488 } 3489 3490 /** 3491 * t4_seeprom_write - write a serial EEPROM location 3492 * @adapter: adapter to write 3493 * @addr: virtual EEPROM address 3494 * @data: value to write 3495 * 3496 * Write a 32-bit word to a location in serial EEPROM using the card's PCI 3497 * VPD capability. Note that this function must be called with a virtual 3498 * address. 3499 */ 3500 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data) 3501 { 3502 unsigned int base = adapter->params.pci.vpd_cap_addr; 3503 int ret; 3504 u32 stats_reg; 3505 int max_poll; 3506 3507 /* 3508 * VPD Accesses must alway be 4-byte aligned! 3509 */ 3510 if (addr >= EEPROMVSIZE || (addr & 3)) 3511 return -EINVAL; 3512 3513 /* 3514 * Wait for any previous operation which may still be in flight to 3515 * complete. 3516 */ 3517 ret = t4_seeprom_wait(adapter); 3518 if (ret) { 3519 CH_ERR(adapter, "VPD still busy from previous operation\n"); 3520 return ret; 3521 } 3522 3523 /* 3524 * Issue our new VPD Read request, mark the VPD as being busy and wait 3525 * for our request to complete. If it doesn't complete, note the 3526 * error and return it to our caller. Note that we do not reset the 3527 * VPD Busy status! 3528 */ 3529 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 3530 cpu_to_le32(data)); 3531 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, 3532 (u16)addr | PCI_VPD_ADDR_F); 3533 adapter->vpd_busy = 1; 3534 adapter->vpd_flag = 0; 3535 ret = t4_seeprom_wait(adapter); 3536 if (ret) { 3537 CH_ERR(adapter, "VPD write of address %#x failed\n", addr); 3538 return ret; 3539 } 3540 3541 /* 3542 * Reset PCI_VPD_DATA register after a transaction and wait for our 3543 * request to complete. If it doesn't complete, return error. 
3544 */ 3545 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0); 3546 max_poll = EEPROM_MAX_POLL; 3547 do { 3548 udelay(EEPROM_DELAY); 3549 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg); 3550 } while ((stats_reg & 0x1) && --max_poll); 3551 if (!max_poll) 3552 return -ETIMEDOUT; 3553 3554 /* Return success! */ 3555 return 0; 3556 } 3557 3558 /** 3559 * t4_eeprom_ptov - translate a physical EEPROM address to virtual 3560 * @phys_addr: the physical EEPROM address 3561 * @fn: the PCI function number 3562 * @sz: size of function-specific area 3563 * 3564 * Translate a physical EEPROM address to virtual. The first 1K is 3565 * accessed through virtual addresses starting at 31K, the rest is 3566 * accessed through virtual addresses starting at 0. 3567 * 3568 * The mapping is as follows: 3569 * [0..1K) -> [31K..32K) 3570 * [1K..1K+A) -> [ES-A..ES) 3571 * [1K+A..ES) -> [0..ES-A-1K) 3572 * 3573 * where A = @fn * @sz, and ES = EEPROM size. 3574 */ 3575 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) 3576 { 3577 fn *= sz; 3578 if (phys_addr < 1024) 3579 return phys_addr + (31 << 10); 3580 if (phys_addr < 1024 + fn) 3581 return EEPROMSIZE - fn + phys_addr - 1024; 3582 if (phys_addr < EEPROMSIZE) 3583 return phys_addr - 1024 - fn; 3584 return -EINVAL; 3585 } 3586 3587 /** 3588 * t4_seeprom_wp - enable/disable EEPROM write protection 3589 * @adapter: the adapter 3590 * @enable: whether to enable or disable write protection 3591 * 3592 * Enables or disables write protection on the serial EEPROM. 3593 */ 3594 int t4_seeprom_wp(struct adapter *adapter, int enable) 3595 { 3596 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 
0xc : 0); 3597 } 3598 3599 /** 3600 * get_vpd_keyword_val - Locates an information field keyword in the VPD 3601 * @vpd: Pointer to buffered vpd data structure 3602 * @kw: The keyword to search for 3603 * @region: VPD region to search (starting from 0) 3604 * 3605 * Returns the value of the information field keyword or 3606 * -ENOENT otherwise. 3607 */ 3608 static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region) 3609 { 3610 int i, tag; 3611 unsigned int offset, len; 3612 const struct t4_vpdr_hdr *vpdr; 3613 3614 offset = sizeof(struct t4_vpd_hdr); 3615 vpdr = (const void *)(vpd + offset); 3616 tag = vpdr->vpdr_tag; 3617 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8); 3618 while (region--) { 3619 offset += sizeof(struct t4_vpdr_hdr) + len; 3620 vpdr = (const void *)(vpd + offset); 3621 if (++tag != vpdr->vpdr_tag) 3622 return -ENOENT; 3623 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8); 3624 } 3625 offset += sizeof(struct t4_vpdr_hdr); 3626 3627 if (offset + len > VPD_LEN) { 3628 return -ENOENT; 3629 } 3630 3631 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) { 3632 if (memcmp(vpd + i , kw , 2) == 0){ 3633 i += VPD_INFO_FLD_HDR_SIZE; 3634 return i; 3635 } 3636 3637 i += VPD_INFO_FLD_HDR_SIZE + vpd[i+2]; 3638 } 3639 3640 return -ENOENT; 3641 } 3642 3643 3644 /** 3645 * get_vpd_params - read VPD parameters from VPD EEPROM 3646 * @adapter: adapter to read 3647 * @p: where to store the parameters 3648 * @vpd: caller provided temporary space to read the VPD into 3649 * 3650 * Reads card parameters stored in VPD EEPROM. 3651 */ 3652 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p, 3653 uint16_t device_id, u32 *buf) 3654 { 3655 int i, ret, addr; 3656 int ec, sn, pn, na, md; 3657 u8 csum; 3658 const u8 *vpd = (const u8 *)buf; 3659 3660 /* 3661 * Card information normally starts at VPD_BASE but early cards had 3662 * it at 0. 
 */
	ret = t4_seeprom_read(adapter, VPD_BASE, buf);
	if (ret)
		return (ret);

	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Pull the entire VPD area into the local buffer, 4B at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, buf++);
		if (ret)
			return ret;
	}

/* Locate a VPD keyword; fail the whole parse if it is missing. */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(vpd, name, 0); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	/*
	 * The RV keyword covers bytes 0..i of the VPD; the byte sum over
	 * that range (including the RV byte itself) must be zero.
	 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/*
	 * Copy each field out and strip trailing whitespace.  For the
	 * variable-length fields the length byte sits 2 bytes before the
	 * field data (within the keyword header).
	 */
	memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	if (device_id & 0x80)
		return 0;	/* Custom card */

	/* "VF" (media description) is optional; fall back to "unknown". */
	md = get_vpd_keyword_val(vpd, "VF", 1);
	if (md < 0) {
		snprintf(p->md, sizeof(p->md), "unknown");
	} else {
		i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
		memcpy(p->md, vpd + md, min(i, MD_LEN));
		strstrip((char *)p->md);
	}

	return 0;
}

/* Flash Layout {start sector, # of sectors} for T4/T5/T6 adapters */
static const struct t4_flash_loc_entry t4_flash_loc_arr[] = {
	[FLASH_LOC_EXP_ROM] = { 0, 6 },
	[FLASH_LOC_IBFT] = { 6, 1 },
	[FLASH_LOC_BOOTCFG] = { 7, 1 },
	[FLASH_LOC_FW] = { 8, 16 },
	[FLASH_LOC_FWBOOTSTRAP] = { 27, 1 },
	[FLASH_LOC_ISCSI_CRASH] = { 29, 1 },
	[FLASH_LOC_FCOE_CRASH] = { 30, 1 },
	[FLASH_LOC_CFG] = { 31, 1 },
	[FLASH_LOC_CUDBG] = { 32, 32 },
	[FLASH_LOC_BOOT_AREA] = { 0, 8 },	/* Spans complete Boot Area */
	[FLASH_LOC_END] = { 64, 0 },
};

/* Flash Layout {start sector, # of sectors} for T7 adapters */
static const struct t4_flash_loc_entry t7_flash_loc_arr[] = {
	[FLASH_LOC_VPD] = { 0, 1 },
	[FLASH_LOC_FWBOOTSTRAP] = { 1, 1 },
	[FLASH_LOC_FW] = { 2, 29 },
	[FLASH_LOC_CFG] = { 31, 1 },
	[FLASH_LOC_EXP_ROM] = { 32, 15 },
	[FLASH_LOC_IBFT] = { 47, 1 },
	[FLASH_LOC_BOOTCFG] = { 48, 1 },
	[FLASH_LOC_DPU_BOOT] = { 49, 13 },
	[FLASH_LOC_ISCSI_CRASH] = { 62, 1 },
	[FLASH_LOC_FCOE_CRASH] = { 63, 1 },
	[FLASH_LOC_VPD_BACKUP] = { 64, 1 },
	[FLASH_LOC_FWBOOTSTRAP_BACKUP] = { 65, 1 },
	[FLASH_LOC_FW_BACKUP] = { 66, 29 },
	[FLASH_LOC_CFG_BACK] = { 95, 1 },
	[FLASH_LOC_CUDBG] = { 96, 48 },
	[FLASH_LOC_CHIP_DUMP] = { 144, 48 },
	[FLASH_LOC_DPU_AREA] = { 192, 64 },
	[FLASH_LOC_BOOT_AREA] = { 32, 17 },	/* Spans complete UEFI/PXE Boot Area */
	[FLASH_LOC_END] = { 256, 0 },
};

/*
 * Return the flash byte offset of the given region for this chip and,
 * if @lenp is not NULL, store the region's maximum size in bytes.
 * T7 and later parts use a different flash layout than T4/T5/T6.
 */
int
t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
    unsigned int *lenp)
{
	const struct t4_flash_loc_entry *l = chip_id(adap) >= CHELSIO_T7 ?
	    &t7_flash_loc_arr[loc] : &t4_flash_loc_arr[loc];

	if (lenp != NULL)
		*lenp = FLASH_MAX_SIZE(l->nsecs);
	return (FLASH_START(l->start_sec));
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program 256B page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase 64KB sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;
	uint32_t op;

	/* Only 1-4 bytes can be transferred per SF_OP. */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* Bail out if a previous SF operation is still in flight. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* BYTECNT is encoded as count - 1. */
	op = V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1);
	/* T7 and later additionally set a bit to disable quad reads here. */
	if (chip_id(adapter) >= CHELSIO_T7)
		op |= F_QUADREADDISABLE;
	t4_write_reg(adapter, A_SF_OP, op);
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* Data must be staged in SF_DATA before kicking off SF_OP. */
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		/* Issue SF_RD_STATUS, then read back one status byte. */
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		/* Done once bit 0 of the status register clears. */
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The read must lie within the flash and be 4B aligned. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Command byte goes in the low byte, address in the upper three. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* The last word drops CONT and locks SF for the unlock. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store
 data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as byte stream
 *	(i.e. matches what on disk), otherwise in big-endian.
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must fit in one flash page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	/* Enable writes, then send the page-program command + address. */
	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload 4 bytes at a time. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		/* Pack up to 4 bytes MSB-first into val. */
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* @data was advanced past the payload above; rewind to compare. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
3994 */ 3995 int t4_get_fw_version(struct adapter *adapter, u32 *vers) 3996 { 3997 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL); 3998 3999 return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver), 4000 1, vers, 0); 4001 } 4002 4003 /** 4004 * t4_get_fw_hdr - read the firmware header 4005 * @adapter: the adapter 4006 * @hdr: where to place the version 4007 * 4008 * Reads the FW header from flash into caller provided buffer. 4009 */ 4010 int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr) 4011 { 4012 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL); 4013 4014 return t4_read_flash(adapter, start, sizeof (*hdr) / sizeof (uint32_t), 4015 (uint32_t *)hdr, 1); 4016 } 4017 4018 /** 4019 * t4_get_bs_version - read the firmware bootstrap version 4020 * @adapter: the adapter 4021 * @vers: where to place the version 4022 * 4023 * Reads the FW Bootstrap version from flash. 4024 */ 4025 int t4_get_bs_version(struct adapter *adapter, u32 *vers) 4026 { 4027 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FWBOOTSTRAP, 4028 NULL); 4029 4030 return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver), 4031 1, vers, 0); 4032 } 4033 4034 /** 4035 * t4_get_tp_version - read the TP microcode version 4036 * @adapter: the adapter 4037 * @vers: where to place the version 4038 * 4039 * Reads the TP microcode version from flash. 4040 */ 4041 int t4_get_tp_version(struct adapter *adapter, u32 *vers) 4042 { 4043 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL); 4044 4045 return t4_read_flash(adapter, start + 4046 offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0); 4047 } 4048 4049 /** 4050 * t4_get_exprom_version - return the Expansion ROM version (if any) 4051 * @adapter: the adapter 4052 * @vers: where to place the version 4053 * 4054 * Reads the Expansion ROM header from FLASH and returns the version 4055 * number (if present) through the @vers return value pointer. 
We return
 *	this in the Firmware Version Format since it's convenient.  Return
 *	0 on success, -ENOENT if no Expansion ROM is present.
 */
int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
{
	/* Layout of the start of an Expansion ROM image on flash. */
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	} *hdr;
	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
					   sizeof(u32))];
	int ret;
	const int start = t4_flash_loc_start(adapter, FLASH_LOC_EXP_ROM, NULL);

	ret = t4_read_flash(adapter, start, ARRAY_SIZE(exprom_header_buf),
			    exprom_header_buf, 0);
	if (ret)
		return ret;

	/* A valid option ROM begins with the 0x55 0xaa signature. */
	hdr = (struct exprom_header *)exprom_header_buf;
	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
		return -ENOENT;

	/* Repackage the ROM version bytes in Firmware Version Format. */
	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
	return 0;
}

/**
 *	t4_get_scfg_version - return the Serial Configuration version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the Serial Configuration Version via the Firmware interface
 *	(thus this can only be called once we're ready to issue Firmware
 *	commands).  The format of the Serial Configuration version is
 *	adapter specific.  Returns 0 on success, an error on failure.
 *
 *	Note that early versions of the Firmware didn't include the ability
 *	to retrieve the Serial Configuration version, so we zero-out the
 *	return-value parameter in that case to avoid leaving it with
 *	garbage in it.
 *
 *	Also note that the Firmware will return its cached copy of the Serial
 *	Initialization Revision ID, not the actual Revision ID as written in
 *	the Serial EEPROM.
This is only an issue if a new VPD has been written
 *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
 *	it's best to defer calling this routine till after a FW_RESET_CMD has
 *	been issued if the Host Driver will be performing a full adapter
 *	initialization.
 */
int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
{
	u32 scfgrev_param;
	int ret;

	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &scfgrev_param, vers);
	/* Older firmware may not support this query; don't leave garbage. */
	if (ret)
		*vers = 0;
	return ret;
}

/**
 *	t4_get_vpd_version - return the VPD version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the VPD via the Firmware interface (thus this can only be called
 *	once we're ready to issue Firmware commands).  The format of the
 *	VPD version is adapter specific.  Returns 0 on success, an error on
 *	failure.
 *
 *	Note that early versions of the Firmware didn't include the ability
 *	to retrieve the VPD version, so we zero-out the return-value parameter
 *	in that case to avoid leaving it with garbage in it.
 *
 *	Also note that the Firmware will return its cached copy of the VPD
 *	Revision ID, not the actual Revision ID as written in the Serial
 *	EEPROM.  This is only an issue if a new VPD has been written and the
 *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
 *	to defer calling this routine till after a FW_RESET_CMD has been issued
 *	if the Host Driver will be performing a full adapter initialization.
4143 */ 4144 int t4_get_vpd_version(struct adapter *adapter, u32 *vers) 4145 { 4146 u32 vpdrev_param; 4147 int ret; 4148 4149 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 4150 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV)); 4151 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 4152 1, &vpdrev_param, vers); 4153 if (ret) 4154 *vers = 0; 4155 return ret; 4156 } 4157 4158 /** 4159 * t4_get_version_info - extract various chip/firmware version information 4160 * @adapter: the adapter 4161 * 4162 * Reads various chip/firmware version numbers and stores them into the 4163 * adapter Adapter Parameters structure. If any of the efforts fails 4164 * the first failure will be returned, but all of the version numbers 4165 * will be read. 4166 */ 4167 int t4_get_version_info(struct adapter *adapter) 4168 { 4169 int ret = 0; 4170 4171 #define FIRST_RET(__getvinfo) \ 4172 do { \ 4173 int __ret = __getvinfo; \ 4174 if (__ret && !ret) \ 4175 ret = __ret; \ 4176 } while (0) 4177 4178 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers)); 4179 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers)); 4180 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers)); 4181 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers)); 4182 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers)); 4183 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers)); 4184 4185 #undef FIRST_RET 4186 4187 return ret; 4188 } 4189 4190 /** 4191 * t4_flash_erase_sectors - erase a range of flash sectors 4192 * @adapter: the adapter 4193 * @start: the first sector to erase 4194 * @end: the last sector to erase 4195 * 4196 * Erases the sectors in the given inclusive range. 
 */
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		/*
		 * Each sector: enable writes, issue the erase command with
		 * the sector number in the address field, then poll until
		 * the flash reports the erase is finished.
		 */
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp)
{
	unsigned int len = 0;
	const int cfg_start = t4_flash_loc_start(adapter, FLASH_LOC_CFG, &len);

	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < cfg_start + len)
		return -ENOSPC;
	if (lenp != NULL)
		*lenp = len;
	return (cfg_start);
}

/*
 * Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort ...
 */
static int t4_fw_matches_chip(struct adapter *adap,
			      const struct fw_hdr *hdr)
{
	/*
	 * The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
 */
	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6) ||
	    (is_t7(adap) && hdr->chip == FW_HDR_CHIP_T7))
		return 1;

	CH_ERR(adap,
	    "FW image (%d) is not suitable for this adapter (%d)\n",
	    hdr->chip, chip_id(adap));
	return 0;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;
	enum t4_flash_loc loc;

	/* Bootstrap images go to their own region; everything else to FW. */
	loc = ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP ?
	    FLASH_LOC_FWBOOTSTRAP : FLASH_LOC_FW;
	fw_start = t4_flash_loc_start(adap, loc, &fw_size);
	fw_start_sec = fw_start / SF_SEC_SIZE;

	/* Validate the image before touching the flash. */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
		    "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
		    "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		    fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The 32-bit big-endian word sum of a valid image is 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
		    "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, SF_SEC_SIZE);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages from the original image. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally, patch in the real version to mark the image good. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
		       ret);
	return ret;
}

/**
 *	t4_fwcache - firmware cache operation
 *	@adap: the adapter
 *	@op  : the operation (flush or flush and invalidate)
 */
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
{
	struct fw_params_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn =
	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			    V_FW_PARAMS_CMD_PFN(adap->pf) |
			    V_FW_PARAMS_CMD_VFN(0));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.param[0].mnem =
	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
	c.param[0].val = cpu_to_be32(op);

	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}

/*
 * Read the CIM PIF logic analyzer request/response data into the caller's
 * buffers and optionally return the current write pointers.  Logic-analyzer
 * capture is paused (LADBGEN toggled off) while reading and the original
 * debug configuration is restored afterwards.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the read pointers, then fetch one entry. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/*
 * Read the CIM MA logic analyzer request/response data into the caller's
 * buffers, pausing capture while reading as above.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/*
 * Dump the ULP_RX logic analyzer into @la_buf.  Entries from the 8 data
 * lanes are interleaved: lane i's j-th word lands at la_buf[j * 8 + i].
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}

/**
 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 *	@caps16: a 16-bit Port Capabilities value
 *
 *	Returns the equivalent 32-bit Port Capabilities value.
4461 */ 4462 static uint32_t fwcaps16_to_caps32(uint16_t caps16) 4463 { 4464 uint32_t caps32 = 0; 4465 4466 #define CAP16_TO_CAP32(__cap) \ 4467 do { \ 4468 if (caps16 & FW_PORT_CAP_##__cap) \ 4469 caps32 |= FW_PORT_CAP32_##__cap; \ 4470 } while (0) 4471 4472 CAP16_TO_CAP32(SPEED_100M); 4473 CAP16_TO_CAP32(SPEED_1G); 4474 CAP16_TO_CAP32(SPEED_25G); 4475 CAP16_TO_CAP32(SPEED_10G); 4476 CAP16_TO_CAP32(SPEED_40G); 4477 CAP16_TO_CAP32(SPEED_100G); 4478 CAP16_TO_CAP32(FC_RX); 4479 CAP16_TO_CAP32(FC_TX); 4480 CAP16_TO_CAP32(ANEG); 4481 CAP16_TO_CAP32(FORCE_PAUSE); 4482 CAP16_TO_CAP32(MDIAUTO); 4483 CAP16_TO_CAP32(MDISTRAIGHT); 4484 CAP16_TO_CAP32(FEC_RS); 4485 CAP16_TO_CAP32(FEC_BASER_RS); 4486 CAP16_TO_CAP32(802_3_PAUSE); 4487 CAP16_TO_CAP32(802_3_ASM_DIR); 4488 4489 #undef CAP16_TO_CAP32 4490 4491 return caps32; 4492 } 4493 4494 /** 4495 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits 4496 * @caps32: a 32-bit Port Capabilities value 4497 * 4498 * Returns the equivalent 16-bit Port Capabilities value. Note that 4499 * not all 32-bit Port Capabilities can be represented in the 16-bit 4500 * Port Capabilities and some fields/values may not make it. 
4501 */ 4502 static uint16_t fwcaps32_to_caps16(uint32_t caps32) 4503 { 4504 uint16_t caps16 = 0; 4505 4506 #define CAP32_TO_CAP16(__cap) \ 4507 do { \ 4508 if (caps32 & FW_PORT_CAP32_##__cap) \ 4509 caps16 |= FW_PORT_CAP_##__cap; \ 4510 } while (0) 4511 4512 CAP32_TO_CAP16(SPEED_100M); 4513 CAP32_TO_CAP16(SPEED_1G); 4514 CAP32_TO_CAP16(SPEED_10G); 4515 CAP32_TO_CAP16(SPEED_25G); 4516 CAP32_TO_CAP16(SPEED_40G); 4517 CAP32_TO_CAP16(SPEED_100G); 4518 CAP32_TO_CAP16(FC_RX); 4519 CAP32_TO_CAP16(FC_TX); 4520 CAP32_TO_CAP16(802_3_PAUSE); 4521 CAP32_TO_CAP16(802_3_ASM_DIR); 4522 CAP32_TO_CAP16(ANEG); 4523 CAP32_TO_CAP16(FORCE_PAUSE); 4524 CAP32_TO_CAP16(MDIAUTO); 4525 CAP32_TO_CAP16(MDISTRAIGHT); 4526 CAP32_TO_CAP16(FEC_RS); 4527 CAP32_TO_CAP16(FEC_BASER_RS); 4528 4529 #undef CAP32_TO_CAP16 4530 4531 return caps16; 4532 } 4533 4534 static int8_t fwcap_to_fec(uint32_t caps, bool unset_means_none) 4535 { 4536 int8_t fec = 0; 4537 4538 if ((caps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)) == 0) 4539 return (unset_means_none ? FEC_NONE : 0); 4540 4541 if (caps & FW_PORT_CAP32_FEC_RS) 4542 fec |= FEC_RS; 4543 if (caps & FW_PORT_CAP32_FEC_BASER_RS) 4544 fec |= FEC_BASER_RS; 4545 if (caps & FW_PORT_CAP32_FEC_NO_FEC) 4546 fec |= FEC_NONE; 4547 4548 return (fec); 4549 } 4550 4551 /* 4552 * Note that 0 is not translated to NO_FEC. 4553 */ 4554 static uint32_t fec_to_fwcap(int8_t fec) 4555 { 4556 uint32_t caps = 0; 4557 4558 /* Only real FECs allowed. */ 4559 MPASS((fec & ~M_FW_PORT_CAP32_FEC) == 0); 4560 4561 if (fec & FEC_RS) 4562 caps |= FW_PORT_CAP32_FEC_RS; 4563 if (fec & FEC_BASER_RS) 4564 caps |= FW_PORT_CAP32_FEC_BASER_RS; 4565 if (fec & FEC_NONE) 4566 caps |= FW_PORT_CAP32_FEC_NO_FEC; 4567 4568 return (caps); 4569 } 4570 4571 /** 4572 * t4_link_l1cfg - apply link configuration to MAC/PHY 4573 * @phy: the PHY to setup 4574 * @mac: the MAC to setup 4575 * @lc: the requested link configuration 4576 * 4577 * Set up a port's MAC and PHY according to a desired link configuration. 
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
	unsigned int aneg, fc, fec, speed, rcap;

	/* Flow control: advertise requested RX/TX pause; if pause is not to
	 * be autonegotiated, ask the firmware to force it. */
	fc = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP32_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP32_FC_TX;
	if (!(lc->requested_fc & PAUSE_AUTONEG))
		fc |= FW_PORT_CAP32_FORCE_PAUSE;

	/* Autoneg: explicit request wins, otherwise follow port capability. */
	if (lc->requested_aneg == AUTONEG_DISABLE)
		aneg = 0;
	else if (lc->requested_aneg == AUTONEG_ENABLE)
		aneg = FW_PORT_CAP32_ANEG;
	else
		aneg = lc->pcaps & FW_PORT_CAP32_ANEG;

	/* Speed: with aneg advertise every supported speed; without it use
	 * the requested speed or fall back to the port's top speed. */
	if (aneg) {
		speed = lc->pcaps &
		    V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
	} else if (lc->requested_speed != 0)
		speed = speed_to_fwcap(lc->requested_speed);
	else
		speed = fwcap_top_speed(lc->pcaps);

	fec = 0;
	if (fec_supported(speed)) {
		int force_fec;

		/* FORCE_FEC can only be used if the port supports it. */
		if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
			force_fec = lc->force_fec;
		else
			force_fec = 0;

		if (lc->requested_fec == FEC_AUTO) {
			if (force_fec > 0) {
				/*
				 * Must use FORCE_FEC even though requested FEC
				 * is AUTO. Set all the FEC bits valid for the
				 * speed and let the firmware pick one.
				 */
				fec |= FW_PORT_CAP32_FORCE_FEC;
				if (speed & FW_PORT_CAP32_SPEED_25G) {
					fec |= FW_PORT_CAP32_FEC_RS;
					fec |= FW_PORT_CAP32_FEC_BASER_RS;
					fec |= FW_PORT_CAP32_FEC_NO_FEC;
				} else {
					fec |= FW_PORT_CAP32_FEC_RS;
					fec |= FW_PORT_CAP32_FEC_NO_FEC;
				}
			} else {
				/*
				 * Set only 1b. Old firmwares can't deal with
				 * multiple bits and new firmwares are free to
				 * ignore this and try whatever FECs they want
				 * because we aren't setting FORCE_FEC here.
				 */
				fec |= fec_to_fwcap(lc->fec_hint);
				MPASS(powerof2(fec));

				/*
				 * Override the hint if the FEC is not valid for
				 * the potential top speed.  Request the best
				 * FEC at that speed instead.
				 */
				if ((speed & FW_PORT_CAP32_SPEED_25G) == 0 &&
				    fec == FW_PORT_CAP32_FEC_BASER_RS) {
					fec = FW_PORT_CAP32_FEC_RS;
				}
			}
		} else {
			/*
			 * User has explicitly requested some FEC(s). Set
			 * FORCE_FEC unless prohibited from using it.
			 */
			if (force_fec != 0)
				fec |= FW_PORT_CAP32_FORCE_FEC;
			fec |= fec_to_fwcap(lc->requested_fec &
			    M_FW_PORT_CAP32_FEC);
			if (lc->requested_fec & FEC_MODULE)
				fec |= fec_to_fwcap(lc->fec_hint);
		}

		/*
		 * This is for compatibility with old firmwares. The original
		 * way to request NO_FEC was to not set any of the FEC bits. New
		 * firmwares understand this too.
		 */
		if (fec == FW_PORT_CAP32_FEC_NO_FEC)
			fec = 0;
	}

	/* Force AN on for BT cards. */
	if (isset(&adap->bt_map, port))
		aneg = lc->pcaps & FW_PORT_CAP32_ANEG;

	/* Clamp the request to what the port actually supports. */
	rcap = aneg | speed | fc | fec;
	if ((rcap | lc->pcaps) != lc->pcaps) {
#ifdef INVARIANTS
		CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x, removed 0x%x\n", rcap,
		    lc->pcaps, rcap & (rcap ^ lc->pcaps));
#endif
		rcap &= lc->pcaps;
	}
	rcap |= mdi;

	/* Issue the L1 configure, using the 32-bit form when available. */
	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	if (adap->params.port_caps32) {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
			FW_LEN16(c));
		c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	} else {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			FW_LEN16(c));
		c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	}

	/* Remember what was actually requested of the firmware. */
	lc->requested_caps = rcap;
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			    FW_LEN16(c));
	/* Requesting only ANEG kicks off a fresh autonegotiation. */
	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/* Human-readable description for a set of interrupt cause bits. */
struct intr_details {
	u32 mask;
	const char *msg;
};

/* Handler to invoke when any of the bits in @mask is set in the cause. */
struct intr_action {
	u32 mask;
	int arg;
	bool (*action)(struct adapter *, int, bool);
};

#define NONFATAL_IF_DISABLED 1
struct intr_info {
	const char *name;	/* name of the INT_CAUSE register */
	int cause_reg;		/* INT_CAUSE register */
	int enable_reg;		/* INT_ENABLE register */
	u32 fatal;		/* bits that are fatal */
	int flags;		/* hints */
	const struct intr_details *details;
	const struct intr_action *actions;
};

/*
 * One-character severity marker for a cause value: '!' fatal,
 * '*' enabled-but-nonfatal, '-' neither.
 */
static inline char
intr_alert_char(u32 cause, u32 enable, u32 fatal)
{

	if (cause & fatal)
		return ('!');
	if (cause & enable)
		return ('*');
	return ('-');
}

/*
 * Log the state of one interrupt cause register: a summary line followed by
 * one line per known cause bit that is set, and a catch-all line for any
 * set bits without a description.
 */
static void
t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
{
	u32 enable, fatal, leftover;
	const struct intr_details *details;
	char alert;

	enable = t4_read_reg(adap, ii->enable_reg);
	/* With NONFATAL_IF_DISABLED, only enabled bits count as fatal. */
	if (ii->flags & NONFATAL_IF_DISABLED)
		fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg);
	else
		fatal = ii->fatal;
	alert = intr_alert_char(cause, enable, fatal);
	CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
	    alert, ii->name, ii->cause_reg, cause, enable, fatal);

	leftover = cause;
	for (details = ii->details; details && details->mask != 0; details++) {
		u32 msgbits = details->mask & cause;
		if (msgbits == 0)
			continue;
		alert = intr_alert_char(msgbits, enable, ii->fatal);
		CH_ALERT(adap, "  %c [0x%08x] %s\n", alert, msgbits,
		    details->msg);
		leftover &= ~msgbits;
	}
	if (leftover != 0 && leftover != cause)
		CH_ALERT(adap, "  ? [0x%08x]\n", leftover);
}

/*
 * Returns true for fatal error.
 */
static bool
t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
    u32 additional_cause, bool verbose)
{
	u32 cause, fatal;
	bool rc;
	const struct intr_action *action;

	/*
	 * Read and display cause.  Note that the top level PL_INT_CAUSE is a
	 * bit special and we need to completely ignore the bits that are not in
	 * PL_INT_ENABLE.
	 */
	cause = t4_read_reg(adap, ii->cause_reg);
	if (ii->cause_reg == A_PL_INT_CAUSE)
		cause &= t4_read_reg(adap, ii->enable_reg);
	if (verbose || cause != 0)
		t4_show_intr_info(adap, ii, cause);
	fatal = cause & ii->fatal;
	/* Disabled bits are downgraded to nonfatal when the flag is set. */
	if (fatal != 0 && ii->flags & NONFATAL_IF_DISABLED)
		fatal &= t4_read_reg(adap, ii->enable_reg);
	cause |= additional_cause;
	if (cause == 0)
		return (false);

	/* Run every registered action whose mask intersects the cause. */
	rc = fatal != 0;
	for (action = ii->actions; action && action->mask != 0; action++) {
		if (!(action->mask & cause))
			continue;
		rc |= (action->action)(adap, action->arg, verbose);
	}

	/* clear */
	t4_write_reg(adap, ii->cause_reg, cause);
	(void)t4_read_reg(adap, ii->cause_reg);

	return (rc);
}

/*
 * Interrupt handler for the PCIE module.
 */
static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* T4 only: UTL system bus agent parity errors (all fatal). */
	static const struct intr_details sysbus_intr_details[] = {
		{ F_RNPP, "RXNP array parity error" },
		{ F_RPCP, "RXPC array parity error" },
		{ F_RCIP, "RXCIF array parity error" },
		{ F_RCCP, "Rx completions control array parity error" },
		{ F_RFTP, "RXFT array parity error" },
		{ 0 }
	};
	static const struct intr_info sysbus_intr_info = {
		.name = "PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS",
		.cause_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		.enable_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE,
		.fatal = F_RFTP | F_RCCP | F_RCIP | F_RPCP | F_RNPP,
		.flags = 0,
		.details = sysbus_intr_details,
		.actions = NULL,
	};
	/* T4 only: UTL PCI Express port status. */
	static const struct intr_details pcie_port_intr_details[] = {
		{ F_TPCP, "TXPC array parity error" },
		{ F_TNPP, "TXNP array parity error" },
		{ F_TFTP, "TXFT array parity error" },
		{ F_TCAP, "TXCA array parity error" },
		{ F_TCIP, "TXCIF array parity error" },
		{ F_RCAP, "RXCA array parity error" },
		{ F_OTDD, "outbound request TLP discarded" },
		{ F_RDPE, "Rx data parity error" },
		{ F_TDUE, "Tx uncorrectable data error" },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info = {
		.name = "PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS",
		.cause_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		.enable_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE,
		.fatal = F_TPCP | F_TNPP | F_TFTP | F_TCAP | F_TCIP | F_RCAP |
		    F_OTDD | F_RDPE | F_TDUE,
		.flags = 0,
		.details = pcie_port_intr_details,
		.actions = NULL,
	};
	/* PCIE_INT_CAUSE decode for T4. */
	static const struct intr_details pcie_intr_details[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error" },
		{ F_MSIADDRHPERR, "MSI AddrH parity error" },
		{ F_MSIDATAPERR, "MSI data parity error" },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error" },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error" },
		{ F_MSIXDATAPERR, "MSI-X data parity error" },
		{ F_MSIXDIPERR, "MSI-X DI parity error" },
		{ F_PIOCPLPERR, "PCIe PIO completion FIFO parity error" },
		{ F_PIOREQPERR, "PCIe PIO request FIFO parity error" },
		{ F_TARTAGPERR, "PCIe target tag FIFO parity error" },
		{ F_CCNTPERR, "PCIe CMD channel count parity error" },
		{ F_CREQPERR, "PCIe CMD channel request parity error" },
		{ F_CRSPPERR, "PCIe CMD channel response parity error" },
		{ F_DCNTPERR, "PCIe DMA channel count parity error" },
		{ F_DREQPERR, "PCIe DMA channel request parity error" },
		{ F_DRSPPERR, "PCIe DMA channel response parity error" },
		{ F_HCNTPERR, "PCIe HMA channel count parity error" },
		{ F_HREQPERR, "PCIe HMA channel request parity error" },
		{ F_HRSPPERR, "PCIe HMA channel response parity error" },
		{ F_CFGSNPPERR, "PCIe config snoop FIFO parity error" },
		{ F_FIDPERR, "PCIe FID parity error" },
		{ F_INTXCLRPERR, "PCIe INTx clear parity error" },
		{ F_MATAGPERR, "PCIe MA tag parity error" },
		{ F_PIOTAGPERR, "PCIe PIO tag parity error" },
		{ F_RXCPLPERR, "PCIe Rx completion parity error" },
		{ F_RXWRPERR, "PCIe Rx write parity error" },
		{ F_RPLPERR, "PCIe replay buffer parity error" },
		{ F_PCIESINT, "PCIe core secondary fault" },
		{ F_PCIEPINT, "PCIe core primary fault" },
		{ F_UNXSPLCPLERR, "PCIe unexpected split completion error" },
		{ 0 }
	};
	/* PCIE_INT_CAUSE decode for T5 and later. */
	static const struct intr_details t5_pcie_intr_details[] = {
		{ F_IPGRPPERR, "Parity errors observed by IP" },
		{ F_NONFATALERR, "PCIe non-fatal error" },
		{ F_READRSPERR, "Outbound read error" },
		{ F_TRGT1GRPPERR, "PCIe TRGT1 group FIFOs parity error" },
		{ F_IPSOTPERR, "PCIe IP SOT buffer SRAM parity error" },
		{ F_IPRETRYPERR, "PCIe IP replay buffer parity error" },
		{ F_IPRXDATAGRPPERR, "PCIe IP Rx data group SRAMs parity error" },
		{ F_IPRXHDRGRPPERR, "PCIe IP Rx header group SRAMs parity error" },
		{ F_PIOTAGQPERR, "PIO tag queue FIFO parity error" },
		{ F_MAGRPPERR, "MA group FIFO parity error" },
		{ F_VFIDPERR, "VFID SRAM parity error" },
		{ F_FIDPERR, "FID SRAM parity error" },
		{ F_CFGSNPPERR, "config snoop FIFO parity error" },
		{ F_HRSPPERR, "HMA channel response data SRAM parity error" },
		{ F_HREQRDPERR, "HMA channel read request SRAM parity error" },
		{ F_HREQWRPERR, "HMA channel write request SRAM parity error" },
		{ F_DRSPPERR, "DMA channel response data SRAM parity error" },
		/*
		 * NOTE(review): bit name suggests a *read* request error but
		 * the message says "write" -- confirm against the datasheet.
		 */
		{ F_DREQRDPERR, "DMA channel write request SRAM parity error" },
		{ F_CRSPPERR, "CMD channel response data SRAM parity error" },
		{ F_CREQRDPERR, "CMD channel read request SRAM parity error" },
		{ F_MSTTAGQPERR, "PCIe master tag queue SRAM parity error" },
		{ F_TGTTAGQPERR, "PCIe target tag queue FIFO parity error" },
		{ F_PIOREQGRPPERR, "PIO request group FIFOs parity error" },
		{ F_PIOCPLGRPPERR, "PIO completion group FIFOs parity error" },
		{ F_MSIXDIPERR, "MSI-X DI SRAM parity error" },
		{ F_MSIXDATAPERR, "MSI-X data SRAM parity error" },
		{ F_MSIXADDRHPERR, "MSI-X AddrH SRAM parity error" },
		{ F_MSIXADDRLPERR, "MSI-X AddrL SRAM parity error" },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error" },
		{ F_MSTTIMEOUTPERR, "Master timeout FIFO parity error" },
		{ F_MSTGRPPERR, "Master response read queue SRAM parity error" },
		{ 0 }
	};
	/* Not const: .details is selected at run time based on the chip. */
	struct intr_info pcie_intr_info = {
		.name = "PCIE_INT_CAUSE",
		.cause_reg = A_PCIE_INT_CAUSE,
		.enable_reg = A_PCIE_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	if (is_t4(adap)) {
		/* T4 has two extra UTL status registers to service. */
		fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
		fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose);

		pcie_intr_info.details = pcie_intr_details;
	} else {
		pcie_intr_info.details = t5_pcie_intr_details;
	}
	fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);

	return (fatal);
}

/*
 * TP interrupt handler.
 */
static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details tp_intr_details[] = {
		{ 0x3fffffff, "TP parity error" },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages" },
		{ 0 }
	};
	static const struct intr_info tp_intr_info = {
		.name = "TP_INT_CAUSE",
		.cause_reg = A_TP_INT_CAUSE,
		.enable_reg = A_TP_INT_ENABLE,
		.fatal = 0x7fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = tp_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
}

/*
 * SGE interrupt handler.
 */
static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_info sge_int1_info = {
		.name = "SGE_INT_CAUSE1",
		.cause_reg = A_SGE_INT_CAUSE1,
		.enable_reg = A_SGE_INT_ENABLE1,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int2_info = {
		.name = "SGE_INT_CAUSE2",
		.cause_reg = A_SGE_INT_CAUSE2,
		.enable_reg = A_SGE_INT_ENABLE2,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* SGE_INT_CAUSE3 decode for T4/T5. */
	static const struct intr_details sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
		    "DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
		    "Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
		    "SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		    "SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		    "SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		    F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
		    "Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
		    "Egress context manager priority user error" },
		{ F_DBFIFO_HP_INT, "High priority DB FIFO threshold reached" },
		{ F_DBFIFO_LP_INT, "Low priority DB FIFO threshold reached" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	/* SGE_INT_CAUSE3 decode for T6+ (no DBFIFO bits; tbuf/WRE added). */
	static const struct intr_details t6_sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
		    "DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
		    "Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
		    "SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		    "SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		    "SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		    F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
		    "Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
		    "Egress context manager priority user error" },
		{ F_DBP_TBUF_FULL, "SGE DBP tbuf full" },
		{ F_FATAL_WRE_LEN,
		    "SGE WRE packet less than advertized length" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	/* Not const: .details is selected at run time based on the chip. */
	struct intr_info sge_int3_info = {
		.name = "SGE_INT_CAUSE3",
		.cause_reg = A_SGE_INT_CAUSE3,
		.enable_reg = A_SGE_INT_ENABLE3,
		.fatal = F_ERR_CPL_EXCEED_IQE_SIZE,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int4_info = {
		.name = "SGE_INT_CAUSE4",
		.cause_reg = A_SGE_INT_CAUSE4,
		.enable_reg = A_SGE_INT_ENABLE4,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int5_info = {
		.name = "SGE_INT_CAUSE5",
		.cause_reg = A_SGE_INT_CAUSE5,
		.enable_reg = A_SGE_INT_ENABLE5,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int6_info = {
		.name = "SGE_INT_CAUSE6",
		.cause_reg = A_SGE_INT_CAUSE6,
		.enable_reg = A_SGE_INT_ENABLE6,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};

	bool fatal;
	u32 v;

	if (chip_id(adap) <= CHELSIO_T5) {
		sge_int3_info.details = sge_int3_details;
	} else {
		sge_int3_info.details = t6_sge_int3_details;
	}

	fatal = false;
	fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
	/* CAUSE5 exists on T5+, CAUSE6 on T6+ only. */
	if (chip_id(adap) >= CHELSIO_T5)
		fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
	if (chip_id(adap) >= CHELSIO_T6)
		fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);

	/* Log and clear the captured QID of the last SGE error, if any. */
	v = t4_read_reg(adap, A_SGE_ERROR_STATS);
	if (v & F_ERROR_QID_VALID) {
		CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v));
		if (v & F_UNCAPTURED_ERROR)
			CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adap, A_SGE_ERROR_STATS,
		    F_ERROR_QID_VALID | F_UNCAPTURED_ERROR);
	}

	return (fatal);
}

/*
 * CIM interrupt handler.
5165 */ 5166 static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose) 5167 { 5168 static const struct intr_details cim_host_intr_details[] = { 5169 /* T6+ */ 5170 { F_PCIE2CIMINTFPARERR, "CIM IBQ PCIe interface parity error" }, 5171 5172 /* T5+ */ 5173 { F_MA_CIM_INTFPERR, "MA2CIM interface parity error" }, 5174 { F_PLCIM_MSTRSPDATAPARERR, 5175 "PL2CIM master response data parity error" }, 5176 { F_NCSI2CIMINTFPARERR, "CIM IBQ NC-SI interface parity error" }, 5177 { F_SGE2CIMINTFPARERR, "CIM IBQ SGE interface parity error" }, 5178 { F_ULP2CIMINTFPARERR, "CIM IBQ ULP_TX interface parity error" }, 5179 { F_TP2CIMINTFPARERR, "CIM IBQ TP interface parity error" }, 5180 { F_OBQSGERX1PARERR, "CIM OBQ SGE1_RX parity error" }, 5181 { F_OBQSGERX0PARERR, "CIM OBQ SGE0_RX parity error" }, 5182 5183 /* T4+ */ 5184 { F_TIEQOUTPARERRINT, "CIM TIEQ outgoing FIFO parity error" }, 5185 { F_TIEQINPARERRINT, "CIM TIEQ incoming FIFO parity error" }, 5186 { F_MBHOSTPARERR, "CIM mailbox host read parity error" }, 5187 { F_MBUPPARERR, "CIM mailbox uP parity error" }, 5188 { F_IBQTP0PARERR, "CIM IBQ TP0 parity error" }, 5189 { F_IBQTP1PARERR, "CIM IBQ TP1 parity error" }, 5190 { F_IBQULPPARERR, "CIM IBQ ULP parity error" }, 5191 { F_IBQSGELOPARERR, "CIM IBQ SGE_LO parity error" }, 5192 { F_IBQSGEHIPARERR | F_IBQPCIEPARERR, /* same bit */ 5193 "CIM IBQ PCIe/SGE_HI parity error" }, 5194 { F_IBQNCSIPARERR, "CIM IBQ NC-SI parity error" }, 5195 { F_OBQULP0PARERR, "CIM OBQ ULP0 parity error" }, 5196 { F_OBQULP1PARERR, "CIM OBQ ULP1 parity error" }, 5197 { F_OBQULP2PARERR, "CIM OBQ ULP2 parity error" }, 5198 { F_OBQULP3PARERR, "CIM OBQ ULP3 parity error" }, 5199 { F_OBQSGEPARERR, "CIM OBQ SGE parity error" }, 5200 { F_OBQNCSIPARERR, "CIM OBQ NC-SI parity error" }, 5201 { F_TIMER1INT, "CIM TIMER0 interrupt" }, 5202 { F_TIMER0INT, "CIM TIMER0 interrupt" }, 5203 { F_PREFDROPINT, "CIM control register prefetch drop" }, 5204 { 0} 5205 }; 5206 static const struct intr_info 
cim_host_intr_info = { 5207 .name = "CIM_HOST_INT_CAUSE", 5208 .cause_reg = A_CIM_HOST_INT_CAUSE, 5209 .enable_reg = A_CIM_HOST_INT_ENABLE, 5210 .fatal = 0x007fffe6, 5211 .flags = NONFATAL_IF_DISABLED, 5212 .details = cim_host_intr_details, 5213 .actions = NULL, 5214 }; 5215 static const struct intr_details cim_host_upacc_intr_details[] = { 5216 { F_EEPROMWRINT, "CIM EEPROM came out of busy state" }, 5217 { F_TIMEOUTMAINT, "CIM PIF MA timeout" }, 5218 { F_TIMEOUTINT, "CIM PIF timeout" }, 5219 { F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite" }, 5220 { F_REQOVRLOOKUPINT, "CIM request FIFO overwrite" }, 5221 { F_BLKWRPLINT, "CIM block write to PL space" }, 5222 { F_BLKRDPLINT, "CIM block read from PL space" }, 5223 { F_SGLWRPLINT, 5224 "CIM single write to PL space with illegal BEs" }, 5225 { F_SGLRDPLINT, 5226 "CIM single read from PL space with illegal BEs" }, 5227 { F_BLKWRCTLINT, "CIM block write to CTL space" }, 5228 { F_BLKRDCTLINT, "CIM block read from CTL space" }, 5229 { F_SGLWRCTLINT, 5230 "CIM single write to CTL space with illegal BEs" }, 5231 { F_SGLRDCTLINT, 5232 "CIM single read from CTL space with illegal BEs" }, 5233 { F_BLKWREEPROMINT, "CIM block write to EEPROM space" }, 5234 { F_BLKRDEEPROMINT, "CIM block read from EEPROM space" }, 5235 { F_SGLWREEPROMINT, 5236 "CIM single write to EEPROM space with illegal BEs" }, 5237 { F_SGLRDEEPROMINT, 5238 "CIM single read from EEPROM space with illegal BEs" }, 5239 { F_BLKWRFLASHINT, "CIM block write to flash space" }, 5240 { F_BLKRDFLASHINT, "CIM block read from flash space" }, 5241 { F_SGLWRFLASHINT, "CIM single write to flash space" }, 5242 { F_SGLRDFLASHINT, 5243 "CIM single read from flash space with illegal BEs" }, 5244 { F_BLKWRBOOTINT, "CIM block write to boot space" }, 5245 { F_BLKRDBOOTINT, "CIM block read from boot space" }, 5246 { F_SGLWRBOOTINT, "CIM single write to boot space" }, 5247 { F_SGLRDBOOTINT, 5248 "CIM single read from boot space with illegal BEs" }, 5249 { F_ILLWRBEINT, "CIM illegal 
write BEs" }, 5250 { F_ILLRDBEINT, "CIM illegal read BEs" }, 5251 { F_ILLRDINT, "CIM illegal read" }, 5252 { F_ILLWRINT, "CIM illegal write" }, 5253 { F_ILLTRANSINT, "CIM illegal transaction" }, 5254 { F_RSVDSPACEINT, "CIM reserved space access" }, 5255 {0} 5256 }; 5257 static const struct intr_info cim_host_upacc_intr_info = { 5258 .name = "CIM_HOST_UPACC_INT_CAUSE", 5259 .cause_reg = A_CIM_HOST_UPACC_INT_CAUSE, 5260 .enable_reg = A_CIM_HOST_UPACC_INT_ENABLE, 5261 .fatal = 0x3fffeeff, 5262 .flags = NONFATAL_IF_DISABLED, 5263 .details = cim_host_upacc_intr_details, 5264 .actions = NULL, 5265 }; 5266 static const struct intr_info cim_pf_host_intr_info = { 5267 .name = "CIM_PF_HOST_INT_CAUSE", 5268 .cause_reg = MYPF_REG(A_CIM_PF_HOST_INT_CAUSE), 5269 .enable_reg = MYPF_REG(A_CIM_PF_HOST_INT_ENABLE), 5270 .fatal = 0, 5271 .flags = 0, 5272 .details = NULL, 5273 .actions = NULL, 5274 }; 5275 u32 val, fw_err; 5276 bool fatal; 5277 5278 /* 5279 * When the Firmware detects an internal error which normally wouldn't 5280 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order 5281 * to make sure the Host sees the Firmware Crash. So if we have a 5282 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0 5283 * interrupt. 5284 */ 5285 fw_err = t4_read_reg(adap, A_PCIE_FW); 5286 val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE); 5287 if (val & F_TIMER0INT && (!(fw_err & F_PCIE_FW_ERR) || 5288 G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) { 5289 t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT); 5290 } 5291 5292 fatal = (fw_err & F_PCIE_FW_ERR) != 0; 5293 fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose); 5294 fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose); 5295 fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose); 5296 if (fatal) 5297 t4_os_cim_err(adap); 5298 5299 return (fatal); 5300 } 5301 5302 /* 5303 * ULP RX interrupt handler. 
 */
static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ulprx_intr_details[] = {
		/* T5+ */
		{ F_SE_CNT_MISMATCH_1, "ULPRX SE count mismatch in channel 1" },
		{ F_SE_CNT_MISMATCH_0, "ULPRX SE count mismatch in channel 0" },

		/* T4+ */
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error" },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error" },
		{ 0x007fffff, "ULPRX parity error" },
		{ 0 }
	};
	static const struct intr_info ulprx_intr_info = {
		.name = "ULP_RX_INT_CAUSE",
		.cause_reg = A_ULP_RX_INT_CAUSE,
		.enable_reg = A_ULP_RX_INT_ENABLE,
		.fatal = 0x07ffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = ulprx_intr_details,
		.actions = NULL,
	};
	/* Secondary cause register; nothing in it is treated as fatal. */
	static const struct intr_info ulprx_intr2_info = {
		.name = "ULP_RX_INT_CAUSE_2",
		.cause_reg = A_ULP_RX_INT_CAUSE_2,
		.enable_reg = A_ULP_RX_INT_ENABLE_2,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose);

	return (fatal);
}

/*
 * ULP TX interrupt handler.
 */
static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ulptx_intr_details[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds" },
		{ 0x0fffffff, "ULPTX parity error" },
		{ 0 }
	};
	static const struct intr_info ulptx_intr_info = {
		.name = "ULP_TX_INT_CAUSE",
		.cause_reg = A_ULP_TX_INT_CAUSE,
		.enable_reg = A_ULP_TX_INT_ENABLE,
		.fatal = 0x0fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = ulptx_intr_details,
		.actions = NULL,
	};
	static const struct intr_info ulptx_intr2_info = {
		.name = "ULP_TX_INT_CAUSE_2",
		.cause_reg = A_ULP_TX_INT_CAUSE_2,
		.enable_reg = A_ULP_TX_INT_ENABLE_2,
		.fatal = 0xf0,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);

	return (fatal);
}

/*
 * Dump the PM_TX debug stat registers.  Registered as an intr_action for the
 * PM TX cause register; always reports non-fatal (the cause bits decide).
 */
static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
{
	int i;
	u32 data[17];

	t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
	    ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
	for (i = 0; i < ARRAY_SIZE(data); i++) {
		CH_ALERT(adap, " - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i,
		    A_PM_TX_DBG_STAT0 + i, data[i]);
	}

	return (false);
}

/*
 * PM TX interrupt handler.
 */
static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* Any PM TX cause bit triggers a dump of the debug stats. */
	static const struct intr_action pmtx_intr_actions[] = {
		{ 0xffffffff, 0, pmtx_dump_dbg_stats },
		{ 0 },
	};
	static const struct intr_details pmtx_intr_details[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd" },
		{ 0x0f000000, "PMTX icspi FIFO2X Rx framing error" },
		{ 0x00f00000, "PMTX icspi FIFO Rx framing error" },
		{ 0x000f0000, "PMTX icspi FIFO Tx framing error" },
		{ 0x0000f000, "PMTX oespi FIFO Rx framing error" },
		{ 0x00000f00, "PMTX oespi FIFO Tx framing error" },
		{ 0x000000f0, "PMTX oespi FIFO2X Tx framing error" },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error" },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error" },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error" },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
		{ 0 }
	};
	static const struct intr_info pmtx_intr_info = {
		.name = "PM_TX_INT_CAUSE",
		.cause_reg = A_PM_TX_INT_CAUSE,
		.enable_reg = A_PM_TX_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = pmtx_intr_details,
		.actions = pmtx_intr_actions,
	};

	return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
}

/*
 * PM RX interrupt handler.
 */
static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details pmrx_intr_details[] = {
		/* T6+ */
		{ 0x18000000, "PMRX ospi overflow" },
		{ F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
		{ F_BUNDLE_LEN_PARERR, "PMRX bundle len FIFO parity error" },
		{ F_BUNDLE_LEN_OVFL, "PMRX bundle len FIFO overflow" },
		{ F_SDC_ERR, "PMRX SDC error" },

		/* T4+ */
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd" },
		{ 0x003c0000, "PMRX iespi FIFO2X Rx framing error" },
		{ 0x0003c000, "PMRX iespi Rx framing error" },
		{ 0x00003c00, "PMRX iespi Tx framing error" },
		{ 0x00000300, "PMRX ocspi Rx framing error" },
		{ 0x000000c0, "PMRX ocspi Tx framing error" },
		{ 0x00000030, "PMRX ocspi FIFO2X Tx framing error" },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error" },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error" },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error" },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
		{ 0 }
	};
	static const struct intr_info pmrx_intr_info = {
		.name = "PM_RX_INT_CAUSE",
		.cause_reg = A_PM_RX_INT_CAUSE,
		.enable_reg = A_PM_RX_INT_ENABLE,
		.fatal = 0x1fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = pmrx_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
}

/*
 * CPL switch interrupt handler.
 */
static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details cplsw_intr_details[] = {
		/* T5+ */
		{ F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
		{ F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },

		/* T4+ */
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error" },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow" },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error" },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error" },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error" },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
		{ 0 }
	};
	static const struct intr_info cplsw_intr_info = {
		.name = "CPL_INTR_CAUSE",
		.cause_reg = A_CPL_INTR_CAUSE,
		.enable_reg = A_CPL_INTR_ENABLE,
		.fatal = 0xff,
		.flags = NONFATAL_IF_DISABLED,
		.details = cplsw_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
}

/* Fatal LE_DB_INT_CAUSE bits, per chip generation. */
#define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
#define T5_LE_FATAL_MASK (T4_LE_FATAL_MASK | F_VFPARERR)
#define T6_LE_PERRCRC_MASK (F_PIPELINEERR | F_CLIPTCAMACCFAIL | \
    F_SRVSRAMACCFAIL | F_CLCAMCRCPARERR | F_CLCAMINTPERR | F_SSRAMINTPERR | \
    F_SRVSRAMPERR | F_VFSRAMPERR | F_TCAMINTPERR | F_TCAMCRCERR | \
    F_HASHTBLMEMACCERR | F_MAIFWRINTPERR | F_HASHTBLMEMCRCERR)
#define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
    F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
    F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)

/*
 * LE interrupt handler.
 */
static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* LE_DB_INT_CAUSE decode for T4/T5. */
	static const struct intr_details le_intr_details[] = {
		{ F_REQQPARERR, "LE request queue parity error" },
		{ F_UNKNOWNCMD, "LE unknown command" },
		{ F_ACTRGNFULL, "LE active region full" },
		{ F_PARITYERR, "LE parity error" },
		{ F_LIPMISS, "LE LIP miss" },
		{ F_LIP0, "LE 0 LIP error" },
		{ 0 }
	};
	/* LE_DB_INT_CAUSE decode for T6+ (different bit layout). */
	static const struct intr_details t6_le_intr_details[] = {
		{ F_CLIPSUBERR, "LE CLIP CAM reverse substitution error" },
		{ F_CLCAMFIFOERR, "LE CLIP CAM internal FIFO error" },
		{ F_CTCAMINVLDENT, "Invalid IPv6 CLIP TCAM entry" },
		{ F_TCAMINVLDENT, "Invalid IPv6 TCAM entry" },
		{ F_TOTCNTERR, "LE total active < TCAM count" },
		{ F_CMDPRSRINTERR, "LE internal error in parser" },
		{ F_CMDTIDERR, "Incorrect tid in LE command" },
		{ F_T6_ACTRGNFULL, "LE active region full" },
		{ F_T6_ACTCNTIPV6TZERO, "LE IPv6 active open TCAM counter -ve" },
		{ F_T6_ACTCNTIPV4TZERO, "LE IPv4 active open TCAM counter -ve" },
		{ F_T6_ACTCNTIPV6ZERO, "LE IPv6 active open counter -ve" },
		{ F_T6_ACTCNTIPV4ZERO, "LE IPv4 active open counter -ve" },
		{ F_HASHTBLACCFAIL, "Hash table read error (proto conflict)" },
		{ F_TCAMACCFAIL, "LE TCAM access failure" },
		{ F_T6_UNKNOWNCMD, "LE unknown command" },
		{ F_T6_LIP0, "LE found 0 LIP during CLIP substitution" },
		{ F_T6_LIPMISS, "LE CLIP lookup miss" },
		{ T6_LE_PERRCRC_MASK, "LE parity/CRC error" },
		{ 0 }
	};
	/* Not const: .details and .fatal are chosen at run time by chip. */
	struct intr_info le_intr_info = {
		.name = "LE_DB_INT_CAUSE",
		.cause_reg = A_LE_DB_INT_CAUSE,
		.enable_reg = A_LE_DB_INT_ENABLE,
		.fatal = 0,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};

	if (chip_id(adap) <= CHELSIO_T5) {
		le_intr_info.details = le_intr_details;
		le_intr_info.fatal = T5_LE_FATAL_MASK;
	} else {
		le_intr_info.details = t6_le_intr_details;
		le_intr_info.fatal = T6_LE_FATAL_MASK;
	}

	return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
}

/*
 * MPS interrupt handler.
 */
static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details mps_rx_perr_intr_details[] = {
		{ 0xffffffff, "MPS Rx parity error" },
		{ 0 }
	};
	static const struct intr_info mps_rx_perr_intr_info = {
		.name = "MPS_RX_PERR_INT_CAUSE",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_rx_perr_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_tx_intr_details[] = {
		{ F_PORTERR, "MPS Tx destination port is disabled" },
		{ F_FRMERR, "MPS Tx framing error" },
		{ F_SECNTERR, "MPS Tx SOP/EOP error" },
		{ F_BUBBLE, "MPS Tx underflow" },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error" },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error" },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error" },
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info = {
		.name = "MPS_TX_INT_CAUSE",
		.cause_reg = A_MPS_TX_INT_CAUSE,
		.enable_reg = A_MPS_TX_INT_ENABLE,
		.fatal = 0x1ffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_trc_intr_details[] = {
		{ F_MISCPERR, "MPS TRC misc parity error" },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error" },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info = {
		.name = "MPS_TRC_INT_CAUSE",
		.cause_reg = A_MPS_TRC_INT_CAUSE,
		.enable_reg = A_MPS_TRC_INT_ENABLE,
		.fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
		.flags = 0,
		.details = mps_trc_intr_details,
		.actions = NULL,
	};
	/* T7+ moved the TRC cause/enable registers; same bit decode. */
	static const struct intr_info t7_mps_trc_intr_info = {
		.name = "T7_MPS_TRC_INT_CAUSE",
		.cause_reg = A_T7_MPS_TRC_INT_CAUSE,
		.enable_reg = A_T7_MPS_TRC_INT_ENABLE,
		.fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
		.flags = 0,
		.details = mps_trc_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_sram_intr_details[] = {
		{ 0xffffffff, "MPS statistics SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
		.fatal = 0x1fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_stat_sram_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_tx_intr_details[] = {
		{ 0xffffff, "MPS statistics Tx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_TX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
		.fatal = 0xffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_stat_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_rx_intr_details[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_RX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO,
		.fatal = 0xffffff,
		.flags = 0,
		.details = mps_stat_rx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_cls_intr_details[] = {
		{ F_HASHSRAM, "MPS hash SRAM parity error" },
		{ F_MATCHTCAM, "MPS match TCAM parity error" },
		{ F_MATCHSRAM, "MPS match SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info = {
		.name = "MPS_CLS_INT_CAUSE",
		.cause_reg = A_MPS_CLS_INT_CAUSE,
		.enable_reg = A_MPS_CLS_INT_ENABLE,
		.fatal = F_MATCHSRAM | F_MATCHTCAM | F_HASHSRAM,
		.flags = 0,
		.details = mps_cls_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_sram1_intr_details[] = {
		{ 0xff, "MPS statistics SRAM1 parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram1_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM1",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM1,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM1,
		.fatal = 0xff,
		.flags = 0,
		.details = mps_stat_sram1_intr_details,
		.actions = NULL,
	};

	bool fatal;

	fatal = false;
	fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
	if (chip_id(adap) > CHELSIO_T6)
		fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, verbose);
	else
		fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
	/* SRAM1 cause register exists on T5 and later only. */
	if (chip_id(adap) > CHELSIO_T4) {
		fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
		    verbose);
	}

	t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
	t4_read_reg(adap, A_MPS_INT_CAUSE);	/* flush */

	return (fatal);

}

/*
 * EDC/MC interrupt handler.
5732 */ 5733 static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose) 5734 { 5735 static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" }; 5736 unsigned int count_reg, v; 5737 static const struct intr_details mem_intr_details[] = { 5738 { F_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" }, 5739 { F_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" }, 5740 { F_PERR_INT_CAUSE, "FIFO parity error" }, 5741 { 0 } 5742 }; 5743 struct intr_info ii = { 5744 .fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE, 5745 .details = mem_intr_details, 5746 .flags = 0, 5747 .actions = NULL, 5748 }; 5749 bool fatal; 5750 5751 switch (idx) { 5752 case MEM_EDC0: 5753 ii.name = "EDC0_INT_CAUSE"; 5754 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0); 5755 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0); 5756 count_reg = EDC_REG(A_EDC_ECC_STATUS, 0); 5757 break; 5758 case MEM_EDC1: 5759 ii.name = "EDC1_INT_CAUSE"; 5760 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1); 5761 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1); 5762 count_reg = EDC_REG(A_EDC_ECC_STATUS, 1); 5763 break; 5764 case MEM_MC0: 5765 ii.name = "MC0_INT_CAUSE"; 5766 if (is_t4(adap)) { 5767 ii.cause_reg = A_MC_INT_CAUSE; 5768 ii.enable_reg = A_MC_INT_ENABLE; 5769 count_reg = A_MC_ECC_STATUS; 5770 } else { 5771 ii.cause_reg = A_MC_P_INT_CAUSE; 5772 ii.enable_reg = A_MC_P_INT_ENABLE; 5773 count_reg = A_MC_P_ECC_STATUS; 5774 } 5775 break; 5776 case MEM_MC1: 5777 ii.name = "MC1_INT_CAUSE"; 5778 ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1); 5779 ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1); 5780 count_reg = MC_REG(A_MC_P_ECC_STATUS, 1); 5781 break; 5782 } 5783 5784 fatal = t4_handle_intr(adap, &ii, 0, verbose); 5785 5786 v = t4_read_reg(adap, count_reg); 5787 if (v != 0) { 5788 if (G_ECC_UECNT(v) != 0) { 5789 CH_ALERT(adap, 5790 "%s: %u uncorrectable ECC data error(s)\n", 5791 name[idx], G_ECC_UECNT(v)); 5792 } 5793 if (G_ECC_CECNT(v) != 0) { 5794 if (idx <= MEM_EDC1) 5795 t4_edc_err_read(adap, idx); 5796 
CH_WARN_RATELIMIT(adap, 5797 "%s: %u correctable ECC data error(s)\n", 5798 name[idx], G_ECC_CECNT(v)); 5799 } 5800 t4_write_reg(adap, count_reg, 0xffffffff); 5801 } 5802 5803 return (fatal); 5804 } 5805 5806 static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose) 5807 { 5808 u32 v; 5809 5810 v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS); 5811 CH_ALERT(adap, 5812 "MA address wrap-around error by client %u to address %#x\n", 5813 G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4); 5814 t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v); 5815 5816 return (false); 5817 } 5818 5819 5820 /* 5821 * MA interrupt handler. 5822 */ 5823 static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose) 5824 { 5825 static const struct intr_action ma_intr_actions[] = { 5826 { F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status }, 5827 { 0 }, 5828 }; 5829 static const struct intr_info ma_intr_info = { 5830 .name = "MA_INT_CAUSE", 5831 .cause_reg = A_MA_INT_CAUSE, 5832 .enable_reg = A_MA_INT_ENABLE, 5833 .fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE, 5834 .flags = NONFATAL_IF_DISABLED, 5835 .details = NULL, 5836 .actions = ma_intr_actions, 5837 }; 5838 static const struct intr_info ma_perr_status1 = { 5839 .name = "MA_PARITY_ERROR_STATUS1", 5840 .cause_reg = A_MA_PARITY_ERROR_STATUS1, 5841 .enable_reg = A_MA_PARITY_ERROR_ENABLE1, 5842 .fatal = 0xffffffff, 5843 .flags = 0, 5844 .details = NULL, 5845 .actions = NULL, 5846 }; 5847 static const struct intr_info ma_perr_status2 = { 5848 .name = "MA_PARITY_ERROR_STATUS2", 5849 .cause_reg = A_MA_PARITY_ERROR_STATUS2, 5850 .enable_reg = A_MA_PARITY_ERROR_ENABLE2, 5851 .fatal = 0xffffffff, 5852 .flags = 0, 5853 .details = NULL, 5854 .actions = NULL, 5855 }; 5856 bool fatal; 5857 5858 fatal = false; 5859 fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose); 5860 fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose); 5861 if (chip_id(adap) > CHELSIO_T4) 5862 fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose); 
5863 5864 return (fatal); 5865 } 5866 5867 /* 5868 * SMB interrupt handler. 5869 */ 5870 static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose) 5871 { 5872 static const struct intr_details smb_intr_details[] = { 5873 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" }, 5874 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" }, 5875 { F_SLVFIFOPARINT, "SMB slave FIFO parity error" }, 5876 { 0 } 5877 }; 5878 static const struct intr_info smb_intr_info = { 5879 .name = "SMB_INT_CAUSE", 5880 .cause_reg = A_SMB_INT_CAUSE, 5881 .enable_reg = A_SMB_INT_ENABLE, 5882 .fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT, 5883 .flags = 0, 5884 .details = smb_intr_details, 5885 .actions = NULL, 5886 }; 5887 5888 return (t4_handle_intr(adap, &smb_intr_info, 0, verbose)); 5889 } 5890 5891 /* 5892 * NC-SI interrupt handler. 5893 */ 5894 static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose) 5895 { 5896 static const struct intr_details ncsi_intr_details[] = { 5897 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" }, 5898 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" }, 5899 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" }, 5900 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" }, 5901 { 0 } 5902 }; 5903 static const struct intr_info ncsi_intr_info = { 5904 .name = "NCSI_INT_CAUSE", 5905 .cause_reg = A_NCSI_INT_CAUSE, 5906 .enable_reg = A_NCSI_INT_ENABLE, 5907 .fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR | 5908 F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR, 5909 .flags = 0, 5910 .details = ncsi_intr_details, 5911 .actions = NULL, 5912 }; 5913 5914 return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose)); 5915 } 5916 5917 /* 5918 * MAC interrupt handler. 
5919 */ 5920 static bool mac_intr_handler(struct adapter *adap, int port, bool verbose) 5921 { 5922 static const struct intr_details mac_intr_details[] = { 5923 { F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" }, 5924 { F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" }, 5925 { 0 } 5926 }; 5927 char name[32]; 5928 struct intr_info ii; 5929 bool fatal = false; 5930 5931 if (is_t4(adap)) { 5932 snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port); 5933 ii.name = &name[0]; 5934 ii.cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE); 5935 ii.enable_reg = PORT_REG(port, A_XGMAC_PORT_INT_EN); 5936 ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR; 5937 ii.flags = 0; 5938 ii.details = mac_intr_details; 5939 ii.actions = NULL; 5940 } else if (chip_id(adap) < CHELSIO_T7) { 5941 snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port); 5942 ii.name = &name[0]; 5943 ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE); 5944 ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_INT_EN); 5945 ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR; 5946 ii.flags = 0; 5947 ii.details = mac_intr_details; 5948 ii.actions = NULL; 5949 } else { 5950 snprintf(name, sizeof(name), "T7_MAC_PORT%u_INT_CAUSE", port); 5951 ii.name = &name[0]; 5952 ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_CAUSE); 5953 ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_EN); 5954 ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR; 5955 ii.flags = 0; 5956 ii.details = mac_intr_details; 5957 ii.actions = NULL; 5958 } 5959 fatal |= t4_handle_intr(adap, &ii, 0, verbose); 5960 5961 if (chip_id(adap) > CHELSIO_T6) { 5962 snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE", port); 5963 ii.name = &name[0]; 5964 ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE); 5965 ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN); 5966 ii.fatal = 0; 5967 ii.flags = 0; 5968 ii.details = NULL; 5969 ii.actions = NULL; 5970 fatal |= t4_handle_intr(adap, &ii, 0, verbose); 5971 } else if (chip_id(adap) >= 
/*
 * Action routine for a PL bus timeout: dump the two timeout status
 * registers so the offending access can be identified.  Never fatal by
 * itself (returns false).
 */
static bool pl_timeout_status(struct adapter *adap, int arg, bool verbose)
{

	CH_ALERT(adap, "    PL_TIMEOUT_STATUS 0x%08x 0x%08x\n",
	    t4_read_reg(adap, A_PL_TIMEOUT_STATUS0),
	    t4_read_reg(adap, A_PL_TIMEOUT_STATUS1));

	return (false);
}

/*
 * PL (PL_PL) interrupt handler.  Fatal parity errors plus a timeout
 * action that dumps the status registers above.  @arg is unused.
 */
static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_action plpl_intr_actions[] = {
		{ F_TIMEOUT, 0, pl_timeout_status },
		{ 0 },
	};
	static const struct intr_details plpl_intr_details[] = {
		{ F_PL_BUSPERR, "Bus parity error" },
		{ F_FATALPERR, "Fatal parity error" },
		{ F_INVALIDACCESS, "Global reserved memory access" },
		{ F_TIMEOUT, "Bus timeout" },
		{ F_PLERR, "Module reserved access" },
		{ F_PERRVFID, "VFID_MAP parity error" },
		{ 0 }
	};
	static const struct intr_info plpl_intr_info = {
		.name = "PL_PL_INT_CAUSE",
		.cause_reg = A_PL_PL_INT_CAUSE,
		.enable_reg = A_PL_PL_INT_ENABLE,
		.fatal = F_FATALPERR | F_PERRVFID,
		.flags = NONFATAL_IF_DISABLED,
		.details = plpl_intr_details,
		.actions = plpl_intr_actions,
	};

	return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
}
/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adap: the adapter
 *	@verbose: increased verbosity, for debug
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 */
bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{
	/* Bit -> module-name decode for PL_INT_CAUSE on pre-T7 chips. */
	static const struct intr_details pl_intr_details[] = {
		{ F_MC1, "MC1" },
		{ F_UART, "UART" },
		{ F_ULP_TX, "ULP TX" },
		{ F_SGE, "SGE" },
		{ F_HMA, "HMA" },
		{ F_CPL_SWITCH, "CPL Switch" },
		{ F_ULP_RX, "ULP RX" },
		{ F_PM_RX, "PM RX" },
		{ F_PM_TX, "PM TX" },
		{ F_MA, "MA" },
		{ F_TP, "TP" },
		{ F_LE, "LE" },
		{ F_EDC1, "EDC1" },
		{ F_EDC0, "EDC0" },
		{ F_MC, "MC0" },
		{ F_PCIE, "PCIE" },
		{ F_PMU, "PMU" },
		{ F_MAC3, "MAC3" },
		{ F_MAC2, "MAC2" },
		{ F_MAC1, "MAC1" },
		{ F_MAC0, "MAC0" },
		{ F_SMB, "SMB" },
		{ F_SF, "SF" },
		{ F_PL, "PL" },
		{ F_NCSI, "NC-SI" },
		{ F_MPS, "MPS" },
		{ F_MI, "MI" },
		{ F_DBG, "DBG" },
		{ F_I2CM, "I2CM" },
		{ F_CIM, "CIM" },
		{ 0 }
	};
	/* Same decode for the T7+ bit layout. */
	static const struct intr_details t7_pl_intr_details[] = {
		{ F_T7_MC1, "MC1" },
		{ F_T7_ULP_TX, "ULP TX" },
		{ F_T7_SGE, "SGE" },
		{ F_T7_CPL_SWITCH, "CPL Switch" },
		{ F_T7_ULP_RX, "ULP RX" },
		{ F_T7_PM_RX, "PM RX" },
		{ F_T7_PM_TX, "PM TX" },
		{ F_T7_MA, "MA" },
		{ F_T7_TP, "TP" },
		{ F_T7_LE, "LE" },
		{ F_T7_EDC1, "EDC1" },
		{ F_T7_EDC0, "EDC0" },
		{ F_T7_MC0, "MC0" },
		{ F_T7_PCIE, "PCIE" },
		{ F_MAC3, "MAC3" },
		{ F_MAC2, "MAC2" },
		{ F_MAC1, "MAC1" },
		{ F_MAC0, "MAC0" },
		{ F_SMB, "SMB" },
		{ F_PL, "PL" },
		{ F_NCSI, "NC-SI" },
		{ F_MPS, "MPS" },
		{ F_DBG, "DBG" },
		{ F_I2CM, "I2CM" },
		{ F_MI, "MI" },
		{ F_CIM, "CIM" },
		{ 0 }
	};
	/* On stack (not static const): .details is selected per-chip below. */
	struct intr_info pl_perr_cause = {
		.name = "PL_PERR_CAUSE",
		.cause_reg = A_PL_PERR_CAUSE,
		.enable_reg = A_PL_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* Per-module dispatch table for PL_INT_CAUSE bits (pre-T7). */
	static const struct intr_action pl_intr_action[] = {
		{ F_MC1, MEM_MC1, mem_intr_handler },
		{ F_ULP_TX, -1, ulptx_intr_handler },
		{ F_SGE, -1, sge_intr_handler },
		{ F_CPL_SWITCH, -1, cplsw_intr_handler },
		{ F_ULP_RX, -1, ulprx_intr_handler },
		{ F_PM_RX, -1, pmrx_intr_handler},
		{ F_PM_TX, -1, pmtx_intr_handler},
		{ F_MA, -1, ma_intr_handler },
		{ F_TP, -1, tp_intr_handler },
		{ F_LE, -1, le_intr_handler },
		{ F_EDC1, MEM_EDC1, mem_intr_handler },
		{ F_EDC0, MEM_EDC0, mem_intr_handler },
		{ F_MC0, MEM_MC0, mem_intr_handler },
		{ F_PCIE, -1, pcie_intr_handler },
		{ F_MAC3, 3, mac_intr_handler},
		{ F_MAC2, 2, mac_intr_handler},
		{ F_MAC1, 1, mac_intr_handler},
		{ F_MAC0, 0, mac_intr_handler},
		{ F_SMB, -1, smb_intr_handler},
		{ F_PL, -1, plpl_intr_handler },
		{ F_NCSI, -1, ncsi_intr_handler},
		{ F_MPS, -1, mps_intr_handler },
		{ F_CIM, -1, cim_intr_handler },
		{ 0 }
	};
	/* Dispatch table for the T7+ bit layout. */
	static const struct intr_action t7_pl_intr_action[] = {
		{ F_T7_ULP_TX, -1, ulptx_intr_handler },
		{ F_T7_SGE, -1, sge_intr_handler },
		{ F_T7_CPL_SWITCH, -1, cplsw_intr_handler },
		{ F_T7_ULP_RX, -1, ulprx_intr_handler },
		{ F_T7_PM_RX, -1, pmrx_intr_handler},
		{ F_T7_PM_TX, -1, pmtx_intr_handler},
		{ F_T7_MA, -1, ma_intr_handler },
		{ F_T7_TP, -1, tp_intr_handler },
		{ F_T7_LE, -1, le_intr_handler },
		{ F_T7_EDC1, MEM_EDC1, mem_intr_handler },
		{ F_T7_EDC0, MEM_EDC0, mem_intr_handler },
		{ F_T7_MC1, MEM_MC1, mem_intr_handler },
		{ F_T7_MC0, MEM_MC0, mem_intr_handler },
		{ F_T7_PCIE, -1, pcie_intr_handler },
		{ F_MAC3, 3, mac_intr_handler},
		{ F_MAC2, 2, mac_intr_handler},
		{ F_MAC1, 1, mac_intr_handler},
		{ F_MAC0, 0, mac_intr_handler},
		{ F_SMB, -1, smb_intr_handler},
		{ F_PL, -1, plpl_intr_handler },
		{ F_NCSI, -1, ncsi_intr_handler},
		{ F_MPS, -1, mps_intr_handler },
		{ F_CIM, -1, cim_intr_handler },
		{ 0 }
	};
	/* On stack: .details/.actions are selected per-chip below. */
	struct intr_info pl_intr_info = {
		.name = "PL_INT_CAUSE",
		.cause_reg = A_PL_INT_CAUSE,
		.enable_reg = A_PL_INT_ENABLE,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	u32 perr;

	/* Pick the decode/dispatch tables that match this chip's bit layout. */
	if (chip_id(adap) >= CHELSIO_T7) {
		pl_perr_cause.details = t7_pl_intr_details;
		pl_intr_info.details = t7_pl_intr_details;
		pl_intr_info.actions = t7_pl_intr_action;
	} else {
		pl_perr_cause.details = pl_intr_details;
		pl_intr_info.details = pl_intr_details;
		pl_intr_info.actions = pl_intr_action;
	}

	/*
	 * Handle PL_PERR_CAUSE first and remember the set bits so the
	 * per-module handlers below run even if the corresponding
	 * PL_INT_CAUSE bit didn't latch.
	 */
	perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
	if (verbose || perr != 0) {
		t4_show_intr_info(adap, &pl_perr_cause, perr);
		if (perr != 0)
			t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
		if (verbose)
			/* In verbose mode visit every enabled module too. */
			perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
	}

	return (t4_handle_intr(adap, &pl_intr_info, perr, verbose));
}
/**
 *	t4_intr_enable - enable interrupts
 *	@adap: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the
 *	top-level interrupt concentrator for global interrupts.  Interrupts
 *	are already enabled at each module, here we just enable the roots of
 *	the interrupt hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.  Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adap)
{
	u32 mask, val;

	/* SGE doorbell FIFO causes exist on T4/T5 only; T6+ report PCIe
	 * errors and WRE length errors instead. */
	if (chip_id(adap) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT |
		    F_DBFIFO_LP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
	    F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
	    F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
	    F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
	    F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_EGRESS_SIZE_ERR;
	mask = val;
	t4_set_reg_field(adap, A_SGE_INT_ENABLE3, mask, val);
	/* Enable this PF's own (PFSW/PFCIM) interrupts. */
	t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* SF and I2CM causes are not handled by this driver; mask them off. */
	t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
	/* Route global (non-PF) interrupts to this PF. */
	t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
}
6256 */ 6257 void t4_intr_disable(struct adapter *adap) 6258 { 6259 6260 t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0); 6261 t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0); 6262 } 6263 6264 /** 6265 * hash_mac_addr - return the hash value of a MAC address 6266 * @addr: the 48-bit Ethernet MAC address 6267 * 6268 * Hashes a MAC address according to the hash function used by HW inexact 6269 * (hash) address matching. 6270 */ 6271 static int hash_mac_addr(const u8 *addr) 6272 { 6273 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; 6274 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; 6275 a ^= b; 6276 a ^= (a >> 12); 6277 a ^= (a >> 6); 6278 return a & 0x3f; 6279 } 6280 6281 /** 6282 * t4_config_rss_range - configure a portion of the RSS mapping table 6283 * @adapter: the adapter 6284 * @mbox: mbox to use for the FW command 6285 * @viid: virtual interface whose RSS subtable is to be written 6286 * @start: start entry in the table to write 6287 * @n: how many table entries to write 6288 * @rspq: values for the "response queue" (Ingress Queue) lookup table 6289 * @nrspq: number of values in @rspq 6290 * 6291 * Programs the selected part of the VI's RSS mapping table with the 6292 * provided values. If @nrspq < @n the supplied values are used repeatedly 6293 * until the full table range is populated. 6294 * 6295 * The caller must ensure the values in @rspq are in the range allowed for 6296 * @viid. 
/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used
 *	repeatedly until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed
 *	for @viid.  Returns 0 on success or a mailbox error.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
		      int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* wrap point for reuse */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;	/* reuse from start */
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 *	t4_config_glbl_rss - configure the global RSS mode
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@mode: global RSS mode
 *	@flags: mode-specific flags
 *
 *	Sets the global RSS mode.  Only the MANUAL and BASICVIRTUAL modes
 *	are supported; anything else returns -EINVAL.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_keymode =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
	} else
		return -EINVAL;
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_config_vi_rss - configure per VI RSS settings
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: the VI id
 *	@flags: RSS flags
 *	@defq: id of the default RSS queue for the VI.
 *	@skeyidx: RSS secret key table index for non-global mode
 *	@skey: RSS vf_scramble key for VI.
 *
 *	Configures VI-specific RSS properties.  Returns 0 on success or a
 *	mailbox error.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
		     unsigned int skey)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);

	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/*
 * Read an RSS table row.  Writes the row index (with the request bits set
 * in the top nibble-region) and then polls for LKPTBLROWVLD; the row data
 * is returned in *val.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
/**
 *	t4_read_rss - read the contents of the RSS mapping table
 *	@adapter: the adapter
 *	@map: holds the contents of the RSS mapping table
 *
 *	Reads the contents of the RSS hash->queue mapping table.  Each row
 *	holds two entries, so rss_nentries/2 rows are read and unpacked
 *	into @map.  Returns 0 on success or the first row-read error.
 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 val;
	int i, ret;
	int rss_nentries = adapter->chip_params->rss_nentries;

	for (i = 0; i < rss_nentries / 2; ++i) {
		ret = rd_rss_row(adapter, i, &val);
		if (ret)
			return ret;
		*map++ = G_LKPTBLQUEUE0(val);
		*map++ = G_LKPTBLQUEUE1(val);
	}
	return 0;
}

/**
 *	t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 *	@adap: the adapter
 *	@cmd: TP fw ldst address space type
 *	@vals: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: Read (1) or Write (0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Access TP indirect registers through LDST.  One mailbox command is
 *	issued per register.  Returns 0 on success or the first mailbox
 *	error.
 **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}

/**
 *	t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 *	@adap: the adapter
 *	@reg_addr: Address Register
 *	@reg_data: Data register
 *	@buff: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: READ(1) or WRITE(0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read/Write TP indirect registers through LDST if possible.
 *	Else, use backdoor access (direct address/data register pair).
 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;	/* forces backdoor path unless LDST succeeds */
	int cmd;

	/* Map the address register to the matching LDST address space. */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	if (rc) {
		/* LDST unavailable or failed: fall back to backdoor access. */
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}
/**
 *	t4_tp_pio_read - Read TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP PIO Registers (thin wrapper over t4_tp_indirect_rw).
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 *	t4_tp_pio_write - Write TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are stored
 *	@nregs: how many indirect registers to write
 *	@start_index: index of first indirect register to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Write TP PIO Registers.  @buff is not modified; the __DECONST is
 *	only needed because the shared read/write helper takes a non-const
 *	pointer.
 **/
void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
	    __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
}

/**
 *	t4_tp_tm_pio_read - Read TP TM PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP TM PIO Registers (thin wrapper over t4_tp_indirect_rw).
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}
nregs, u32 start_index, 6625 bool sleep_ok) 6626 { 6627 t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs, 6628 start_index, 1, sleep_ok); 6629 } 6630 6631 /** 6632 * t4_read_rss_key - read the global RSS key 6633 * @adap: the adapter 6634 * @key: 10-entry array holding the 320-bit RSS key 6635 * @sleep_ok: if true we may sleep while awaiting command completion 6636 * 6637 * Reads the global 320-bit RSS key. 6638 */ 6639 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok) 6640 { 6641 t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok); 6642 } 6643 6644 /** 6645 * t4_write_rss_key - program one of the RSS keys 6646 * @adap: the adapter 6647 * @key: 10-entry array holding the 320-bit RSS key 6648 * @idx: which RSS key to write 6649 * @sleep_ok: if true we may sleep while awaiting command completion 6650 * 6651 * Writes one of the RSS keys with the given 320-bit value. If @idx is 6652 * 0..15 the corresponding entry in the RSS key table is written, 6653 * otherwise the global RSS key is written. 
6654 */ 6655 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx, 6656 bool sleep_ok) 6657 { 6658 u8 rss_key_addr_cnt = 16; 6659 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT); 6660 6661 /* 6662 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble), 6663 * allows access to key addresses 16-63 by using KeyWrAddrX 6664 * as index[5:4](upper 2) into key table 6665 */ 6666 if ((chip_id(adap) > CHELSIO_T5) && 6667 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3)) 6668 rss_key_addr_cnt = 32; 6669 6670 t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok); 6671 6672 if (idx >= 0 && idx < rss_key_addr_cnt) { 6673 if (rss_key_addr_cnt > 16) 6674 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, 6675 vrt | V_KEYWRADDRX(idx >> 4) | 6676 V_T6_VFWRADDR(idx) | F_KEYWREN); 6677 else 6678 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, 6679 vrt| V_KEYWRADDR(idx) | F_KEYWREN); 6680 } 6681 } 6682 6683 /** 6684 * t4_read_rss_pf_config - read PF RSS Configuration Table 6685 * @adapter: the adapter 6686 * @index: the entry in the PF RSS table to read 6687 * @valp: where to store the returned value 6688 * @sleep_ok: if true we may sleep while awaiting command completion 6689 * 6690 * Reads the PF RSS Configuration Table at the specified index and returns 6691 * the value found there. 6692 */ 6693 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, 6694 u32 *valp, bool sleep_ok) 6695 { 6696 t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok); 6697 } 6698 6699 /** 6700 * t4_write_rss_pf_config - write PF RSS Configuration Table 6701 * @adapter: the adapter 6702 * @index: the entry in the VF RSS table to read 6703 * @val: the value to store 6704 * @sleep_ok: if true we may sleep while awaiting command completion 6705 * 6706 * Writes the PF RSS Configuration Table at the specified index with the 6707 * specified value. 
6708 */ 6709 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, 6710 u32 val, bool sleep_ok) 6711 { 6712 t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index, 6713 sleep_ok); 6714 } 6715 6716 /** 6717 * t4_read_rss_vf_config - read VF RSS Configuration Table 6718 * @adapter: the adapter 6719 * @index: the entry in the VF RSS table to read 6720 * @vfl: where to store the returned VFL 6721 * @vfh: where to store the returned VFH 6722 * @sleep_ok: if true we may sleep while awaiting command completion 6723 * 6724 * Reads the VF RSS Configuration Table at the specified index and returns 6725 * the (VFL, VFH) values found there. 6726 */ 6727 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, 6728 u32 *vfl, u32 *vfh, bool sleep_ok) 6729 { 6730 u32 vrt, mask, data; 6731 6732 if (chip_id(adapter) <= CHELSIO_T5) { 6733 mask = V_VFWRADDR(M_VFWRADDR); 6734 data = V_VFWRADDR(index); 6735 } else { 6736 mask = V_T6_VFWRADDR(M_T6_VFWRADDR); 6737 data = V_T6_VFWRADDR(index); 6738 } 6739 /* 6740 * Request that the index'th VF Table values be read into VFL/VFH. 6741 */ 6742 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); 6743 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask); 6744 vrt |= data | F_VFRDEN; 6745 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); 6746 6747 /* 6748 * Grab the VFL/VFH values ... 6749 */ 6750 t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok); 6751 t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok); 6752 } 6753 6754 /** 6755 * t4_write_rss_vf_config - write VF RSS Configuration Table 6756 * 6757 * @adapter: the adapter 6758 * @index: the entry in the VF RSS table to write 6759 * @vfl: the VFL to store 6760 * @vfh: the VFH to store 6761 * 6762 * Writes the VF RSS Configuration Table at the specified index with the 6763 * specified (VFL, VFH) values. 
6764 */ 6765 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, 6766 u32 vfl, u32 vfh, bool sleep_ok) 6767 { 6768 u32 vrt, mask, data; 6769 6770 if (chip_id(adapter) <= CHELSIO_T5) { 6771 mask = V_VFWRADDR(M_VFWRADDR); 6772 data = V_VFWRADDR(index); 6773 } else { 6774 mask = V_T6_VFWRADDR(M_T6_VFWRADDR); 6775 data = V_T6_VFWRADDR(index); 6776 } 6777 6778 /* 6779 * Load up VFL/VFH with the values to be written ... 6780 */ 6781 t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok); 6782 t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok); 6783 6784 /* 6785 * Write the VFL/VFH into the VF Table at index'th location. 6786 */ 6787 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); 6788 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask); 6789 vrt |= data | F_VFRDEN; 6790 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); 6791 } 6792 6793 /** 6794 * t4_read_rss_pf_map - read PF RSS Map 6795 * @adapter: the adapter 6796 * @sleep_ok: if true we may sleep while awaiting command completion 6797 * 6798 * Reads the PF RSS Map register and returns its value. 6799 */ 6800 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok) 6801 { 6802 u32 pfmap; 6803 6804 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok); 6805 6806 return pfmap; 6807 } 6808 6809 /** 6810 * t4_write_rss_pf_map - write PF RSS Map 6811 * @adapter: the adapter 6812 * @pfmap: PF RSS Map value 6813 * 6814 * Writes the specified value to the PF RSS Map register. 6815 */ 6816 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok) 6817 { 6818 t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok); 6819 } 6820 6821 /** 6822 * t4_read_rss_pf_mask - read PF RSS Mask 6823 * @adapter: the adapter 6824 * @sleep_ok: if true we may sleep while awaiting command completion 6825 * 6826 * Reads the PF RSS Mask register and returns its value. 
6827 */ 6828 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok) 6829 { 6830 u32 pfmask; 6831 6832 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok); 6833 6834 return pfmask; 6835 } 6836 6837 /** 6838 * t4_write_rss_pf_mask - write PF RSS Mask 6839 * @adapter: the adapter 6840 * @pfmask: PF RSS Mask value 6841 * 6842 * Writes the specified value to the PF RSS Mask register. 6843 */ 6844 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok) 6845 { 6846 t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok); 6847 } 6848 6849 /** 6850 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 6851 * @adap: the adapter 6852 * @v4: holds the TCP/IP counter values 6853 * @v6: holds the TCP/IPv6 counter values 6854 * @sleep_ok: if true we may sleep while awaiting command completion 6855 * 6856 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. 6857 * Either @v4 or @v6 may be %NULL to skip the corresponding stats. 6858 */ 6859 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 6860 struct tp_tcp_stats *v6, bool sleep_ok) 6861 { 6862 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1]; 6863 6864 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST) 6865 #define STAT(x) val[STAT_IDX(x)] 6866 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) 6867 6868 if (v4) { 6869 t4_tp_mib_read(adap, val, ARRAY_SIZE(val), 6870 A_TP_MIB_TCP_OUT_RST, sleep_ok); 6871 v4->tcp_out_rsts = STAT(OUT_RST); 6872 v4->tcp_in_segs = STAT64(IN_SEG); 6873 v4->tcp_out_segs = STAT64(OUT_SEG); 6874 v4->tcp_retrans_segs = STAT64(RXT_SEG); 6875 } 6876 if (v6) { 6877 t4_tp_mib_read(adap, val, ARRAY_SIZE(val), 6878 A_TP_MIB_TCP_V6OUT_RST, sleep_ok); 6879 v6->tcp_out_rsts = STAT(OUT_RST); 6880 v6->tcp_in_segs = STAT64(IN_SEG); 6881 v6->tcp_out_segs = STAT64(OUT_SEG); 6882 v6->tcp_retrans_segs = STAT64(RXT_SEG); 6883 } 6884 #undef STAT64 6885 #undef STAT 6886 #undef STAT_IDX 6887 } 6888 6889 /** 6890 * 
t4_tp_get_err_stats - read TP's error MIB counters 6891 * @adap: the adapter 6892 * @st: holds the counter values 6893 * @sleep_ok: if true we may sleep while awaiting command completion 6894 * 6895 * Returns the values of TP's error counters. 6896 */ 6897 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st, 6898 bool sleep_ok) 6899 { 6900 int nchan = adap->chip_params->nchan; 6901 6902 t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0, 6903 sleep_ok); 6904 6905 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0, 6906 sleep_ok); 6907 6908 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0, 6909 sleep_ok); 6910 6911 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan, 6912 A_TP_MIB_TNL_CNG_DROP_0, sleep_ok); 6913 6914 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan, 6915 A_TP_MIB_OFD_CHN_DROP_0, sleep_ok); 6916 6917 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0, 6918 sleep_ok); 6919 6920 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan, 6921 A_TP_MIB_OFD_VLN_DROP_0, sleep_ok); 6922 6923 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan, 6924 A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok); 6925 6926 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP, 6927 sleep_ok); 6928 } 6929 6930 /** 6931 * t4_tp_get_err_stats - read TP's error MIB counters 6932 * @adap: the adapter 6933 * @st: holds the counter values 6934 * @sleep_ok: if true we may sleep while awaiting command completion 6935 * 6936 * Returns the values of TP's error counters. 
6937 */ 6938 void t4_tp_get_tnl_stats(struct adapter *adap, struct tp_tnl_stats *st, 6939 bool sleep_ok) 6940 { 6941 int nchan = adap->chip_params->nchan; 6942 6943 t4_tp_mib_read(adap, st->out_pkt, nchan, A_TP_MIB_TNL_OUT_PKT_0, 6944 sleep_ok); 6945 t4_tp_mib_read(adap, st->in_pkt, nchan, A_TP_MIB_TNL_IN_PKT_0, 6946 sleep_ok); 6947 } 6948 6949 /** 6950 * t4_tp_get_proxy_stats - read TP's proxy MIB counters 6951 * @adap: the adapter 6952 * @st: holds the counter values 6953 * 6954 * Returns the values of TP's proxy counters. 6955 */ 6956 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st, 6957 bool sleep_ok) 6958 { 6959 int nchan = adap->chip_params->nchan; 6960 6961 t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok); 6962 } 6963 6964 /** 6965 * t4_tp_get_cpl_stats - read TP's CPL MIB counters 6966 * @adap: the adapter 6967 * @st: holds the counter values 6968 * @sleep_ok: if true we may sleep while awaiting command completion 6969 * 6970 * Returns the values of TP's CPL counters. 6971 */ 6972 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st, 6973 bool sleep_ok) 6974 { 6975 int nchan = adap->chip_params->nchan; 6976 6977 t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok); 6978 6979 t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok); 6980 } 6981 6982 /** 6983 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters 6984 * @adap: the adapter 6985 * @st: holds the counter values 6986 * 6987 * Returns the values of TP's RDMA counters. 
6988 */ 6989 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st, 6990 bool sleep_ok) 6991 { 6992 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT, 6993 sleep_ok); 6994 6995 if (chip_id(adap) >= CHELSIO_T7) 6996 /* read RDMA stats IN and OUT for all ports at once */ 6997 t4_tp_mib_read(adap, &st->pkts_in[0], 28, A_TP_MIB_RDMA_IN_PKT_0, 6998 sleep_ok); 6999 } 7000 7001 /** 7002 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port 7003 * @adap: the adapter 7004 * @idx: the port index 7005 * @st: holds the counter values 7006 * @sleep_ok: if true we may sleep while awaiting command completion 7007 * 7008 * Returns the values of TP's FCoE counters for the selected port. 7009 */ 7010 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, 7011 struct tp_fcoe_stats *st, bool sleep_ok) 7012 { 7013 u32 val[2]; 7014 7015 t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx, 7016 sleep_ok); 7017 7018 t4_tp_mib_read(adap, &st->frames_drop, 1, 7019 A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok); 7020 7021 t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx, 7022 sleep_ok); 7023 7024 st->octets_ddp = ((u64)val[0] << 32) | val[1]; 7025 } 7026 7027 /** 7028 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters 7029 * @adap: the adapter 7030 * @st: holds the counter values 7031 * @sleep_ok: if true we may sleep while awaiting command completion 7032 * 7033 * Returns the values of TP's counters for non-TCP directly-placed packets. 7034 */ 7035 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st, 7036 bool sleep_ok) 7037 { 7038 u32 val[4]; 7039 7040 t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok); 7041 7042 st->frames = val[0]; 7043 st->drops = val[1]; 7044 st->octets = ((u64)val[2] << 32) | val[3]; 7045 } 7046 7047 /** 7048 * t4_tp_get_tid_stats - read TP's tid MIB counters. 
7049 * @adap: the adapter 7050 * @st: holds the counter values 7051 * @sleep_ok: if true we may sleep while awaiting command completion 7052 * 7053 * Returns the values of TP's counters for tids. 7054 */ 7055 void t4_tp_get_tid_stats(struct adapter *adap, struct tp_tid_stats *st, 7056 bool sleep_ok) 7057 { 7058 7059 t4_tp_mib_read(adap, &st->del, 4, A_TP_MIB_TID_DEL, sleep_ok); 7060 } 7061 7062 /** 7063 * t4_read_mtu_tbl - returns the values in the HW path MTU table 7064 * @adap: the adapter 7065 * @mtus: where to store the MTU values 7066 * @mtu_log: where to store the MTU base-2 log (may be %NULL) 7067 * 7068 * Reads the HW path MTU table. 7069 */ 7070 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) 7071 { 7072 u32 v; 7073 int i; 7074 7075 for (i = 0; i < NMTUS; ++i) { 7076 t4_write_reg(adap, A_TP_MTU_TABLE, 7077 V_MTUINDEX(0xff) | V_MTUVALUE(i)); 7078 v = t4_read_reg(adap, A_TP_MTU_TABLE); 7079 mtus[i] = G_MTUVALUE(v); 7080 if (mtu_log) 7081 mtu_log[i] = G_MTUWIDTH(v); 7082 } 7083 } 7084 7085 /** 7086 * t4_read_cong_tbl - reads the congestion control table 7087 * @adap: the adapter 7088 * @incr: where to store the alpha values 7089 * 7090 * Reads the additive increments programmed into the HW congestion 7091 * control table. 7092 */ 7093 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]) 7094 { 7095 unsigned int mtu, w; 7096 7097 for (mtu = 0; mtu < NMTUS; ++mtu) 7098 for (w = 0; w < NCCTRL_WIN; ++w) { 7099 t4_write_reg(adap, A_TP_CCTRL_TABLE, 7100 V_ROWINDEX(0xffff) | (mtu << 5) | w); 7101 incr[mtu][w] = (u16)t4_read_reg(adap, 7102 A_TP_CCTRL_TABLE) & 0x1fff; 7103 } 7104 } 7105 7106 /** 7107 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register 7108 * @adap: the adapter 7109 * @addr: the indirect TP register address 7110 * @mask: specifies the field within the register to modify 7111 * @val: new value for the field 7112 * 7113 * Sets a field of an indirect TP register to the given value. 
7114 */ 7115 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 7116 unsigned int mask, unsigned int val) 7117 { 7118 t4_write_reg(adap, A_TP_PIO_ADDR, addr); 7119 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; 7120 t4_write_reg(adap, A_TP_PIO_DATA, val); 7121 } 7122 7123 /** 7124 * init_cong_ctrl - initialize congestion control parameters 7125 * @a: the alpha values for congestion control 7126 * @b: the beta values for congestion control 7127 * 7128 * Initialize the congestion control parameters. 7129 */ 7130 static void init_cong_ctrl(unsigned short *a, unsigned short *b) 7131 { 7132 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 7133 a[9] = 2; 7134 a[10] = 3; 7135 a[11] = 4; 7136 a[12] = 5; 7137 a[13] = 6; 7138 a[14] = 7; 7139 a[15] = 8; 7140 a[16] = 9; 7141 a[17] = 10; 7142 a[18] = 14; 7143 a[19] = 17; 7144 a[20] = 21; 7145 a[21] = 25; 7146 a[22] = 30; 7147 a[23] = 35; 7148 a[24] = 45; 7149 a[25] = 60; 7150 a[26] = 80; 7151 a[27] = 100; 7152 a[28] = 200; 7153 a[29] = 300; 7154 a[30] = 400; 7155 a[31] = 500; 7156 7157 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 7158 b[9] = b[10] = 1; 7159 b[11] = b[12] = 2; 7160 b[13] = b[14] = b[15] = b[16] = 3; 7161 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 7162 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 7163 b[28] = b[29] = 6; 7164 b[30] = b[31] = 7; 7165 } 7166 7167 /* The minimum additive increment value for the congestion control table */ 7168 #define CC_MIN_INCR 2U 7169 7170 /** 7171 * t4_load_mtus - write the MTU and congestion control HW tables 7172 * @adap: the adapter 7173 * @mtus: the values for the MTU table 7174 * @alpha: the values for the congestion control alpha parameter 7175 * @beta: the values for the congestion control beta parameter 7176 * 7177 * Write the HW MTU table with the supplied MTUs and the high-speed 7178 * congestion control table with the supplied alpha, beta, and MTUs. 
7179 * We write the two tables together because the additive increments 7180 * depend on the MTUs. 7181 */ 7182 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 7183 const unsigned short *alpha, const unsigned short *beta) 7184 { 7185 static const unsigned int avg_pkts[NCCTRL_WIN] = { 7186 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 7187 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 7188 28672, 40960, 57344, 81920, 114688, 163840, 229376 7189 }; 7190 7191 unsigned int i, w; 7192 7193 for (i = 0; i < NMTUS; ++i) { 7194 unsigned int mtu = mtus[i]; 7195 unsigned int log2 = fls(mtu); 7196 7197 if (!(mtu & ((1 << log2) >> 2))) /* round */ 7198 log2--; 7199 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) | 7200 V_MTUWIDTH(log2) | V_MTUVALUE(mtu)); 7201 7202 for (w = 0; w < NCCTRL_WIN; ++w) { 7203 unsigned int inc; 7204 7205 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 7206 CC_MIN_INCR); 7207 7208 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | 7209 (w << 16) | (beta[w] << 13) | inc); 7210 } 7211 } 7212 } 7213 7214 /** 7215 * t4_set_pace_tbl - set the pace table 7216 * @adap: the adapter 7217 * @pace_vals: the pace values in microseconds 7218 * @start: index of the first entry in the HW pace table to set 7219 * @n: how many entries to set 7220 * 7221 * Sets (a subset of the) HW pace table. 
7222 */ 7223 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals, 7224 unsigned int start, unsigned int n) 7225 { 7226 unsigned int vals[NTX_SCHED], i; 7227 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000); 7228 7229 if (n > NTX_SCHED) 7230 return -ERANGE; 7231 7232 /* convert values from us to dack ticks, rounding to closest value */ 7233 for (i = 0; i < n; i++, pace_vals++) { 7234 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns; 7235 if (vals[i] > 0x7ff) 7236 return -ERANGE; 7237 if (*pace_vals && vals[i] == 0) 7238 return -ERANGE; 7239 } 7240 for (i = 0; i < n; i++, start++) 7241 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]); 7242 return 0; 7243 } 7244 7245 /** 7246 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler 7247 * @adap: the adapter 7248 * @kbps: target rate in Kbps 7249 * @sched: the scheduler index 7250 * 7251 * Configure a Tx HW scheduler for the target rate. 7252 */ 7253 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps) 7254 { 7255 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; 7256 unsigned int clk = adap->params.vpd.cclk * 1000; 7257 unsigned int selected_cpt = 0, selected_bpt = 0; 7258 7259 if (kbps > 0) { 7260 kbps *= 125; /* -> bytes */ 7261 for (cpt = 1; cpt <= 255; cpt++) { 7262 tps = clk / cpt; 7263 bpt = (kbps + tps / 2) / tps; 7264 if (bpt > 0 && bpt <= 255) { 7265 v = bpt * tps; 7266 delta = v >= kbps ? 
v - kbps : kbps - v; 7267 if (delta < mindelta) { 7268 mindelta = delta; 7269 selected_cpt = cpt; 7270 selected_bpt = bpt; 7271 } 7272 } else if (selected_cpt) 7273 break; 7274 } 7275 if (!selected_cpt) 7276 return -EINVAL; 7277 } 7278 t4_write_reg(adap, A_TP_TM_PIO_ADDR, 7279 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); 7280 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 7281 if (sched & 1) 7282 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24); 7283 else 7284 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8); 7285 t4_write_reg(adap, A_TP_TM_PIO_DATA, v); 7286 return 0; 7287 } 7288 7289 /** 7290 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler 7291 * @adap: the adapter 7292 * @sched: the scheduler index 7293 * @ipg: the interpacket delay in tenths of nanoseconds 7294 * 7295 * Set the interpacket delay for a HW packet rate scheduler. 7296 */ 7297 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg) 7298 { 7299 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; 7300 7301 /* convert ipg to nearest number of core clocks */ 7302 ipg *= core_ticks_per_usec(adap); 7303 ipg = (ipg + 5000) / 10000; 7304 if (ipg > M_TXTIMERSEPQ0) 7305 return -EINVAL; 7306 7307 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 7308 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 7309 if (sched & 1) 7310 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg); 7311 else 7312 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg); 7313 t4_write_reg(adap, A_TP_TM_PIO_DATA, v); 7314 t4_read_reg(adap, A_TP_TM_PIO_DATA); 7315 return 0; 7316 } 7317 7318 /* 7319 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core 7320 * clocks. 
The formula is 7321 * 7322 * bytes/s = bytes256 * 256 * ClkFreq / 4096 7323 * 7324 * which is equivalent to 7325 * 7326 * bytes/s = 62.5 * bytes256 * ClkFreq_ms 7327 */ 7328 static u64 chan_rate(struct adapter *adap, unsigned int bytes256) 7329 { 7330 u64 v = (u64)bytes256 * adap->params.vpd.cclk; 7331 7332 return v * 62 + v / 2; 7333 } 7334 7335 /** 7336 * t4_get_chan_txrate - get the current per channel Tx rates 7337 * @adap: the adapter 7338 * @nic_rate: rates for NIC traffic 7339 * @ofld_rate: rates for offloaded traffic 7340 * 7341 * Return the current Tx rates in bytes/s for NIC and offloaded traffic 7342 * for each channel. 7343 */ 7344 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) 7345 { 7346 u32 v; 7347 7348 v = t4_read_reg(adap, A_TP_TX_TRATE); 7349 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v)); 7350 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v)); 7351 if (adap->chip_params->nchan > 2) { 7352 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v)); 7353 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v)); 7354 } 7355 7356 v = t4_read_reg(adap, A_TP_TX_ORATE); 7357 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v)); 7358 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v)); 7359 if (adap->chip_params->nchan > 2) { 7360 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v)); 7361 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v)); 7362 } 7363 } 7364 7365 /** 7366 * t4_set_trace_filter - configure one of the tracing filters 7367 * @adap: the adapter 7368 * @tp: the desired trace filter parameters 7369 * @idx: which filter to configure 7370 * @enable: whether to enable or disable the filter 7371 * 7372 * Configures one of the tracing filters available in HW. If @tp is %NULL 7373 * it indicates that the filter is already written in the register and it 7374 * just needs to be enabled or disabled. 
7375 */ 7376 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, 7377 int idx, int enable) 7378 { 7379 int i, ofst; 7380 u32 match_ctl_a, match_ctl_b; 7381 u32 data_reg, mask_reg, cfg; 7382 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN; 7383 7384 if (idx < 0 || idx >= NTRACE) 7385 return -EINVAL; 7386 7387 if (chip_id(adap) >= CHELSIO_T7) { 7388 match_ctl_a = T7_MPS_TRC_FILTER_MATCH_CTL_A(idx); 7389 match_ctl_b = T7_MPS_TRC_FILTER_MATCH_CTL_B(idx); 7390 } else { 7391 match_ctl_a = MPS_TRC_FILTER_MATCH_CTL_A(idx); 7392 match_ctl_b = MPS_TRC_FILTER_MATCH_CTL_B(idx); 7393 } 7394 7395 if (tp == NULL || !enable) { 7396 t4_set_reg_field(adap, match_ctl_a, en, enable ? en : 0); 7397 return 0; 7398 } 7399 7400 /* 7401 * TODO - After T4 data book is updated, specify the exact 7402 * section below. 7403 * 7404 * See T4 data book - MPS section for a complete description 7405 * of the below if..else handling of A_MPS_TRC_CFG register 7406 * value. 7407 */ 7408 cfg = t4_read_reg(adap, A_MPS_TRC_CFG); 7409 if (cfg & F_TRCMULTIFILTER) { 7410 /* 7411 * If multiple tracers are enabled, then maximum 7412 * capture size is 2.5KB (FIFO size of a single channel) 7413 * minus 2 flits for CPL_TRACE_PKT header. 7414 */ 7415 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8))) 7416 return -EINVAL; 7417 } else { 7418 /* 7419 * If multiple tracers are disabled, to avoid deadlocks 7420 * maximum packet capture size of 9600 bytes is recommended. 7421 * Also in this mode, only trace0 can be enabled and running. 7422 */ 7423 if (tp->snap_len > 9600 || idx) 7424 return -EINVAL; 7425 } 7426 7427 if (tp->port > (is_t4(adap) ? 
11 : 19) || tp->invert > 1 || 7428 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET || 7429 tp->min_len > M_TFMINPKTSIZE) 7430 return -EINVAL; 7431 7432 /* stop the tracer we'll be changing */ 7433 t4_set_reg_field(adap, match_ctl_a, en, 0); 7434 7435 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx; 7436 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst; 7437 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst; 7438 7439 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 7440 t4_write_reg(adap, data_reg, tp->data[i]); 7441 t4_write_reg(adap, mask_reg, ~tp->mask[i]); 7442 } 7443 t4_write_reg(adap, match_ctl_b, V_TFCAPTUREMAX(tp->snap_len) | 7444 V_TFMINPKTSIZE(tp->min_len)); 7445 t4_write_reg(adap, match_ctl_a, V_TFOFFSET(tp->skip_ofst) | 7446 V_TFLENGTH(tp->skip_len) | en | (is_t4(adap) ? 7447 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) : 7448 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert))); 7449 7450 return 0; 7451 } 7452 7453 /** 7454 * t4_get_trace_filter - query one of the tracing filters 7455 * @adap: the adapter 7456 * @tp: the current trace filter parameters 7457 * @idx: which trace filter to query 7458 * @enabled: non-zero if the filter is enabled 7459 * 7460 * Returns the current settings of one of the HW tracing filters. 
7461 */ 7462 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, 7463 int *enabled) 7464 { 7465 u32 ctla, ctlb; 7466 int i, ofst; 7467 u32 data_reg, mask_reg; 7468 7469 if (chip_id(adap) >= CHELSIO_T7) { 7470 ctla = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_A(idx)); 7471 ctlb = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_B(idx)); 7472 } else { 7473 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A(idx)); 7474 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B(idx)); 7475 } 7476 7477 if (is_t4(adap)) { 7478 *enabled = !!(ctla & F_TFEN); 7479 tp->port = G_TFPORT(ctla); 7480 tp->invert = !!(ctla & F_TFINVERTMATCH); 7481 } else { 7482 *enabled = !!(ctla & F_T5_TFEN); 7483 tp->port = G_T5_TFPORT(ctla); 7484 tp->invert = !!(ctla & F_T5_TFINVERTMATCH); 7485 } 7486 tp->snap_len = G_TFCAPTUREMAX(ctlb); 7487 tp->min_len = G_TFMINPKTSIZE(ctlb); 7488 tp->skip_ofst = G_TFOFFSET(ctla); 7489 tp->skip_len = G_TFLENGTH(ctla); 7490 7491 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx; 7492 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst; 7493 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst; 7494 7495 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 7496 tp->mask[i] = ~t4_read_reg(adap, mask_reg); 7497 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; 7498 } 7499 } 7500 7501 /** 7502 * t4_set_trace_rss_control - configure the trace rss control register 7503 * @adap: the adapter 7504 * @chan: the channel number for RSS control 7505 * @qid: queue number 7506 * 7507 * Configures the MPS tracing RSS control parameter for specified 7508 * @chan channel and @qid queue number. 
 */
void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid)
{
	u32 mps_trc_rss_control;

	/*
	 * The trace RSS control register moved between chip generations;
	 * pick the address that matches this adapter's chip.
	 */
	switch (chip_id(adap)) {
	case CHELSIO_T4:
		mps_trc_rss_control = A_MPS_TRC_RSS_CONTROL;
		break;
	case CHELSIO_T5:
	case CHELSIO_T6:
		mps_trc_rss_control = A_MPS_T5_TRC_RSS_CONTROL;
		break;
	case CHELSIO_T7:
	default:
		mps_trc_rss_control = A_T7_MPS_T5_TRC_RSS_CONTROL;
		break;
	}

	/* Steer traced traffic from channel @chan to ingress queue @qid. */
	t4_write_reg(adap, mps_trc_rss_control,
	    V_RSSCONTROL(chan) | V_QUEUENUMBER(qid));
}

/**
 *	t4_pmtx_get_stats - returns the HW stats from PMTX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMTX.  Both output arrays must
 *	have room for adap->chip_params->pm_stats_cnt entries.
 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select statistic i+1, then read its occurrence count. */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap))
			/* T4 exposes the 64-bit cycle count directly. */
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		else {
			/*
			 * T5+ expose the cycle count through the PM debug
			 * interface as two 32-bit halves, MSB first.  The
			 * MSB register address changed on T7.
			 */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
			    A_PM_TX_DBG_DATA, data, 2,
			    chip_id(adap) >= CHELSIO_T7 ?
			    A_T7_PM_TX_DBG_STAT_MSB :
			    A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_pmrx_get_stats - returns the HW stats from PMRX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMRX.  Both output arrays must
 *	have room for adap->chip_params->pm_stats_cnt entries.
 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select statistic i+1, then read its occurrence count. */
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (is_t4(adap)) {
			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		} else {
			/* Two 32-bit halves via the PM RX debug interface. */
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
			    A_PM_RX_DBG_DATA, data, 2,
			    A_PM_RX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_pmrx_cache_get_stats - returns the HW PMRX cache stats
 *	@adap: the adapter
 *	@stats: where to store the statistics (T7_PM_RX_CACHE_NSTATS entries)
 *
 *	Returns performance statistics of PMRX cache.
 */
void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[])
{
	u8 i, j;

	/* Each config selection (0x100 + i) yields three 32-bit values. */
	for (i = 0, j = 0; i < T7_PM_RX_CACHE_NSTATS / 3; i++, j += 3) {
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, 0x100 + i);
		stats[j] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		/* The remaining two values come via the debug interface. */
		t4_read_indirect(adap, A_PM_RX_DBG_CTRL, A_PM_RX_DBG_DATA,
		    &stats[j + 1], 2, A_PM_RX_DBG_STAT_MSB);
	}
}

/**
 *	t4_get_mps_bg_map - return the buffer groups associated with a port
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given port.  Bit i is set if buffer group i is used by the
 *	port.
 */
static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n;

	/* Prefer the firmware-provided map (8 bits per port) if present. */
	if (adap->params.mps_bg_map != UINT32_MAX)
		return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);

	/* Otherwise fall back to the fixed historical layouts. */
	n = adap->params.nports;
	MPASS(n > 0 && n <= MAX_NPORTS);
	if (n == 1)
		return idx == 0 ? 0xf : 0;	/* single port owns all 4 BGs */
	if (n == 2 && chip_id(adap) <= CHELSIO_T5)
		return idx < 2 ? (3 << (2 * idx)) : 0;	/* 2 BGs per port */
	return 1 << idx;	/* one buffer group per port */
}

/*
 * TP RX e-channels associated with the port.
 */
static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
{
	const u32 n = adap->params.nports;
	const u32 all_chan = (1 << adap->chip_params->nchan) - 1;

	switch (adap->params.tp.lb_mode) {
	case 0:
		/* Same layout rules as the MPS buffer-group map above. */
		if (n == 1)
			return (all_chan);
		if (n == 2 && chip_id(adap) <= CHELSIO_T5)
			return (3 << (2 * idx));
		return (1 << idx);
	case 1:
		/* Loopback mode 1: single port owns every channel. */
		MPASS(n == 1);
		return (all_chan);
	case 2:
		/* Loopback mode 2: two channels per port. */
		MPASS(n <= 2);
		return (3 << (2 * idx));
	default:
		CH_ERR(adap, "Unsupported LB mode %d\n",
		    adap->params.tp.lb_mode);
		return (0);
	}
}

/*
 * TP RX c-channel associated with the port.
 */
static unsigned int t4_get_rx_c_chan(struct adapter *adap, int idx)
{
	/* Use the firmware-provided map (8 bits per port) when available. */
	if (adap->params.tp_ch_map != UINT32_MAX)
		return (adap->params.tp_ch_map >> (8 * idx)) & 0xff;
	return 0;
}

/*
 * TP TX c-channel associated with the port.
7746 */ 7747 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) 7748 { 7749 struct port_info *pi; 7750 int port_id, tx_chan; 7751 u32 bgmap, stat_ctl; 7752 7753 port_id = adap->port_map[idx]; 7754 MPASS(port_id >= 0 && port_id <= adap->params.nports); 7755 pi = adap->port[port_id]; 7756 7757 #define GET_STAT(name) \ 7758 t4_read_reg64(adap, \ 7759 t4_port_reg(adap, tx_chan, A_MPS_PORT_STAT_##name##_L)); 7760 memset(p, 0, sizeof(*p)); 7761 for (tx_chan = pi->tx_chan; 7762 tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) { 7763 p->tx_pause += GET_STAT(TX_PORT_PAUSE); 7764 p->tx_octets += GET_STAT(TX_PORT_BYTES); 7765 p->tx_frames += GET_STAT(TX_PORT_FRAMES); 7766 p->tx_bcast_frames += GET_STAT(TX_PORT_BCAST); 7767 p->tx_mcast_frames += GET_STAT(TX_PORT_MCAST); 7768 p->tx_ucast_frames += GET_STAT(TX_PORT_UCAST); 7769 p->tx_error_frames += GET_STAT(TX_PORT_ERROR); 7770 p->tx_frames_64 += GET_STAT(TX_PORT_64B); 7771 p->tx_frames_65_127 += GET_STAT(TX_PORT_65B_127B); 7772 p->tx_frames_128_255 += GET_STAT(TX_PORT_128B_255B); 7773 p->tx_frames_256_511 += GET_STAT(TX_PORT_256B_511B); 7774 p->tx_frames_512_1023 += GET_STAT(TX_PORT_512B_1023B); 7775 p->tx_frames_1024_1518 += GET_STAT(TX_PORT_1024B_1518B); 7776 p->tx_frames_1519_max += GET_STAT(TX_PORT_1519B_MAX); 7777 p->tx_drop += GET_STAT(TX_PORT_DROP); 7778 p->tx_ppp0 += GET_STAT(TX_PORT_PPP0); 7779 p->tx_ppp1 += GET_STAT(TX_PORT_PPP1); 7780 p->tx_ppp2 += GET_STAT(TX_PORT_PPP2); 7781 p->tx_ppp3 += GET_STAT(TX_PORT_PPP3); 7782 p->tx_ppp4 += GET_STAT(TX_PORT_PPP4); 7783 p->tx_ppp5 += GET_STAT(TX_PORT_PPP5); 7784 p->tx_ppp6 += GET_STAT(TX_PORT_PPP6); 7785 p->tx_ppp7 += GET_STAT(TX_PORT_PPP7); 7786 7787 p->rx_pause += GET_STAT(RX_PORT_PAUSE); 7788 p->rx_octets += GET_STAT(RX_PORT_BYTES); 7789 p->rx_frames += GET_STAT(RX_PORT_FRAMES); 7790 p->rx_bcast_frames += GET_STAT(RX_PORT_BCAST); 7791 p->rx_mcast_frames += GET_STAT(RX_PORT_MCAST); 7792 p->rx_ucast_frames += GET_STAT(RX_PORT_UCAST); 
7793 p->rx_too_long += GET_STAT(RX_PORT_MTU_ERROR); 7794 p->rx_jabber += GET_STAT(RX_PORT_MTU_CRC_ERROR); 7795 p->rx_len_err += GET_STAT(RX_PORT_LEN_ERROR); 7796 p->rx_symbol_err += GET_STAT(RX_PORT_SYM_ERROR); 7797 p->rx_runt += GET_STAT(RX_PORT_LESS_64B); 7798 p->rx_frames_64 += GET_STAT(RX_PORT_64B); 7799 p->rx_frames_65_127 += GET_STAT(RX_PORT_65B_127B); 7800 p->rx_frames_128_255 += GET_STAT(RX_PORT_128B_255B); 7801 p->rx_frames_256_511 += GET_STAT(RX_PORT_256B_511B); 7802 p->rx_frames_512_1023 += GET_STAT(RX_PORT_512B_1023B); 7803 p->rx_frames_1024_1518 += GET_STAT(RX_PORT_1024B_1518B); 7804 p->rx_frames_1519_max += GET_STAT(RX_PORT_1519B_MAX); 7805 p->rx_ppp0 += GET_STAT(RX_PORT_PPP0); 7806 p->rx_ppp1 += GET_STAT(RX_PORT_PPP1); 7807 p->rx_ppp2 += GET_STAT(RX_PORT_PPP2); 7808 p->rx_ppp3 += GET_STAT(RX_PORT_PPP3); 7809 p->rx_ppp4 += GET_STAT(RX_PORT_PPP4); 7810 p->rx_ppp5 += GET_STAT(RX_PORT_PPP5); 7811 p->rx_ppp6 += GET_STAT(RX_PORT_PPP6); 7812 p->rx_ppp7 += GET_STAT(RX_PORT_PPP7); 7813 if (!is_t6(adap)) { 7814 MPASS(pi->fcs_reg == A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L); 7815 p->rx_fcs_err += GET_STAT(RX_PORT_CRC_ERROR); 7816 } 7817 } 7818 #undef GET_STAT 7819 7820 if (is_t6(adap) && pi->fcs_reg != -1) 7821 p->rx_fcs_err = t4_read_reg64(adap, 7822 t4_port_reg(adap, pi->tx_chan, pi->fcs_reg)) - pi->fcs_base; 7823 7824 if (chip_id(adap) >= CHELSIO_T5) { 7825 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL); 7826 if (stat_ctl & F_COUNTPAUSESTATTX) { 7827 p->tx_frames -= p->tx_pause; 7828 p->tx_octets -= p->tx_pause * 64; 7829 } 7830 if (stat_ctl & F_COUNTPAUSEMCTX) 7831 p->tx_mcast_frames -= p->tx_pause; 7832 if (stat_ctl & F_COUNTPAUSESTATRX) { 7833 p->rx_frames -= p->rx_pause; 7834 p->rx_octets -= p->rx_pause * 64; 7835 } 7836 if (stat_ctl & F_COUNTPAUSEMCRX) 7837 p->rx_mcast_frames -= p->rx_pause; 7838 } 7839 7840 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 7841 bgmap = pi->mps_bg_map; 7842 p->rx_ovflow0 = (bgmap & 1) ? 
GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; 7843 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; 7844 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; 7845 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; 7846 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; 7847 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; 7848 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; 7849 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; 7850 #undef GET_STAT_COM 7851 } 7852 7853 /** 7854 * t4_get_lb_stats - collect loopback port statistics 7855 * @adap: the adapter 7856 * @idx: the loopback port index 7857 * @p: the stats structure to fill 7858 * 7859 * Return HW statistics for the given loopback port. 7860 */ 7861 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) 7862 { 7863 7864 #define GET_STAT(name) \ 7865 t4_read_reg64(adap, \ 7866 t4_port_reg(adap, idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)) 7867 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 7868 7869 p->octets = GET_STAT(BYTES); 7870 p->frames = GET_STAT(FRAMES); 7871 p->bcast_frames = GET_STAT(BCAST); 7872 p->mcast_frames = GET_STAT(MCAST); 7873 p->ucast_frames = GET_STAT(UCAST); 7874 p->error_frames = GET_STAT(ERROR); 7875 7876 p->frames_64 = GET_STAT(64B); 7877 p->frames_65_127 = GET_STAT(65B_127B); 7878 p->frames_128_255 = GET_STAT(128B_255B); 7879 p->frames_256_511 = GET_STAT(256B_511B); 7880 p->frames_512_1023 = GET_STAT(512B_1023B); 7881 p->frames_1024_1518 = GET_STAT(1024B_1518B); 7882 p->frames_1519_max = GET_STAT(1519B_MAX); 7883 p->drop = GET_STAT(DROP_FRAMES); 7884 7885 if (idx < adap->params.nports) { 7886 u32 bg = adap2pinfo(adap, idx)->mps_bg_map; 7887 7888 p->ovflow0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; 7889 p->ovflow1 = (bg & 2) ? 
GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; 7890 p->ovflow2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; 7891 p->ovflow3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; 7892 p->trunc0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; 7893 p->trunc1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; 7894 p->trunc2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; 7895 p->trunc3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; 7896 } 7897 7898 #undef GET_STAT 7899 #undef GET_STAT_COM 7900 } 7901 7902 /** 7903 * t4_wol_magic_enable - enable/disable magic packet WoL 7904 * @adap: the adapter 7905 * @port: the physical port index 7906 * @addr: MAC address expected in magic packets, %NULL to disable 7907 * 7908 * Enables/disables magic packet wake-on-LAN for the selected port. 7909 */ 7910 void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 7911 const u8 *addr) 7912 { 7913 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; 7914 7915 if (is_t4(adap)) { 7916 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO); 7917 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI); 7918 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2); 7919 } else if (chip_id(adap) < CHELSIO_T7) { 7920 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO); 7921 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI); 7922 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2); 7923 } else { 7924 mag_id_reg_l = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_LO); 7925 mag_id_reg_h = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_HI); 7926 port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2); 7927 } 7928 7929 if (addr) { 7930 t4_write_reg(adap, mag_id_reg_l, 7931 (addr[2] << 24) | (addr[3] << 16) | 7932 (addr[4] << 8) | addr[5]); 7933 t4_write_reg(adap, mag_id_reg_h, 7934 (addr[0] << 8) | addr[1]); 7935 } 7936 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN, 7937 V_MAGICEN(addr != NULL)); 7938 } 7939 7940 /** 7941 * t4_wol_pat_enable - enable/disable 
pattern-based WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@map: bitmap of which HW pattern filters to set
 *	@mask0: byte mask for bytes 0-63 of a packet
 *	@mask1: byte mask for bytes 64-127 of a packet
 *	@crc: Ethernet CRC for selected bytes
 *	@enable: enable/disable switch
 *
 *	Sets the pattern filters indicated in @map to mask out the bytes
 *	specified in @mask0/@mask1 in received packets and compare the CRC of
 *	the resulting packet against @crc.  If @enable is %true pattern-based
 *	WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	/* Port config register location differs across T4 / T5-T6 / T7. */
	if (is_t4(adap))
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	else if (chip_id(adap) < CHELSIO_T7)
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	else
		port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);

	if (!enable) {
		/* Disabling only requires clearing the enable bit. */
		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
		return 0;
	}
	/* Only NWOL_PAT (8) hardware pattern filters exist. */
	if (map > 0xff)
		return -EINVAL;

	/*
	 * NOTE(review): EPIO_REG only distinguishes T4 from T5-style port
	 * registers, while port_cfg_reg above has a separate T7 mapping --
	 * confirm T5_PORT_REG is still correct for EPIO registers on T7.
	 */
#define EPIO_REG(name) \
	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))

	/* DATA1-3 (mask bits 32-127) are shared by all filters below. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;

		/* write CRC (CRC slots start at EPIO address 32) */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	/* All filters programmed; turn on pattern matching. */
	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
	return 0;
}

/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
			cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/*
 * Fill in the opcode/request/read-write header and length of a FW command
 * structure @var for command FW_<cmd>_CMD.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)

/*
 * Write @val to firmware address space location @addr via an LDST command
 * issued through mailbox @mbox.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int *valp)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	/* The firmware returns the 16-bit register value in the reply. */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = be16_to_cpu(c.u.mdio.rval);
	return ret;
}

/**
 *	t4_mdio_wr - write a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to write
 *	@valp: value to write
 *
 *	Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	c.u.mdio.rval = cpu_to_be16(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *
 *	t4_sge_decode_idma_state - decode the idma state
 *	@adap: the adapter
 *	@state: the state idma is stuck in
 *
 *	Logs (via CH_WARN) a human-readable name for the stuck IDMA state
 *	plus a set of SGE debug registers useful for diagnosis.  The state
 *	name tables are chip-specific.
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* T4 IDMA state names, indexed by hardware state number. */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	/* T5 IDMA state names. */
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* T6 IDMA state names (also used for T7). */
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* SGE debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char * const *sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = chip_id(adapter);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char * const *)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char * const *)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
	case CHELSIO_T7:
		sge_idma_decode = (const char * const *)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
		return;
	}

	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}

/**
 *	t4_sge_ctxt_flush - flush the SGE context cache
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a FW command through the given mailbox to flush the
 *	SGE context cache.
 */
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* Flush either the egress or the ingress context cache. */
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
						 FW_LDST_ADDRSPC_SGE_EGRC :
						 FW_LDST_ADDRSPC_SGE_INGC);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	return ret;
}

/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/*
	 * Encode the caller's master willingness: MASTER_CANT disallows
	 * mastership, MASTER_MUST forces it (nominating our own mailbox),
	 * otherwise let the firmware choose (M_FW_HELLO_CMD_MBMASTER).
	 */
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		/* Report error preferentially over initialized. */
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll A_PCIE_FW every 50ms. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

/**
 *	t4_fw_bye - end communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_reset - issue a reset to FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@reset: specifies the type of reset to perform
 *
 *	Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	M_PCIE_FW_MASTER).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox)
{
	int ms;

	/* Release the uP, then wait for FW to clear its HALT flag. */
	t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
	for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
		if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
			return FW_SUCCESS;
		msleep(100);
		ms += 100;
	}

	return -ETIMEDOUT;
}

/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	/* Bootstrap images are loaded without halting/restarting the FW. */
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			return ret;
	}

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0 || bootstrap)
		return ret;

	return t4_fw_restart(adap, mbox);
}

/**
 *	t4_fw_initialize - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_query_params_rw - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@rw: Write and read flag
 *
 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
 *	queried at once.
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw)
{
	int i, ret;
	struct fw_params_cmd c;
	/* Each c.param[i] is a {mnem, val} pair of big-endian words; walk
	 * them with a flat pointer. */
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		/* In rw mode the caller-supplied values are sent along with
		 * the mnemonics; otherwise the value slot stays zero. */
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);

	/*
	 * We always copy back the results, even if there's an error.  We'll
	 * get an error if any of the parameters was unknown to the Firmware,
	 * but there will be results for the others ...  (Older Firmware
	 * stopped at the first unknown parameter; newer Firmware processes
	 * them all and flags the unknown parameters with a return value of
	 * ~0UL.)
	 */
	for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
		*val++ = be32_to_cpu(*p);

	return ret;
}

/* Convenience wrapper: read-only parameter query (rw == 0). */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}

/**
 *	t4_set_params_timeout - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@timeout: the timeout time
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
 */
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Fill in the interleaved {mnemonic, value} pairs. */
	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}

/**
 *	t4_set_params - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	/* Same as t4_set_params_timeout() with the default FW timeout. */
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}

/**
 *	t4_cfg_pfvf - configure PF/VF resource limits
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF being configured
 *	@vf: the VF being configured
 *	@txq: the max number of egress queues
 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
 *	@rxqi: the max number of interrupt-capable ingress queues
 *	@rxq: the max number of interruptless ingress queues
 *	@tc: the PCI traffic class
 *	@vi: the max number of virtual interfaces
 *	@cmask: the channel access rights mask for the PF/VF
 *	@pmask: the port access rights mask for the PF/VF
 *	@nexact: the maximum number of exact MPS filters
 *	@rcaps: read capabilities
 *	@wxcaps: write/execute capabilities
 *
 *	Configures resource limits and capabilities for a physical or virtual
 *	function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
				  V_FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
				     V_FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
				    V_FW_PFVF_CMD_PMASK(pmask) |
				    V_FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
				      V_FW_PFVF_CMD_NVI(vi) |
				      V_FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
					   V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
					   V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_alloc_vi_func - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *	@vfvld: if not %NULL, set to the VF-valid flag reported for the VI
 *	@vin: if not %NULL, set to the VI number (VIN) reported for the VI
 *	@portfunc: which Port Application Function MAC Address is desired
 *	@idstype: Intrusion Detection Type
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, u16 *rss_size,
		     uint8_t *vfvld, uint16_t *vin,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* FW encodes the MAC count as (count - 1). */
	c.nmac = nmac - 1;
	if(!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;
	ret = G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));

	if (mac) {
		/* First address lives in c.mac; extras in c.nmac0..c.nmac3.
		 * The cases below intentionally fall through to copy all
		 * requested addresses. */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	/* With the VIID/SMT extension the FW reports VFVLD/VIN in the reply;
	 * otherwise both are derived from the returned VIID itself. */
	if (vfvld) {
		*vfvld = adap->params.viid_smt_extn_support ?
		    G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)) :
		    G_FW_VIID_VIVLD(ret);
	}
	if (vin) {
		*vin = adap->params.viid_smt_extn_support ?
		    G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)) :
		    G_FW_VIID_VIN(ret);
	}

	return ret;
}

/**
 *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	backwards compatible and convieniance routine to allocate a Virtual
 *	Interface with a Ethernet Port Application Function and Intrustion
 *	Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		u16 *rss_size, uint8_t *vfvld, uint16_t *vin)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
				vfvld, vin, FW_VI_FUNC_ETH, 0);
}

/**
 *	t4_free_vi - free a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@viid: virtual interface identifier
 *
 *	Free a previously allocated virtual interface.
 */
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int viid)
{
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
				  F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) |
				  V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}

/**
 *	t4_set_rxmode - set Rx properties of a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@mtu: the new MTU or -1
 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values: -1 ("no change") maps to the all-ones field
	 * mask, which the firmware presumably treats as "leave unchanged" --
	 * per the -1 semantics documented above. */
	if (mtu < 0)
		mtu = M_FW_VI_RXMODE_CMD_MTU;
	if (promisc < 0)
		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
	if (all_multi < 0)
		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
	if (bcast < 0)
		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
	if (vlanex < 0)
		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.mtu_to_vlanexen =
	    cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
			V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
			V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
			V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
			V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@vni: the VNI id for the tunnel protocol
 *	@vni_mask: mask for the VNI id
 *	@dip_hit: to enable DIP match for the MPS entry
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an MPS entry with specified MAC address and VNI value.
 *
 *	Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			    const u8 *addr, const u8 *mask, unsigned int vni,
			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
			    bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_vni *p = c.u.exact_vni;
	int ret = 0;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
	c.freemacs_to_len16 = cpu_to_be32(val);
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));

	p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
					    V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
					    V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
	p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0)
		/* The reply carries the index FW actually assigned. */
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
	return ret;
}

/**
 *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@idx: index at which to add this entry
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@port_id: the port index
 *	@sleep_ok: call is allowed to sleep
 *
 *	Adds the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok)
{
	int ret = 0;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(val);

	/* Specify that this is an inner mac address */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/* Copy the address and the mask */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0) {
		/* FW must have stored the entry at the requested index;
		 * anything else is treated as an allocation failure. */
		ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
		if (ret != idx)
			ret = -ENOMEM;
	}

	return ret;
}

/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* One FW command holds at most ARRAY_SIZE(c.u.exact) addresses, so
	 * submit the list in batches. */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
			    cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
			    be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >= max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only the first batch may request a FREEMACS flush. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* -FW_ENOMEM is not fatal: report however many filters stuck. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 *	t4_free_encap_mac_filt - frees MPS entry at given index
 *	@adap: the adapter
 *	@viid: the VI id
 *	@idx: index of MPS entry to be freed
 *	@sleep_ok: call is allowed to sleep
 *
 *	Frees the MPS entry at supplied index
 *
 *	Returns a negative error number or zero on success
 */
int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
			   int idx, bool sleep_ok)
{
	struct fw_vi_mac_exact *p;
	struct fw_vi_mac_cmd c;
	/* Freeing writes an all-zero MAC at the target index. */
	u8 addr[] = {0,0,0,0,0,0};
	int ret = 0;
	u32 exact;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST |
				   F_FW_CMD_WRITE |
				   V_FW_CMD_EXEC(0) |
				   V_FW_VI_MAC_CMD_VIID(viid));
	exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					  exact |
					  V_FW_CMD_LEN16(1));
	p = c.u.exact;
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	return ret;
}

/**
 *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@idx: index of the entry in mps tcam
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@port_id: the port index
 *	@sleep_ok: call is allowed to sleep
 *
 *	Removes the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number on failure.
 */
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
			 const u8 *addr, const u8 *mask, unsigned int idx,
			 u8 lookup_type, u8 port_id, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 raw;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_CMD_EXEC(0) |
				   V_FW_VI_MAC_CMD_VIID(viid));
	raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					  raw |
					  V_FW_CMD_LEN16(1));

	/* ID_BASED_FREE tells FW to free by index rather than by match. */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
				     FW_VI_MAC_ID_BASED_FREE);

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/* Copy the address and the mask */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);

	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
}

/**
 *	t4_free_mac_filt - frees exact-match filters of given MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@sleep_ok: call is allowed to sleep
 *
 *	Frees the exact-match filter for each of the supplied addresses
 *
 *	Returns a negative error number or the number of filters freed.
 */
int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
		     unsigned int viid, unsigned int naddr,
		     const u8 **addr, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* Batch the frees: one command holds ARRAY_SIZE(c.u.exact) entries. */
	for (offset = 0; offset < (int)naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(0) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 =
		    cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
				V_FW_CMD_LEN16(len16));

		/* MAC_BASED_FREE: FW locates each entry by its address. */
		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
			    F_FW_VI_MAC_CMD_VALID |
			    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret)
			break;

		/* Count only entries FW reports at a valid TCAM index. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
			    be16_to_cpu(p->valid_to_idx));

			if (index < max_naddr)
				nfilters++;
		}

		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0)
		ret = nfilters;
	return ret;
}

/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: add MAC to SMT and return its index, or NULL
 *
 *	Modifies an exact-match filter and sets it to the new MAC address if
 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 *	latter case the address is added persistently if @persist is %true.
 *
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, uint16_t *smt_idx)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	/* Ask FW to also install the MAC in the SMT iff the caller wants the
	 * SMT index back. */
	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
		if (smt_idx) {
			/* With the VIID/SMT extension the SMT index comes
			 * back in the reply; otherwise it is derived from
			 * the VIID (shifted on T4/T5). */
			if (adap->params.viid_smt_extn_support)
				*smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
			else {
				if (chip_id(adap) <= CHELSIO_T5)
					*smt_idx = (viid & M_FW_VIID_VIN) << 1;
				else
					*smt_idx = viid & M_FW_VIID_VIN;
			}
		}
	}
	return ret;
}

/**
 *	t4_set_addr_hash - program the MAC inexact-match hash filter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	u32 val;

	memset(&c, 0, sizeof(c));
	/* NOTE(review): this packs the VIID with V_FW_VI_ENABLE_CMD_VIID
	 * into a FW_VI_MAC_CMD -- presumably the field layout is identical;
	 * confirm against the firmware interface definitions. */
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
	c.freemacs_to_len16 = cpu_to_be32(val);
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_enable_vi_params - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *	@dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 *	Enables/disables a virtual interface.  Note that setting DCB Enable
 *	only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
				     FW_LEN16(c));
	/* _ns variant: issued without sleeping while awaiting completion. */
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_enable_vi - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *
 *	Enables/disables a virtual interface.  Note that setting DCB Enable
 *	only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
}

/**
 *	t4_identify_port - identify a VI's port by blinking its LED
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@nblinks: how many times to blink LED at 2.5 Hz
 *
 *	Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = cpu_to_be16(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_iq_stop - stop an ingress queue and its FLs
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queues
 *	@vf: the VF owning the queues
 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 *	@iqid: ingress queue id
 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 *	Stops an ingress queue and its associated FLs, if any.  This causes
 *	any current or future data/messages destined for these queues to be
 *	tossed.
 */
int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	/* IQSTOP (not FREE): the queue stays allocated but inactive. */
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_iq_free - free an ingress queue and its FLs
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queues
 *	@vf: the VF owning the queues
 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 *	@iqid: ingress queue id
 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 *	Frees an ingress queue and its associated FLs, if any.
9518 */ 9519 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 9520 unsigned int vf, unsigned int iqtype, unsigned int iqid, 9521 unsigned int fl0id, unsigned int fl1id) 9522 { 9523 struct fw_iq_cmd c; 9524 9525 memset(&c, 0, sizeof(c)); 9526 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 9527 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 9528 V_FW_IQ_CMD_VFN(vf)); 9529 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); 9530 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 9531 c.iqid = cpu_to_be16(iqid); 9532 c.fl0id = cpu_to_be16(fl0id); 9533 c.fl1id = cpu_to_be16(fl1id); 9534 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9535 } 9536 9537 /** 9538 * t4_eth_eq_stop - stop an Ethernet egress queue 9539 * @adap: the adapter 9540 * @mbox: mailbox to use for the FW command 9541 * @pf: the PF owning the queues 9542 * @vf: the VF owning the queues 9543 * @eqid: egress queue id 9544 * 9545 * Stops an Ethernet egress queue. The queue can be reinitialized or 9546 * freed but is not otherwise functional after this call. 9547 */ 9548 int t4_eth_eq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 9549 unsigned int vf, unsigned int eqid) 9550 { 9551 struct fw_eq_eth_cmd c; 9552 9553 memset(&c, 0, sizeof(c)); 9554 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | 9555 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9556 V_FW_EQ_ETH_CMD_PFN(pf) | 9557 V_FW_EQ_ETH_CMD_VFN(vf)); 9558 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_EQSTOP | FW_LEN16(c)); 9559 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); 9560 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9561 } 9562 9563 /** 9564 * t4_eth_eq_free - free an Ethernet egress queue 9565 * @adap: the adapter 9566 * @mbox: mailbox to use for the FW command 9567 * @pf: the PF owning the queue 9568 * @vf: the VF owning the queue 9569 * @eqid: egress queue id 9570 * 9571 * Frees an Ethernet egress queue. 
9572 */ 9573 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 9574 unsigned int vf, unsigned int eqid) 9575 { 9576 struct fw_eq_eth_cmd c; 9577 9578 memset(&c, 0, sizeof(c)); 9579 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | 9580 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9581 V_FW_EQ_ETH_CMD_PFN(pf) | 9582 V_FW_EQ_ETH_CMD_VFN(vf)); 9583 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); 9584 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); 9585 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9586 } 9587 9588 /** 9589 * t4_ctrl_eq_free - free a control egress queue 9590 * @adap: the adapter 9591 * @mbox: mailbox to use for the FW command 9592 * @pf: the PF owning the queue 9593 * @vf: the VF owning the queue 9594 * @eqid: egress queue id 9595 * 9596 * Frees a control egress queue. 9597 */ 9598 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 9599 unsigned int vf, unsigned int eqid) 9600 { 9601 struct fw_eq_ctrl_cmd c; 9602 9603 memset(&c, 0, sizeof(c)); 9604 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | 9605 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9606 V_FW_EQ_CTRL_CMD_PFN(pf) | 9607 V_FW_EQ_CTRL_CMD_VFN(vf)); 9608 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); 9609 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid)); 9610 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9611 } 9612 9613 /** 9614 * t4_ofld_eq_free - free an offload egress queue 9615 * @adap: the adapter 9616 * @mbox: mailbox to use for the FW command 9617 * @pf: the PF owning the queue 9618 * @vf: the VF owning the queue 9619 * @eqid: egress queue id 9620 * 9621 * Frees a control egress queue. 
9622 */ 9623 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 9624 unsigned int vf, unsigned int eqid) 9625 { 9626 struct fw_eq_ofld_cmd c; 9627 9628 memset(&c, 0, sizeof(c)); 9629 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | 9630 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9631 V_FW_EQ_OFLD_CMD_PFN(pf) | 9632 V_FW_EQ_OFLD_CMD_VFN(vf)); 9633 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); 9634 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid)); 9635 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9636 } 9637 9638 /** 9639 * t4_link_down_rc_str - return a string for a Link Down Reason Code 9640 * @link_down_rc: Link Down Reason Code 9641 * 9642 * Returns a string representation of the Link Down Reason Code. 9643 */ 9644 const char *t4_link_down_rc_str(unsigned char link_down_rc) 9645 { 9646 static const char *reason[] = { 9647 "Link Down", 9648 "Remote Fault", 9649 "Auto-negotiation Failure", 9650 "Reserved3", 9651 "Insufficient Airflow", 9652 "Unable To Determine Reason", 9653 "No RX Signal Detected", 9654 "Reserved7", 9655 }; 9656 9657 if (link_down_rc >= ARRAY_SIZE(reason)) 9658 return "Bad Reason Code"; 9659 9660 return reason[link_down_rc]; 9661 } 9662 9663 /* 9664 * Return the highest speed set in the port capabilities, in Mb/s. 
9665 */ 9666 unsigned int fwcap_to_speed(uint32_t caps) 9667 { 9668 #define TEST_SPEED_RETURN(__caps_speed, __speed) \ 9669 do { \ 9670 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ 9671 return __speed; \ 9672 } while (0) 9673 9674 TEST_SPEED_RETURN(400G, 400000); 9675 TEST_SPEED_RETURN(200G, 200000); 9676 TEST_SPEED_RETURN(100G, 100000); 9677 TEST_SPEED_RETURN(50G, 50000); 9678 TEST_SPEED_RETURN(40G, 40000); 9679 TEST_SPEED_RETURN(25G, 25000); 9680 TEST_SPEED_RETURN(10G, 10000); 9681 TEST_SPEED_RETURN(1G, 1000); 9682 TEST_SPEED_RETURN(100M, 100); 9683 9684 #undef TEST_SPEED_RETURN 9685 9686 return 0; 9687 } 9688 9689 /* 9690 * Return the port capabilities bit for the given speed, which is in Mb/s. 9691 */ 9692 uint32_t speed_to_fwcap(unsigned int speed) 9693 { 9694 #define TEST_SPEED_RETURN(__caps_speed, __speed) \ 9695 do { \ 9696 if (speed == __speed) \ 9697 return FW_PORT_CAP32_SPEED_##__caps_speed; \ 9698 } while (0) 9699 9700 TEST_SPEED_RETURN(400G, 400000); 9701 TEST_SPEED_RETURN(200G, 200000); 9702 TEST_SPEED_RETURN(100G, 100000); 9703 TEST_SPEED_RETURN(50G, 50000); 9704 TEST_SPEED_RETURN(40G, 40000); 9705 TEST_SPEED_RETURN(25G, 25000); 9706 TEST_SPEED_RETURN(10G, 10000); 9707 TEST_SPEED_RETURN(1G, 1000); 9708 TEST_SPEED_RETURN(100M, 100); 9709 9710 #undef TEST_SPEED_RETURN 9711 9712 return 0; 9713 } 9714 9715 /* 9716 * Return the port capabilities bit for the highest speed in the capabilities. 
 */
uint32_t fwcap_top_speed(uint32_t caps)
{
	/* Tested from highest to lowest speed so the first hit is the top. */
#define TEST_SPEED_RETURN(__caps_speed) \
	do { \
		if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
			return FW_PORT_CAP32_SPEED_##__caps_speed; \
	} while (0)

	TEST_SPEED_RETURN(400G);
	TEST_SPEED_RETURN(200G);
	TEST_SPEED_RETURN(100G);
	TEST_SPEED_RETURN(50G);
	TEST_SPEED_RETURN(40G);
	TEST_SPEED_RETURN(25G);
	TEST_SPEED_RETURN(10G);
	TEST_SPEED_RETURN(1G);
	TEST_SPEED_RETURN(100M);

#undef TEST_SPEED_RETURN

	return 0;
}

/**
 *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
 *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
 *
 *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
 *	32-bit Port Capabilities value (pause and speed bits only).
 */
static uint32_t lstatus_to_fwcap(u32 lstatus)
{
	uint32_t linkattr = 0;

	/*
	 * Unfortunately the format of the Link Status in the old
	 * 16-bit Port Information message isn't the same as the
	 * 16-bit Port Capabilities bitfield used everywhere else,
	 * so each bit has to be translated individually.
	 */
	if (lstatus & F_FW_PORT_CMD_RXPAUSE)
		linkattr |= FW_PORT_CAP32_FC_RX;
	if (lstatus & F_FW_PORT_CMD_TXPAUSE)
		linkattr |= FW_PORT_CAP32_FC_TX;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
		linkattr |= FW_PORT_CAP32_SPEED_100M;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
		linkattr |= FW_PORT_CAP32_SPEED_1G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
		linkattr |= FW_PORT_CAP32_SPEED_10G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
		linkattr |= FW_PORT_CAP32_SPEED_25G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
		linkattr |= FW_PORT_CAP32_SPEED_40G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
		linkattr |= FW_PORT_CAP32_SPEED_100G;

	return linkattr;
}

/*
 * Updates all fields owned by the common code in port_info and link_config
 * based on information provided by the firmware.  Does not touch any
 * requested_* field.
 *
 * @p is the firmware reply; @action selects which reply layout (16-bit or
 * 32-bit port info) to decode.  If non-NULL, *mod_changed/*link_changed are
 * set to whether the module or link state differs from what was recorded.
 */
static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
    enum fw_port_action action, bool *mod_changed, bool *link_changed)
{
	struct link_config old_lc, *lc = &pi->link_cfg;
	unsigned char fc;
	u32 stat, linkattr;
	int old_ptype, old_mtype;

	/* Snapshot the old state so changes can be detected at the end. */
	old_ptype = pi->port_type;
	old_mtype = pi->mod_type;
	old_lc = *lc;
	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
		stat = be32_to_cpu(p->u.info.lstatus_to_modtype);

		pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
		pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
		    G_FW_PORT_CMD_MDIOADDR(stat) : -1;

		/* Old replies carry 16-bit caps; widen them to 32-bit. */
		lc->pcaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap));
		lc->acaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap));
		lc->lpacaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap));
		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);

		linkattr = lstatus_to_fwcap(stat);
	} else if (action == FW_PORT_ACTION_GET_PORT_INFO32) {
		stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32);

		pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat);
		pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat);
		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ?
		    G_FW_PORT_CMD_MDIOADDR32(stat) : -1;

		lc->pcaps = be32_to_cpu(p->u.info32.pcaps32);
		lc->acaps = be32_to_cpu(p->u.info32.acaps32);
		lc->lpacaps = be32_to_cpu(p->u.info32.lpacaps32);
		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0;
		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat);

		linkattr = be32_to_cpu(p->u.info32.linkattr32);
	} else {
		CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action);
		return;
	}

	lc->speed = fwcap_to_speed(linkattr);
	lc->fec = fwcap_to_fec(linkattr, true);

	fc = 0;
	if (linkattr & FW_PORT_CAP32_FC_RX)
		fc |= PAUSE_RX;
	if (linkattr & FW_PORT_CAP32_FC_TX)
		fc |= PAUSE_TX;
	lc->fc = fc;

	if (mod_changed != NULL)
		*mod_changed = false;
	if (link_changed != NULL)
		*link_changed = false;
	if (old_ptype != pi->port_type || old_mtype != pi->mod_type ||
	    old_lc.pcaps != lc->pcaps) {
		/* Module change: refresh the FEC hint from the new acaps. */
		if (pi->mod_type != FW_PORT_MOD_TYPE_NONE)
			lc->fec_hint = fwcap_to_fec(lc->acaps, true);
		if (mod_changed != NULL)
			*mod_changed = true;
	}
	if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed ||
	    old_lc.fec != lc->fec || old_lc.fc != lc->fc) {
		if (link_changed != NULL)
			*link_changed = true;
	}
}

9856 /** 9857 * t4_update_port_info - retrieve and update port information if changed 9858 * @pi: the port_info 9859 * 9860 * We issue a Get Port Information Command to the Firmware and, if 9861 * successful, we check to see if anything is different from what we 9862 * last recorded and update things accordingly. 9863 */ 9864 int t4_update_port_info(struct port_info *pi) 9865 { 9866 struct adapter *sc = pi->adapter; 9867 struct fw_port_cmd cmd; 9868 enum fw_port_action action; 9869 int ret; 9870 9871 memset(&cmd, 0, sizeof(cmd)); 9872 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | 9873 F_FW_CMD_REQUEST | F_FW_CMD_READ | 9874 V_FW_PORT_CMD_PORTID(pi->hw_port)); 9875 action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 : 9876 FW_PORT_ACTION_GET_PORT_INFO; 9877 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) | 9878 FW_LEN16(cmd)); 9879 ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd); 9880 if (ret) 9881 return ret; 9882 9883 handle_port_info(pi, &cmd, action, NULL, NULL); 9884 return 0; 9885 } 9886 9887 /** 9888 * t4_handle_fw_rpl - process a FW reply message 9889 * @adap: the adapter 9890 * @rpl: start of the FW message 9891 * 9892 * Processes a FW message, such as link state change messages. 
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;
	const struct fw_port_cmd *p = (const void *)rpl;
	enum fw_port_action action =
	    G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
	bool mod_changed, link_changed;

	if (opcode == FW_PORT_CMD &&
	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
	    action == FW_PORT_ACTION_GET_PORT_INFO32)) {
		/* link/module state change message */
		int hw_port = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		int port_id = adap->port_map[hw_port];
		struct port_info *pi;

		MPASS(port_id >= 0 && port_id < adap->params.nports);
		pi = adap->port[port_id];
		PORT_LOCK(pi);
		handle_port_info(pi, p, action, &mod_changed, &link_changed);
		PORT_UNLOCK(pi);
		if (mod_changed)
			t4_os_portmod_changed(pi);
		if (link_changed) {
			PORT_LOCK(pi);
			t4_os_link_changed(pi);
			PORT_UNLOCK(pi);
		}
	} else {
		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}

/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
static void get_pci_mode(struct adapter *adapter,
			 struct pci_params *p)
{
	u16 val;
	u32 pcie_cap;

	/* NOTE(review): p->speed/p->width are left untouched when no PCIe
	 * capability is found -- presumably the caller's struct is zeroed.
	 */
	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		/* Negotiated Link Width field starts at bit 4 of LNKSTA. */
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/* Maps a Flash Read-ID result to the part's total size in bytes. */
struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;

	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;

	case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;

	case 0xc2: /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;

	case 0xef: /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_ use a
	 * FLASH part which has 64KB sectors and is at least 4MB or 16MB in
	 * size, depending on the board.
	 */
	if (size == 0) {
		size = chip_id(adapter) >= CHELSIO_T7 ? 16 : 4;
		CH_WARN(adapter, "Unknown Flash Part %#x, assuming %uMB\n",
			flashid, size);
		size <<= 20;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}

/* Program the PCIe Completion Timeout range field in Device Control 2. */
static void set_pcie_completion_timeout(struct adapter *adapter,
					u8 range)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
		/* Completion Timeout Value is the low 4 bits of DEVCTL2. */
		val &= 0xfff0;
		val |= range;
		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}

/* Return the constant parameter block for the given chip id, or NULL. */
const struct chip_params *t4_get_chip_params(int chipid)
{
	static const struct chip_params chip_params[] = {
		{
			/* T4 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 15,
			.cim_num_ibq = CIM_NUM_IBQ,
			.cim_num_obq = CIM_NUM_OBQ,
			.filter_opt_len = FILTER_OPT_LEN,
			.filter_num_opt = S_FT_LAST + 1,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO,
			.sge_ctxt_size = SGE_CTXT_SIZE,
			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
			.rss_nentries = RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE,
		},
		{
			/* T5 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_ibq = CIM_NUM_IBQ,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.filter_opt_len = T5_FILTER_OPT_LEN,
			.filter_num_opt = S_FT_LAST + 1,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO | F_DBTYPE,
			.sge_ctxt_size = SGE_CTXT_SIZE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
			.rss_nentries = RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE,
		},
		{
			/* T6 */
			.nchan = T6_NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 3,
			.nsched_cls = 16,
			.cim_num_ibq = CIM_NUM_IBQ,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.filter_opt_len = T5_FILTER_OPT_LEN,
			.filter_num_opt = S_FT_LAST + 1,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.sge_ctxt_size = SGE_CTXT_SIZE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
			.rss_nentries = T6_RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE_T6,
		},
		{
			/* T7 */
			.nchan = NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_ibq = CIM_NUM_IBQ_T7,
			.cim_num_obq = CIM_NUM_OBQ_T7,
			.filter_opt_len = T7_FILTER_OPT_LEN,
			.filter_num_opt = S_T7_FT_LAST + 1,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.sge_ctxt_size = SGE_CTXT_SIZE_T7,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
			.rss_nentries = T7_RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE_T6,
		},
	};

	/* The table is indexed by chip generation, starting at T4. */
	chipid -= CHELSIO_T4;
	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
		return NULL;

	return &chip_params[chipid];
}

/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *	@buf: temporary space of at least VPD_LEN size provided by the caller.
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter, u32 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = adapter->chip_params->cim_la_size;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		adapter->params.cim_la_size = 2 * adapter->chip_params->cim_la_size;
	}

	ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}

/**
 *	t4_shutdown_adapter - shut down adapter, host & wire
 *	@adapter: the adapter
 *
 *	Perform an emergency shutdown of the adapter and stop it from
 *	continuing any further communication on the ports or DMA to the
 *	host.  This is typically used when the adapter and/or firmware
 *	have crashed and we want to prevent any further accidental
 *	communication with the rest of the world.  This will also force
 *	the port Link Status to go down -- if register writes work --
 *	which should help our peers figure out that we're down.
 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;
	const bool bt = adapter->bt_map != 0;

	t4_intr_disable(adapter);
	if (bt)
		t4_write_reg(adapter, A_DBG_GPIO_EN, 0xffff0000);
	for_each_port(adapter, port) {
		/* The MAC config register moved between T4 and later chips. */
		u32 a_port_cfg = is_t4(adapter) ?
		    t4_port_reg(adapter, port, A_XGMAC_PORT_CFG) :
		    t4_port_reg(adapter, port, A_MAC_PORT_CFG);

		/* Drop signal detect to force the link down on the wire. */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~V_SIGNAL_DET(1));
		if (!bt) {
			u32 hss_cfg0 = is_t4(adapter) ?
			    t4_port_reg(adapter, port, A_XGMAC_PORT_HSS_CFG0) :
			    t4_port_reg(adapter, port, A_MAC_PORT_HSS_CFG0);
			/* Power down and bypass the SerDes PLLs as well. */
			t4_set_reg_field(adapter, hss_cfg0, F_HSSPDWNPLLB |
			    F_HSSPDWNPLLA | F_HSSPLLBYPB | F_HSSPLLBYPA,
			    F_HSSPDWNPLLB | F_HSSPDWNPLLA | F_HSSPLLBYPB |
			    F_HSSPLLBYPA);
		}
	}
	/* Finally, stop all SGE DMA to/from the host. */
	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);

	return 0;
}

/**
 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@adapter: the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type for @qid
 *	@user: true if this request is for a user mode queue
 *	@pbar2_qoffset: BAR2 Queue Offset
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 SGE Queue Registers information associated with the
 *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 *	This may return an error which indicates that BAR2 SGE Queue
 *	registers aren't available.  If an error is not returned, then the
 *	following values are returned:
 *
 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used.  E.g. the
 *	Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" registers may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel
	 * mode queues.
	 */
	if (!user && is_t4(adapter))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.page_shift;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_s_qpp
		     : adapter->params.sge.iq_s_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}

/**
 *	t4_init_devlog_ncores_params - initialize adap->params.devlog and ncores
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 */
int t4_init_devlog_ncores_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams && pf_dparams != UINT32_MAX) {
		unsigned int nentries, nentries128, ncore_shift;

		/* The core count is encoded as a 2-bit log2 split across
		 * the MSB and LSB fields.
		 */
		ncore_shift = (G_PCIE_FW_PF_DEVLOG_COUNT_MSB(pf_dparams) << 1) |
		    G_PCIE_FW_PF_DEVLOG_COUNT_LSB(pf_dparams);
		adap->params.ncores = 1 << ncore_shift;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	adap->params.ncores = 1;
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}

/**
 *	t4_init_sge_params - initialize adap->params.sge
 *	@adapter: the adapter
 *
 *	Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;
	unsigned i, tscale = 1;

	/* Interrupt coalescing packet-count thresholds. */
	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* T6+ can scale the SGE timers; TSCALE==0 means no scaling. */
	if (chip_id(adapter) >= CHELSIO_T6) {
		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
		tscale = G_TSCALE(r);
		if (tscale == 0)
			tscale = 1;
		else
			tscale += 2;
	}

	/* Holdoff timers, converted from core clock ticks to microseconds. */
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;

	/* Free-list starvation thresholds; the packing variant is
	 * chip-generation specific. */
	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
	else
		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page
 */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* Host page size for this PF: field encodes log2(page) - 10. */
	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->sge_control = r;
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	/* Pad boundary encoding uses a different base shift on T6+. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_INGPADBOUNDARY_SHIFT);
	} else {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_T6_INGPADBOUNDARY_SHIFT);
	}
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		r = t4_read_reg(adapter, A_SGE_CONTROL2);
		if (G_INGPACKBOUNDARY(r) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}
	/* Snapshot the free-list buffer size registers (4 bytes apart). */
	for (i = 0; i < SGE_FLBUF_SIZES; i++)
		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));

	return 0;
}

/* Convert the LE's hardware hash mask to a shorter filter mask.
*/ 10558 static inline uint16_t 10559 hashmask_to_filtermask(struct adapter *adap, uint64_t hashmask, uint16_t filter_mode) 10560 { 10561 int first, last, i; 10562 uint16_t filter_mask; 10563 uint64_t mask; /* field mask */ 10564 10565 10566 if (chip_id(adap) >= CHELSIO_T7) { 10567 first = S_T7_FT_FIRST; 10568 last = S_T7_FT_LAST; 10569 } else { 10570 first = S_FT_FIRST; 10571 last = S_FT_LAST; 10572 } 10573 10574 for (filter_mask = 0, i = first; i <= last; i++) { 10575 if ((filter_mode & (1 << i)) == 0) 10576 continue; 10577 mask = (1 << t4_filter_field_width(adap, i)) - 1; 10578 if ((hashmask & mask) == mask) 10579 filter_mask |= 1 << i; 10580 hashmask >>= t4_filter_field_width(adap, i); 10581 } 10582 10583 return (filter_mask); 10584 } 10585 10586 /* 10587 * Read and cache the adapter's compressed filter mode and ingress config. 10588 */ 10589 static void 10590 read_filter_mode_and_ingress_config(struct adapter *adap) 10591 { 10592 int rc; 10593 uint32_t v, param[2], val[2]; 10594 struct tp_params *tpp = &adap->params.tp; 10595 uint64_t hash_mask; 10596 10597 param[0] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 10598 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) | 10599 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK); 10600 param[1] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 10601 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) | 10602 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE); 10603 rc = -t4_query_params(adap, adap->mbox, adap->pf, 0, 2, param, val); 10604 if (rc == 0) { 10605 tpp->filter_mode = G_FW_PARAMS_PARAM_FILTER_MODE(val[0]); 10606 tpp->filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val[0]); 10607 tpp->vnic_mode = val[1]; 10608 } else { 10609 /* 10610 * Old firmware. Read filter mode/mask and ingress config 10611 * straight from the hardware. 
10612 */ 10613 t4_tp_pio_read(adap, &v, 1, A_TP_VLAN_PRI_MAP, true); 10614 tpp->filter_mode = v & 0xffff; 10615 10616 hash_mask = 0; 10617 if (chip_id(adap) > CHELSIO_T4) { 10618 v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3)); 10619 hash_mask = v; 10620 v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4)); 10621 hash_mask |= (u64)v << 32; 10622 } 10623 if (chip_id(adap) >= CHELSIO_T7) { 10624 /* 10625 * This param came before T7 so T7+ firmwares should 10626 * always support this query. 10627 */ 10628 CH_WARN(adap, "query for filter mode/mask failed: %d\n", 10629 rc); 10630 } 10631 tpp->filter_mask = hashmask_to_filtermask(adap, hash_mask, 10632 tpp->filter_mode); 10633 10634 t4_tp_pio_read(adap, &v, 1, A_TP_INGRESS_CONFIG, true); 10635 if (v & F_VNIC) 10636 tpp->vnic_mode = FW_VNIC_MODE_PF_VF; 10637 else 10638 tpp->vnic_mode = FW_VNIC_MODE_OUTER_VLAN; 10639 } 10640 10641 /* 10642 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field 10643 * shift positions of several elements of the Compressed Filter Tuple 10644 * for this adapter which we need frequently ... 
10645 */ 10646 if (chip_id(adap) >= CHELSIO_T7) { 10647 tpp->ipsecidx_shift = t4_filter_field_shift(adap, F_IPSECIDX); 10648 tpp->fcoe_shift = t4_filter_field_shift(adap, F_T7_FCOE); 10649 tpp->port_shift = t4_filter_field_shift(adap, F_T7_PORT); 10650 tpp->vnic_shift = t4_filter_field_shift(adap, F_T7_VNIC_ID); 10651 tpp->vlan_shift = t4_filter_field_shift(adap, F_T7_VLAN); 10652 tpp->tos_shift = t4_filter_field_shift(adap, F_T7_TOS); 10653 tpp->protocol_shift = t4_filter_field_shift(adap, F_T7_PROTOCOL); 10654 tpp->ethertype_shift = t4_filter_field_shift(adap, F_T7_ETHERTYPE); 10655 tpp->macmatch_shift = t4_filter_field_shift(adap, F_T7_MACMATCH); 10656 tpp->matchtype_shift = t4_filter_field_shift(adap, F_T7_MPSHITTYPE); 10657 tpp->frag_shift = t4_filter_field_shift(adap, F_T7_FRAGMENTATION); 10658 tpp->roce_shift = t4_filter_field_shift(adap, F_ROCE); 10659 tpp->synonly_shift = t4_filter_field_shift(adap, F_SYNONLY); 10660 tpp->tcpflags_shift = t4_filter_field_shift(adap, F_TCPFLAGS); 10661 } else { 10662 tpp->ipsecidx_shift = -1; 10663 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE); 10664 tpp->port_shift = t4_filter_field_shift(adap, F_PORT); 10665 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); 10666 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN); 10667 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS); 10668 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL); 10669 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE); 10670 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH); 10671 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE); 10672 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION); 10673 tpp->roce_shift = -1; 10674 tpp->synonly_shift = -1; 10675 tpp->tcpflags_shift = -1; 10676 } 10677 } 10678 10679 /** 10680 * t4_init_tp_params - initialize adap->params.tp 10681 * @adap: the adapter 10682 * 10683 * Initialize various fields of the adapter's TP Parameters structure. 
 */
int t4_init_tp_params(struct adapter *adap)
{
	u32 tx_len, rx_len, r, v;
	struct tp_params *tpp = &adap->params.tp;

	/* TP timer and delayed-ACK tick resolutions. */
	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	tpp->tre = G_TIMERRESOLUTION(v);
	tpp->dack_re = G_DELAYEDACKRESOLUTION(v);

	read_filter_mode_and_ingress_config(adap);

	tpp->rx_pkt_encap = false;
	tpp->lb_mode = 0;
	tpp->lb_nchan = 1;
	if (chip_id(adap) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		tpp->rx_pkt_encap = v & F_CRXPKTENC;
		if (chip_id(adap) >= CHELSIO_T7) {
			/* lb_mode 1 => 4 loopback channels, 2 => 2. */
			t4_tp_pio_read(adap, &v, 1, A_TP_CHANNEL_MAP, true);
			tpp->lb_mode = G_T7_LB_MODE(v);
			if (tpp->lb_mode == 1)
				tpp->lb_nchan = 4;
			else if (tpp->lb_mode == 2)
				tpp->lb_nchan = 2;
		}
	}

	/* Max PDU sizes: start from the PMM page sizes and clamp by the
	 * TP_PARA_REG2/REG7 limits. */
	rx_len = t4_read_reg(adap, A_TP_PMM_RX_PAGE_SIZE);
	tx_len = t4_read_reg(adap, A_TP_PMM_TX_PAGE_SIZE);

	r = t4_read_reg(adap, A_TP_PARA_REG2);
	rx_len = min(rx_len, G_MAXRXDATA(r));
	tx_len = min(tx_len, G_MAXRXDATA(r));

	r = t4_read_reg(adap, A_TP_PARA_REG7);
	v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
	rx_len = min(rx_len, v);
	tx_len = min(tx_len, v);

	tpp->max_tx_pdu = tx_len;
	tpp->max_rx_pdu = rx_len;

	return 0;
}

/**
 *	t4_filter_field_width - returns the width of a filter field
 *	@adap: the adapter
 *	@filter_field: the filter field whose width is being requested
 *
 *	Return the width, in bits, of a filter field within the Compressed
 *	Filter Tuple.  The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_width(const struct adapter *adap, int filter_field)
{
	const int nopt = adap->chip_params->filter_num_opt;
	/* Per-field widths for T7+ parts. */
	static const uint8_t width_t7[] = {
		W_FT_IPSECIDX,
		W_FT_FCOE,
		W_FT_PORT,
		W_FT_VNIC_ID,
		W_FT_VLAN,
		W_FT_TOS,
		W_FT_PROTOCOL,
		W_FT_ETHERTYPE,
		W_FT_MACMATCH,
		W_FT_MPSHITTYPE,
		W_FT_FRAGMENTATION,
		W_FT_ROCE,
		W_FT_SYNONLY,
		W_FT_TCPFLAGS
	};
	/* Per-field widths for T4/T5/T6 parts. */
	static const uint8_t width_t4[] = {
		W_FT_FCOE,
		W_FT_PORT,
		W_FT_VNIC_ID,
		W_FT_VLAN,
		W_FT_TOS,
		W_FT_PROTOCOL,
		W_FT_ETHERTYPE,
		W_FT_MACMATCH,
		W_FT_MPSHITTYPE,
		W_FT_FRAGMENTATION
	};
	const uint8_t *width = chip_id(adap) >= CHELSIO_T7 ? width_t7 : width_t4;

	/* Out-of-range selectors have zero width. */
	if (filter_field < 0 || filter_field >= nopt)
		return (0);
	return (width[filter_field]);
}

/**
 *	t4_filter_field_shift - calculate filter field shift
 *	@adap: the adapter
 *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 *	Return the shift position of a filter field within the Compressed
 *	Filter Tuple.  The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
	const unsigned int filter_mode = adap->params.tp.filter_mode;
	unsigned int sel;
	int field_shift;

	/* A field that is not part of the filter mode has no position. */
	if ((filter_mode & filter_sel) == 0)
		return -1;

	/* Accumulate the widths of all enabled fields below @filter_sel;
	 * the field layouts differ between T7+ and earlier chips. */
	if (chip_id(adap) >= CHELSIO_T7) {
		for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
			switch (filter_mode & sel) {
			case F_IPSECIDX:
				field_shift += W_FT_IPSECIDX;
				break;
			case F_T7_FCOE:
				field_shift += W_FT_FCOE;
				break;
			case F_T7_PORT:
				field_shift += W_FT_PORT;
				break;
			case F_T7_VNIC_ID:
				field_shift += W_FT_VNIC_ID;
				break;
			case F_T7_VLAN:
				field_shift += W_FT_VLAN;
				break;
			case F_T7_TOS:
				field_shift += W_FT_TOS;
				break;
			case F_T7_PROTOCOL:
				field_shift += W_FT_PROTOCOL;
				break;
			case F_T7_ETHERTYPE:
				field_shift += W_FT_ETHERTYPE;
				break;
			case F_T7_MACMATCH:
				field_shift += W_FT_MACMATCH;
				break;
			case F_T7_MPSHITTYPE:
				field_shift += W_FT_MPSHITTYPE;
				break;
			case F_T7_FRAGMENTATION:
				field_shift += W_FT_FRAGMENTATION;
				break;
			case F_ROCE:
				field_shift += W_FT_ROCE;
				break;
			case F_SYNONLY:
				field_shift += W_FT_SYNONLY;
				break;
			case F_TCPFLAGS:
				field_shift += W_FT_TCPFLAGS;
				break;
			}
		}
		return field_shift;
	}

	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE:
			field_shift += W_FT_FCOE;
			break;
		case F_PORT:
			field_shift += W_FT_PORT;
			break;
		case F_VNIC_ID:
			field_shift += W_FT_VNIC_ID;
			break;
		case F_VLAN:
			field_shift += W_FT_VLAN;
			break;
		case F_TOS:
			field_shift += W_FT_TOS;
			break;
		case F_PROTOCOL:
			field_shift += W_FT_PROTOCOL;
			break;
		case F_ETHERTYPE:
			field_shift += W_FT_ETHERTYPE;
			break;
		case F_MACMATCH:
			field_shift += W_FT_MACMATCH;
			break;
		case F_MPSHITTYPE:
			field_shift += W_FT_MPSHITTYPE;
			break;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
			break;
		}
	}
	return field_shift;
}

/*
 * Initialize the given port's port_info and first VI: resolve its hardware
 * port/channel numbers from the port vector, allocate a VI via the firmware,
 * and look up the VI's RSS base.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
{
	u8 addr[6];
	int ret, i, j;
	struct port_info *p = adap2pinfo(adap, port_id);
	u32 param, val;
	struct vi_info *vi = &p->vi[0];

	/* Find the (port_id + 1)-th set bit of portvec; its index j is this
	 * port's hardware port number. */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	p->hw_port = j;
	p->tx_chan = t4_get_tx_c_chan(adap, j);
	p->rx_chan = t4_get_rx_c_chan(adap, j);
	p->mps_bg_map = t4_get_mps_bg_map(adap, j);
	p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);

	if (!(adap->flags & IS_VF) ||
	    adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
		t4_update_port_info(p);
	}

	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size,
	    &vi->vfvld, &vi->vin);
	if (ret < 0)
		return ret;

	vi->viid = ret;
	t4_os_set_hw_addr(p, addr);

	/* Query the VI's RSS information; fall back to an invalid base on
	 * failure. */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(vi->viid);
	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
	if (ret)
		vi->rss_base = 0xffff;
	else {
		/* MPASS((val >> 16) == rss_size); */
		vi->rss_base = val & 0xffff;
	}

	return 0;
}

/*
 * Read one IBQ's base/size/threshold from the CIM queue configuration
 * registers on the given uP core.  Any of @base/@size/@thres may be NULL.
 */
static void t4_read_cimq_cfg_ibq_core(struct adapter *adap, u8 coreid, u32 qid,
    u16 *base, u16 *size, u16 *thres)
{
	unsigned int v, m;

	if (chip_id(adap) > CHELSIO_T6) {
		v = F_T7_IBQSELECT | V_T7_QUENUMSELECT(qid) |
		    V_CORESELECT(coreid);
		/* value is in 512-byte units */
		m = 512;
	} else {
		v = F_IBQSELECT | V_QUENUMSELECT(qid);
		/* value is in 256-byte units */
		m = 256;
	}

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, v);
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
	if (base)
		*base = G_CIMQBASE(v) * m;
	if (size)
		*size = G_CIMQSIZE(v) * m;
	if (thres)
		*thres = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
}

/*
 * Read one OBQ's base/size from the CIM queue configuration registers on the
 * given uP core.  Either of @base/@size may be NULL.
 */
static void t4_read_cimq_cfg_obq_core(struct adapter *adap, u8 coreid, u32 qid,
    u16 *base, u16 *size)
{
	unsigned int v, m;

	if (chip_id(adap) > CHELSIO_T6) {
		v = F_T7_OBQSELECT | V_T7_QUENUMSELECT(qid) |
		    V_CORESELECT(coreid);
		/* value is in 512-byte units */
		m = 512;
	} else {
		v = F_OBQSELECT | V_QUENUMSELECT(qid);
		/* value is in 256-byte units */
		m = 256;
	}

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, v);
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
	if (base)
		*base = G_CIMQBASE(v) * m;
	if (size)
		*size = G_CIMQSIZE(v) * m;
}

/**
 *	t4_read_cimq_cfg_core - read CIM queue configuration on specific core
 *	@adap: the adapter
 *	@coreid: the uP coreid
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs, on a specific @coreid.
 */
void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
    u16 *size, u16 *thres)
{
	unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
	unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
	unsigned int i;

	/* IBQ entries come first in the output arrays ... */
	for (i = 0; i < cim_num_ibq; i++, base++, size++, thres++)
		t4_read_cimq_cfg_ibq_core(adap, coreid, i, base, size, thres);

	/* ... followed by the OBQs (which have no threshold). */
	for (i = 0; i < cim_num_obq; i++, base++, size++)
		t4_read_cimq_cfg_obq_core(adap, coreid, i, base, size);
}

/*
 * Read one 32-bit word at @addr from the selected IBQ via the CIM IBQ debug
 * interface.  Returns 0 on success or a t4_wait_op_done() error.
 */
static int t4_read_cim_ibq_data_core(struct adapter *adap, u8 coreid, u32 addr,
    u32 *data)
{
	int ret, attempts;
	unsigned int v;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	if (chip_id(adap) > CHELSIO_T6)
		v = V_T7_IBQDBGADDR(addr) | V_IBQDBGCORE(coreid);
	else
		v = V_IBQDBGADDR(addr);

	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, v | F_IBQDBGEN);
	ret = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
	    attempts, 1);
	if (ret)
		return ret;

	*data = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	return 0;
}

/**
 *	t4_read_cim_ibq_core - read the contents of a CIM inbound queue on
 *	specific core
 *	@adap: the adapter
 *	@coreid: the uP coreid
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data on a specific @coreid.  @n must be a multiple
 *	of 4.  Returns < 0 on error and the number of 32-bit words actually
 *	read on success.
 */
int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
    size_t n)
{
	unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
	u16 i, addr, nwords;
	int ret;

	if (qid > (cim_num_ibq - 1) || (n & 3))
		return -EINVAL;

	t4_read_cimq_cfg_ibq_core(adap, coreid, qid, &addr, &nwords, NULL);
	/* Shift by sizeof(u16) == 2 converts byte counts to 32-bit words;
	 * presumably intentional shorthand for ">> 2" — NOTE(review):
	 * confirm this is not meant to be a divide by sizeof. */
	addr >>= sizeof(u16);
	nwords >>= sizeof(u16);
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++, data++) {
		ret = t4_read_cim_ibq_data_core(adap, coreid, addr, data);
		if (ret < 0)
			return ret;
	}

	/* Disable the debug interface before returning. */
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}

/*
 * Read one 32-bit word at @addr from the selected OBQ via the CIM OBQ debug
 * interface.  Returns 0 on success or a t4_wait_op_done() error.
 */
static int t4_read_cim_obq_data_core(struct adapter *adap, u8 coreid, u32 addr,
    u32 *data)
{
	unsigned int v;
	int ret;

	if (chip_id(adap) > CHELSIO_T6)
		v = V_T7_OBQDBGADDR(addr) | V_OBQDBGCORE(coreid);
	else
		v = V_OBQDBGADDR(addr);

	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, v | F_OBQDBGEN);
	ret = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 2, 1);
	if (ret)
		return ret;

	*data = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	return 0;
}

/**
 *	t4_read_cim_obq_core - read the contents of a CIM outbound queue on
 *	specific core
 *	@adap: the adapter
 *	@coreid: the uP coreid
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data on specific @coreid.  @n must be a multiple
 *	of 4.  Returns < 0 on error and the number of 32-bit words actually
 *	read on success.
 */
int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
    size_t n)
{
	unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
	u16 i, addr, nwords;
	int ret;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	t4_read_cimq_cfg_obq_core(adap, coreid, qid, &addr, &nwords);
	/* Shift by sizeof(u16) == 2 converts byte counts to 32-bit words;
	 * see the matching note in t4_read_cim_ibq_core(). */
	addr >>= sizeof(u16);
	nwords >>= sizeof(u16);
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++, data++) {
		ret = t4_read_cim_obq_data_core(adap, coreid, addr, data);
		if (ret < 0)
			return ret;
	}

	/* Disable the debug interface before returning. */
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

/**
 *	t4_cim_read_core - read a block from CIM internal address space
 *	of a control register group on specific core.
 *	@adap: the adapter
 *	@group: the control register group to select for read
 *	@coreid: the uP coreid
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space
 *	of a control register @group on a specific @coreid.  Returns 0 on
 *	success, -EBUSY if the interface is already busy, or a
 *	t4_wait_op_done() error.
 */
int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
		     unsigned int addr, unsigned int n,
		     unsigned int *valp)
{
	unsigned int hostbusy, v = 0;
	int ret = 0;

	/* T7+ selects group/core explicitly; earlier chips have a single
	 * busy flag and no selection fields. */
	if (chip_id(adap) > CHELSIO_T6) {
		hostbusy = F_T7_HOSTBUSY;
		v = V_HOSTGRPSEL(group) | V_HOSTCORESEL(coreid);
	} else {
		hostbusy = F_HOSTBUSY;
	}

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}

	return ret;
}

/**
 *	t4_cim_write_core - write a block into CIM internal address space
 *	of a control register group on specific core.
 *	@adap: the adapter
 *	@group: the control register group to select for write
 *	@coreid: the uP coreid
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space
 *	of a control register @group on a specific @coreid.
 */
int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
		      unsigned int addr, unsigned int n,
		      const unsigned int *valp)
{
	unsigned int hostbusy, v;
	int ret = 0;

	/* T7+ selects group/core explicitly; earlier chips only carry the
	 * write flag. */
	if (chip_id(adap) > CHELSIO_T6) {
		hostbusy = F_T7_HOSTBUSY;
		v = F_T7_HOSTWRITE | V_HOSTGRPSEL(group) |
		    V_HOSTCORESEL(coreid);
	} else {
		hostbusy = F_HOSTBUSY;
		v = F_HOSTWRITE;
	}

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
		return -EBUSY;

	/* Data must be written before the control register triggers the
	 * operation. */
	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
				      0, 5, 2);
	}

	return ret;
}

/**
 *	t4_cim_read_la_core - read CIM LA capture buffer on specific core
 *	@adap: the adapter
 *	@coreid: uP coreid
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer on a specific @coreid
 *	with the most recent entry at the end of the returned data
 *	and with the entry at @wrptr first.  We try to leave the LA
 *	in the running state we find it in.
 */
int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
			u32 *wrptr)
{
	unsigned int cfg, val, idx;
	int i, ret;

	ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		val = 0;
		ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
		    &val);
		if (ret)
			return ret;
	}

	ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Request a read at idx, then poll for RDEN to clear. */
		val = V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN;
		ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
		    &val);
		if (ret)
			break;
		ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
		    &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_DATA, 1,
		    &la_buf[i]);
		if (ret)
			break;

		/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if ((chip_id(adap) > CHELSIO_T5) && (idx & 0xf) >= 9)
			idx = (idx & 0xff0) + 0x10;
		else
			idx++;
		/* address can't exceed 0xfff */
		idx &= M_UPDBGLARDPTR;
	}
restart:
	if (cfg & F_UPDBGLAEN) {
		int r;

		/* Restart the LA; don't let a restart failure mask an
		 * earlier error. */
		val = cfg & ~F_UPDBGLARDEN;
		r = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
		    &val);
		if (!ret)
			ret = r;
	}

	return ret;
}

/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)	/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
		    adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* In modes >= 2 a half-filled last entry is not valid data. */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	/* Walk the whole buffer, advancing the read pointer each step. */
	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)	/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
		    cfg | adap->params.tp.la_mask);
}

/*
 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300

/**
 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *
 *	Initialize the state of an SGE Ingress DMA Monitor.
 */
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
{
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang.  The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick.  The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds.  For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s.  So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
}

/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers.
These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock.  The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
				    "resumed after %d seconds\n",
				    i, idma->idma_qid[i],
				    idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
		    " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
		    i, idma->idma_qid[i], idma->idma_state[i],
		    idma->idma_stalled[i]/hz,
		    debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}

/**
 *	t4_set_vf_mac - Set MAC address for the specified VF
 *	@adapter: The adapter
 *	@pf: the PF used to instantiate the VFs
 *	@vf: one of the VFs instantiated by the specified PF
 *	@naddr: the number of MAC addresses
 *	@addr: the MAC address(es) to be set to the specified VF
 */
int t4_set_vf_mac(struct adapter *adapter, unsigned int pf, unsigned int vf,
		  unsigned int naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_WRITE |
				    V_FW_ACL_MAC_CMD_PFN(pf) |
11486 V_FW_ACL_MAC_CMD_VFN(vf)); 11487 11488 /* Note: Do not enable the ACL */ 11489 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd)); 11490 cmd.nmac = naddr; 11491 11492 switch (pf) { 11493 case 3: 11494 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3)); 11495 break; 11496 case 2: 11497 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2)); 11498 break; 11499 case 1: 11500 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1)); 11501 break; 11502 case 0: 11503 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0)); 11504 break; 11505 } 11506 11507 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd); 11508 } 11509 11510 /** 11511 * t4_read_pace_tbl - read the pace table 11512 * @adap: the adapter 11513 * @pace_vals: holds the returned values 11514 * 11515 * Returns the values of TP's pace table in microseconds. 11516 */ 11517 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]) 11518 { 11519 unsigned int i, v; 11520 11521 for (i = 0; i < NTX_SCHED; i++) { 11522 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i); 11523 v = t4_read_reg(adap, A_TP_PACE_TABLE); 11524 pace_vals[i] = dack_ticks_to_usec(adap, v); 11525 } 11526 } 11527 11528 /** 11529 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler 11530 * @adap: the adapter 11531 * @sched: the scheduler index 11532 * @kbps: the byte rate in Kbps 11533 * @ipg: the interpacket delay in tenths of nanoseconds 11534 * 11535 * Return the current configuration of a HW Tx scheduler. 
 */
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
		     unsigned int *ipg, bool sleep_ok)
{
    unsigned int v, addr, bpt, cpt;

    if (kbps) {
        /* Two schedulers share each 32-bit rate-limit word. */
        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
        if (sched & 1)
            v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
            *kbps = 0;	/* scheduler disabled */
        else {
            v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
            *kbps = (v * bpt) / 125;
        }
    }
    if (ipg) {
        /* Two schedulers share each 32-bit timer-separator word. */
        addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
        t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
        if (sched & 1)
            v >>= 16;
        v &= 0xffff;
        *ipg = (10000 * v) / core_ticks_per_usec(adap);
    }
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 *	A @size of 0 simply erases the FLASH sectors that hold the config.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
    int ret, i, n, cfg_addr;
    unsigned int addr, len;
    unsigned int flash_cfg_start_sec;

    cfg_addr = t4_flash_cfg_addr(adap, &len);
    if (cfg_addr < 0)
        return cfg_addr;

    if (size > len) {
        CH_ERR(adap, "cfg file too large, max is %u bytes\n", len);
        return -EFBIG;
    }

    flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
    i = DIV_ROUND_UP(len, SF_SEC_SIZE);
    ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
                                 flash_cfg_start_sec + i - 1);
    /*
     * If size == 0 then we're simply erasing the FLASH sectors associated
     * with the on-adapter Firmware Configuration File.
     */
    if (ret || size == 0)
        goto out;

    /* this will write to the flash up to SF_PAGE_SIZE at a time */
    addr = cfg_addr;
    for (i = 0; i < size; i += SF_PAGE_SIZE) {
        n = min(size - i, SF_PAGE_SIZE);
        ret = t4_write_flash(adap, addr, n, cfg_data, 1);
        if (ret)
            goto out;
        addr += SF_PAGE_SIZE;
        cfg_data += SF_PAGE_SIZE;
    }

out:
    if (ret)
        CH_ERR(adap, "config file %s failed %d\n",
               (size == 0 ? "clear" : "download"), ret);
    return ret;
}

/**
 *	t5_fw_init_extern_mem - initialize the external memory
 *	@adap: the adapter
 *
 *	Initializes the external memory on T5.  No-op on other chips.
 */
int t5_fw_init_extern_mem(struct adapter *adap)
{
    u32 params[1], val[1];
    int ret;

    if (!is_t5(adap))
        return 0;

    val[0] = 0xff;		/* Initialize all MCs */
    params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
    /* MC init can take a long time; use the maximum FW command timeout. */
    ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
                                FW_CMD_MAX_TIMEOUT);

    return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
    u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
    u8	reserved[22];	/* Reserved per processor Architecture data */
    u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;	/* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
    u8	signature[2];	/* ROM Signature.
Should be 0xaa55 */
    u8	size512;	/* Current Image Size in units of 512 bytes */
    u8	initentry_point[4];
    u8	cksum;		/* Checksum computed on the entire Image */
    u8	reserved[16];	/* Reserved */
    u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
    u8	signature[2];		/* ROM signature. The value 0xaa55 */
    u8	initialization_size[2];	/* Units 512. Includes this header */
    u8	efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
    u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
    u8	efi_machine_type[2];	/* Machine type from EFI image header */
    u8	compression_type[2];	/* Compression type. */
        /*
         * Compression type definition
         * 0x0: uncompressed
         * 0x1: Compressed
         * 0x2-0xFFFF: Reserved
         */
    u8	reserved[8];		/* Reserved */
    u8	efi_image_header_offset[2];	/* Offset to EFI Image */
    u8	pcir_offset[2];		/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
    u8	signature[4];	/* Signature. The string "PCIR" */
    u8	vendor_id[2];	/* Vendor Identification */
    u8	device_id[2];	/* Device Identification */
    u8	vital_product[2];	/* Pointer to Vital Product Data */
    u8	length[2];	/* PCIR Data Structure Length */
    u8	revision;	/* PCIR Data Structure Revision */
    u8	class_code[3];	/* Class Code */
    u8	image_length[2];	/* Image Length. Multiple of 512B */
    u8	code_revision[2];	/* Revision Level of Code/Data */
    u8	code_type;	/* Code Type. */
        /*
         * PCI Expansion ROM Code Types
         * 0x00: Intel IA-32, PC-AT compatible. Legacy
         * 0x01: Open Firmware standard for PCI. FCODE
         * 0x02: Hewlett-Packard PA RISC. HP reserved
         * 0x03: EFI Image. EFI
         * 0x04-0xFF: Reserved.
         */
    u8	indicator;	/* Indicator. Identifies the last image in the ROM */
    u8	reserved[2];	/* Reserved */
} pcir_data_t;	/* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
    BOOT_FLASH_BOOT_ADDR = 0x0,	/* start address of boot image in flash */
    BOOT_SIGNATURE = 0xaa55,	/* signature of BIOS boot ROM */
    BOOT_SIZE_INC = 512,	/* image size measured in 512B chunks */
    BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t),	/* basic header */
    BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC,	/* 1024 * 512B increments = 512KB */
    VENDOR_ID = 0x1425,		/* Chelsio PCI Vendor ID */
    PCIR_SIGNATURE = 0x52494350	/* "PCIR" signature, little-endian */
};

/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write.
 *	@boot_data: the boot image to modify.
 *
 *	Write the supplied device ID to the boot BIOS image, patching the
 *	PCIR Data Structure of every chained image whose code type permits
 *	it and fixing up the legacy image checksum.
 *
 *	NOTE(review): termination relies entirely on the image's indicator
 *	bit; a malformed image with size512 == 0 and no indicator would not
 *	advance — callers are expected to pass a sanity-checked image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
    legacy_pci_exp_rom_header_t *header;
    pcir_data_t *pcir_header;
    u32 cur_header = 0;

    /*
     * Loop through all chained images and change the device ID's
     */
    while (1) {
        header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
        pcir_header = (pcir_data_t *) &boot_data[cur_header +
                      le16_to_cpu(*(u16*)header->pcir_offset)];

        /*
         * Only modify the Device ID if code type is Legacy or HP.
         * 0x00: Okay to modify
         * 0x01: FCODE. Do not modify
         * 0x03: Okay to modify
         * 0x04-0xFF: Do not modify
         */
        if (pcir_header->code_type == 0x00) {
            u8 csum = 0;
            int i;

            /*
             * Modify Device ID to match current adapter
             */
            *(u16*) pcir_header->device_id = device_id;

            /*
             * Set checksum temporarily to 0.
             * We will recalculate it later.
             */
            header->cksum = 0x0;

            /*
             * Calculate and update checksum
             */
            for (i = 0; i < (header->size512 * 512); i++)
                csum += (u8)boot_data[cur_header + i];

            /*
             * Invert summed value to create the checksum
             * Writing new checksum value directly to the boot data
             */
            boot_data[cur_header + 7] = -csum;

        } else if (pcir_header->code_type == 0x03) {

            /*
             * Modify Device ID to match current adapter
             */
            *(u16*) pcir_header->device_id = device_id;

        }


        /*
         * Check indicator element to identify if this is the last
         * image in the ROM.
         */
        if (pcir_header->indicator & 0x80)
            break;

        /*
         * Move header pointer up to the next image in the ROM.
         */
        cur_header += header->size512 * 512;
    }
}

/*
 *	t4_load_boot - download boot flash
 *	@adap: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
    pci_exp_rom_header_t *header;
    int pcir_offset;
    pcir_data_t *pcir_header;
    int ret, addr;
    uint16_t device_id;
    unsigned int i, start, len;
    unsigned int boot_sector = boot_addr * 1024;

    /*
     * Make sure the boot image does not exceed its available space.
     */
    len = 0;
    start = t4_flash_loc_start(adap, FLASH_LOC_BOOT_AREA, &len);
    if (boot_sector + size > start + len) {
        CH_ERR(adap, "boot data is larger than available BOOT area\n");
        return -EFBIG;
    }

    /*
     * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
     * and Boot configuration data sections. These 3 boot sections span
     * the entire FLASH_LOC_BOOT_AREA.
     */
    i = DIV_ROUND_UP(size ? size : len, SF_SEC_SIZE);
    ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
                                 (boot_sector >> 16) + i - 1);

    /*
     * If size == 0 then we're simply erasing the FLASH sectors associated
     * with the on-adapter option ROM file
     */
    if (ret || (size == 0))
        goto out;

    /* Get boot header */
    header = (pci_exp_rom_header_t *)boot_data;
    pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
    /* PCIR Data Structure */
    pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

    /*
     * Perform some primitive sanity testing to avoid accidentally
     * writing garbage over the boot sectors. We ought to check for
     * more but it's not worth it for now ...
     *
     * NOTE(review): these checks run after the erase above, so a
     * rejected image leaves the boot area blank — confirm intended.
     */
    if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
        CH_ERR(adap, "boot image too small/large\n");
        return -EFBIG;
    }

#ifndef CHELSIO_T4_DIAGS
    /*
     * Check BOOT ROM header signature
     */
    if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
        CH_ERR(adap, "Boot image missing signature\n");
        return -EINVAL;
    }

    /*
     * Check PCI header signature
     */
    if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
        CH_ERR(adap, "PCI header missing signature\n");
        return -EINVAL;
    }

    /*
     * Check Vendor ID matches Chelsio ID
     */
    if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
        CH_ERR(adap, "Vendor ID missing signature\n");
        return -EINVAL;
    }
#endif

    /*
     * Retrieve adapter's device ID
     */
    t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
    /* Want to deal with PF 0 so I strip off PF 4 indicator */
    device_id = device_id & 0xf0ff;

    /*
     * Check PCIE Device ID
     */
    if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
        /*
         * Change the device ID in the Boot BIOS image to match
         * the Device ID of the current adapter.
         */
        modify_device_id(device_id, boot_data);
    }

    /*
     * Skip over the first SF_PAGE_SIZE worth of data and write it after
     * we finish copying the rest of the boot image. This will ensure
     * that the BIOS boot header will only be written if the boot image
     * was written in full.
     *
     * NOTE(review): the loop below assumes @size is a multiple of
     * SF_PAGE_SIZE (boot images are 512B-granular) — confirm callers.
     */
    addr = boot_sector;
    for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
        addr += SF_PAGE_SIZE;
        boot_data += SF_PAGE_SIZE;
        ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
        if (ret)
            goto out;
    }

    /* Header page goes last so a partial download never looks bootable. */
    ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
                         (const u8 *)header, 0);

out:
    if (ret)
        CH_ERR(adap, "boot image download failed, error %d\n", ret);
    return ret;
}

/*
 *	t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
 *	@adapter: the adapter
 *	@lenp: if non-NULL, receives the length of the OptionROM Config region
 *
 *	Return the address within the flash where the OptionROM Configuration
 *	is stored, or an error if the device FLASH is too small to contain
 *	a OptionROM Configuration.
 */
static int t4_flash_bootcfg_addr(struct adapter *adapter, unsigned int *lenp)
{
    unsigned int len = 0;
    const int start = t4_flash_loc_start(adapter, FLASH_LOC_BOOTCFG, &len);

    /*
     * If the device FLASH isn't large enough to hold a Firmware
     * Configuration File, return an error.
11939 */ 11940 if (adapter->params.sf_size < start + len) 11941 return -ENOSPC; 11942 if (lenp != NULL) 11943 *lenp = len; 11944 return (start); 11945 } 11946 11947 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size) 11948 { 11949 int ret, i, n, cfg_addr; 11950 unsigned int addr, len; 11951 unsigned int flash_cfg_start_sec; 11952 11953 cfg_addr = t4_flash_bootcfg_addr(adap, &len); 11954 if (cfg_addr < 0) 11955 return cfg_addr; 11956 11957 if (size > len) { 11958 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", len); 11959 return -EFBIG; 11960 } 11961 11962 flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE; 11963 i = DIV_ROUND_UP(len, SF_SEC_SIZE); 11964 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, 11965 flash_cfg_start_sec + i - 1); 11966 11967 /* 11968 * If size == 0 then we're simply erasing the FLASH sectors associated 11969 * with the on-adapter OptionROM Configuration File. 11970 */ 11971 if (ret || size == 0) 11972 goto out; 11973 11974 /* this will write to the flash up to SF_PAGE_SIZE at a time */ 11975 addr = cfg_addr; 11976 for (i = 0; i < size; i += SF_PAGE_SIZE) { 11977 n = min(size - i, SF_PAGE_SIZE); 11978 ret = t4_write_flash(adap, addr, n, cfg_data, 0); 11979 if (ret) 11980 goto out; 11981 addr += SF_PAGE_SIZE; 11982 cfg_data += SF_PAGE_SIZE; 11983 } 11984 11985 out: 11986 if (ret) 11987 CH_ERR(adap, "boot config data %s failed %d\n", 11988 (size == 0 ? "clear" : "download"), ret); 11989 return ret; 11990 } 11991 11992 /** 11993 * t4_set_filter_cfg - set up filter mode/mask and ingress config. 11994 * @adap: the adapter 11995 * @mode: a bitmap selecting which optional filter components to enable 11996 * @mask: a bitmap selecting which components to enable in filter mask 11997 * @vnic_mode: the ingress config/vnic mode setting 11998 * 11999 * Sets the filter mode and mask by selecting the optional components to 12000 * enable in filter tuples. 
Returns 0 on success and a negative error if 12001 * the requested mode needs more bits than are available for optional 12002 * components. The filter mask must be a subset of the filter mode. 12003 */ 12004 int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode) 12005 { 12006 int i, nbits, rc; 12007 uint32_t param, val; 12008 uint16_t fmode, fmask; 12009 const int maxbits = adap->chip_params->filter_opt_len; 12010 const int nopt = adap->chip_params->filter_num_opt; 12011 int width; 12012 12013 if (mode != -1 || mask != -1) { 12014 if (mode != -1) { 12015 fmode = mode; 12016 nbits = 0; 12017 for (i = 0; i < nopt; i++) { 12018 if (fmode & (1 << i)) 12019 nbits += t4_filter_field_width(adap, i); 12020 } 12021 if (nbits > maxbits) { 12022 CH_ERR(adap, "optional fields in the filter " 12023 "mode (0x%x) add up to %d bits " 12024 "(must be <= %db). Remove some fields and " 12025 "try again.\n", fmode, nbits, maxbits); 12026 return -E2BIG; 12027 } 12028 12029 /* 12030 * Hardware < T7 wants the bits to be maxed out. Keep 12031 * setting them until there's no room for more. 
12032 */ 12033 if (chip_id(adap) < CHELSIO_T7) { 12034 for (i = 0; i < nopt; i++) { 12035 if (fmode & (1 << i)) 12036 continue; 12037 width = t4_filter_field_width(adap, i); 12038 if (nbits + width <= maxbits) { 12039 fmode |= 1 << i; 12040 nbits += width; 12041 if (nbits == maxbits) 12042 break; 12043 } 12044 } 12045 } 12046 12047 fmask = fmode & adap->params.tp.filter_mask; 12048 if (fmask != adap->params.tp.filter_mask) { 12049 CH_WARN(adap, 12050 "filter mask will be changed from 0x%x to " 12051 "0x%x to comply with the filter mode (0x%x).\n", 12052 adap->params.tp.filter_mask, fmask, fmode); 12053 } 12054 } else { 12055 fmode = adap->params.tp.filter_mode; 12056 fmask = mask; 12057 if ((fmode | fmask) != fmode) { 12058 CH_ERR(adap, 12059 "filter mask (0x%x) must be a subset of " 12060 "the filter mode (0x%x).\n", fmask, fmode); 12061 return -EINVAL; 12062 } 12063 } 12064 12065 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 12066 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) | 12067 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK); 12068 val = V_FW_PARAMS_PARAM_FILTER_MODE(fmode) | 12069 V_FW_PARAMS_PARAM_FILTER_MASK(fmask); 12070 rc = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, 12071 &val); 12072 if (rc < 0) 12073 return rc; 12074 } 12075 12076 if (vnic_mode != -1) { 12077 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 12078 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) | 12079 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE); 12080 val = vnic_mode; 12081 rc = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, 12082 &val); 12083 if (rc < 0) 12084 return rc; 12085 } 12086 12087 /* Refresh. */ 12088 read_filter_mode_and_ingress_config(adap); 12089 12090 return 0; 12091 } 12092 12093 /** 12094 * t4_clr_port_stats - clear port statistics 12095 * @adap: the adapter 12096 * @idx: the port index 12097 * 12098 * Clear HW statistics for the given port. 
 */
void t4_clr_port_stats(struct adapter *adap, int idx)
{
    struct port_info *pi;
    int i, port_id, tx_chan;
    u32 bgmap, port_base_addr;

    port_id = adap->port_map[idx];
    MPASS(port_id >= 0 && port_id <= adap->params.nports);
    pi = adap->port[port_id];

    /* Clear the TX and RX MPS port-statistics register ranges for every
     * channel (including loopback channels) owned by this port.
     */
    for (tx_chan = pi->tx_chan;
        tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
        port_base_addr = t4_port_reg(adap, tx_chan, 0);

        for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
             i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
            t4_write_reg(adap, port_base_addr + i, 0);
        for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
             i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
            t4_write_reg(adap, port_base_addr + i, 0);
    }
    /* Clear the drop/truncate counters of each buffer group this port
     * maps to.
     */
    bgmap = pi->mps_bg_map;
    for (i = 0; i < 4; i++)
        if (bgmap & (1 << i)) {
            t4_write_reg(adap,
                A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
            t4_write_reg(adap,
                A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
        }
}

/**
 *	t4_i2c_io - read/write I2C data from adapter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: Port number if per-port device; <0 if not
 *	@devid: per-port device ID or absolute device ID
 *	@offset: byte offset into device I2C space
 *	@len: byte length of I2C space data
 *	@buf: buffer in which to return I2C data for read
 *	      buffer which holds the I2C data for write
 *	@write: if true, do a write; else do a read
 *
 *	Reads/Writes the I2C data from/to the indicated device and location.
 */
int t4_i2c_io(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf, bool write)
{
    struct fw_ldst_cmd ldst_cmd, ldst_rpl;
    unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
    int ret = 0;

    if (len > I2C_PAGE_SIZE)
        return -EINVAL;

    /* Don't allow reads that span multiple pages */
    if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
        return -EINVAL;

    memset(&ldst_cmd, 0, sizeof(ldst_cmd));
    ldst_cmd.op_to_addrspace =
        cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
                    F_FW_CMD_REQUEST |
                    (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
                    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
    ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
    ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
    ldst_cmd.u.i2c.did = devid;

    /* Transfer in chunks of at most i2c_max (the FW command's data
     * buffer size) bytes per mailbox command.
     */
    while (len > 0) {
        unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;

        ldst_cmd.u.i2c.boffset = offset;
        ldst_cmd.u.i2c.blen = i2c_len;

        if (write)
            memcpy(ldst_cmd.u.i2c.data, buf, i2c_len);

        ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
                         write ? NULL : &ldst_rpl);
        if (ret)
            break;

        if (!write)
            memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
        offset += i2c_len;
        buf += i2c_len;
        len -= i2c_len;
    }

    return ret;
}

/* Convenience wrapper: I2C read via t4_i2c_io(). */
int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
    return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, false);
}

/* Convenience wrapper: I2C write via t4_i2c_io(). */
int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
    return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, true);
}

/**
 *	t4_sge_ctxt_rd - read an SGE context through FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@cid: the context id
 *	@ctype: the context type
 *	@data: where to store the context data
 *
 *	Issues a FW command through the given mailbox to read an SGE context.
 */
int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
		   enum ctxt_type ctype, u32 *data)
{
    int ret;
    struct fw_ldst_cmd c;

    /* Map the context type to the corresponding FW LDST address space;
     * anything unrecognized falls through to the congestion-manager
     * context space.
     */
    if (ctype == CTXT_EGRESS)
        ret = FW_LDST_ADDRSPC_SGE_EGRC;
    else if (ctype == CTXT_INGRESS)
        ret = FW_LDST_ADDRSPC_SGE_INGC;
    else if (ctype == CTXT_FLM)
        ret = FW_LDST_ADDRSPC_SGE_FLMC;
    else
        ret = FW_LDST_ADDRSPC_SGE_CONMC;

    memset(&c, 0, sizeof(c));
    c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
                                    F_FW_CMD_REQUEST | F_FW_CMD_READ |
                                    V_FW_LDST_CMD_ADDRSPACE(ret));
    c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
    c.u.idctxt.physid = cpu_to_be32(cid);

    ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
    if (ret == 0) {
        data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
        data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
        data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
        data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
        data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
        data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
        /* T7+ contexts carry a seventh data word. */
        if (chip_id(adap) > CHELSIO_T6)
            data[6] = be32_to_cpu(c.u.idctxt.ctxt_data6);
    }
    return ret;
}

/**
 *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
 *	@adap: the adapter
 *	@cid: the context id
 *	@ctype: the context type
 *	@data: where to store the context data
 *
 *	Reads an SGE context directly, bypassing FW.  This is only for
 *	debugging when FW is unavailable.
 */
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
		      u32 *data)
{
    int i, ret;

    t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
    ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
    if (!ret) {
        for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
            *data++ = t4_read_reg(adap, i);
        /* NOTE(review): after the loop, i points one word past
         * A_SGE_CTXT_DATA5 — presumably the DATA6 register on chips
         * newer than T6; confirm the register layout.
         */
        if (chip_id(adap) > CHELSIO_T6)
            *data++ = t4_read_reg(adap, i);
    }
    return ret;
}

/* Configure (enable/disable) a Tx scheduler and its min/max rate limiting
 * via the FW_SCHED "config" sub-command.
 */
int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
		    int sleep_ok)
{
    struct fw_sched_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));
    cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
                                  F_FW_CMD_REQUEST |
                                  F_FW_CMD_WRITE);
    cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

    cmd.u.config.sc = FW_SCHED_SC_CONFIG;
    cmd.u.config.type = type;
    cmd.u.config.minmaxen = minmaxen;

    return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
                           NULL, sleep_ok);
}

/* Program a full set of Tx scheduler parameters via the FW_SCHED "params"
 * sub-command.  Single-byte fields go in as-is; multi-byte fields are
 * converted to big-endian for the firmware.
 */
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
		    int rateunit, int ratemode, int channel, int cl,
		    int minrate, int maxrate, int weight, int pktsize,
		    int burstsize, int sleep_ok)
{
    struct fw_sched_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));
    cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
                                  F_FW_CMD_REQUEST |
                                  F_FW_CMD_WRITE);
    cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

    cmd.u.params.sc = FW_SCHED_SC_PARAMS;
    cmd.u.params.type = type;
    cmd.u.params.level = level;
    cmd.u.params.mode = mode;
    cmd.u.params.ch = channel;
    cmd.u.params.cl = cl;
    cmd.u.params.unit = rateunit;
    cmd.u.params.rate = ratemode;
    cmd.u.params.min = cpu_to_be32(minrate);
    cmd.u.params.max = cpu_to_be32(maxrate);
    cmd.u.params.weight = cpu_to_be16(weight);
    cmd.u.params.pktsize = cpu_to_be16(pktsize);
    cmd.u.params.burstsize = cpu_to_be16(burstsize);

    return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
                           NULL, sleep_ok);
}

/* Set a channel rate-limiter (packet scheduler, CH_RL level). */
int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
			  unsigned int maxrate, int sleep_ok)
{
    struct fw_sched_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));
    cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
                                  F_FW_CMD_REQUEST |
                                  F_FW_CMD_WRITE);
    cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

    cmd.u.params.sc = FW_SCHED_SC_PARAMS;
    cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
    cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
    cmd.u.params.ch = channel;
    cmd.u.params.rate = ratemode;		/* REL or ABS */
    cmd.u.params.max = cpu_to_be32(maxrate);	/* % or kbps */

    return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
                           NULL, sleep_ok);
}

/* Set a traffic class's weighted-round-robin weight (CL_WRR level).
 * @weight is a percentage and must be in [0, 100].
 */
int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
			   int weight, int sleep_ok)
{
    struct fw_sched_cmd cmd;

    if (weight < 0 || weight > 100)
        return -EINVAL;

    memset(&cmd, 0, sizeof(cmd));
    cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
                                  F_FW_CMD_REQUEST |
                                  F_FW_CMD_WRITE);
    cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

    cmd.u.params.sc = FW_SCHED_SC_PARAMS;
    cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
    cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
    cmd.u.params.ch = channel;
    cmd.u.params.cl = cl;
    cmd.u.params.weight = cpu_to_be16(weight);

    return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
                           NULL, sleep_ok);
}

/* Set a traffic class's absolute bit-rate limit in kbps (CL_RL level). */
int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
			       int mode, unsigned int maxrate, int pktsize, int sleep_ok)
{
struct fw_sched_cmd cmd; 12382 12383 memset(&cmd, 0, sizeof(cmd)); 12384 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 12385 F_FW_CMD_REQUEST | 12386 F_FW_CMD_WRITE); 12387 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 12388 12389 cmd.u.params.sc = FW_SCHED_SC_PARAMS; 12390 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED; 12391 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL; 12392 cmd.u.params.mode = mode; 12393 cmd.u.params.ch = channel; 12394 cmd.u.params.cl = cl; 12395 cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE; 12396 cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS; 12397 cmd.u.params.max = cpu_to_be32(maxrate); 12398 cmd.u.params.pktsize = cpu_to_be16(pktsize); 12399 12400 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 12401 NULL, sleep_ok); 12402 } 12403 12404 /* 12405 * t4_config_watchdog - configure (enable/disable) a watchdog timer 12406 * @adapter: the adapter 12407 * @mbox: mailbox to use for the FW command 12408 * @pf: the PF owning the queue 12409 * @vf: the VF owning the queue 12410 * @timeout: watchdog timeout in ms 12411 * @action: watchdog timer / action 12412 * 12413 * There are separate watchdog timers for each possible watchdog 12414 * action. Configure one of the watchdog timers by setting a non-zero 12415 * timeout. Disable a watchdog timer by using a timeout of zero. 12416 */ 12417 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox, 12418 unsigned int pf, unsigned int vf, 12419 unsigned int timeout, unsigned int action) 12420 { 12421 struct fw_watchdog_cmd wdog; 12422 unsigned int ticks; 12423 12424 /* 12425 * The watchdog command expects a timeout in units of 10ms so we need 12426 * to convert it here (via rounding) and force a minimum of one 10ms 12427 * "tick" if the timeout is non-zero but the conversion results in 0 12428 * ticks. 
12429 */ 12430 ticks = (timeout + 5)/10; 12431 if (timeout && !ticks) 12432 ticks = 1; 12433 12434 memset(&wdog, 0, sizeof wdog); 12435 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) | 12436 F_FW_CMD_REQUEST | 12437 F_FW_CMD_WRITE | 12438 V_FW_PARAMS_CMD_PFN(pf) | 12439 V_FW_PARAMS_CMD_VFN(vf)); 12440 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog)); 12441 wdog.timeout = cpu_to_be32(ticks); 12442 wdog.action = cpu_to_be32(action); 12443 12444 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL); 12445 } 12446 12447 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level) 12448 { 12449 struct fw_devlog_cmd devlog_cmd; 12450 int ret; 12451 12452 memset(&devlog_cmd, 0, sizeof(devlog_cmd)); 12453 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 12454 F_FW_CMD_REQUEST | F_FW_CMD_READ); 12455 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 12456 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, 12457 sizeof(devlog_cmd), &devlog_cmd); 12458 if (ret) 12459 return ret; 12460 12461 *level = devlog_cmd.level; 12462 return 0; 12463 } 12464 12465 int t4_set_devlog_level(struct adapter *adapter, unsigned int level) 12466 { 12467 struct fw_devlog_cmd devlog_cmd; 12468 12469 memset(&devlog_cmd, 0, sizeof(devlog_cmd)); 12470 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 12471 F_FW_CMD_REQUEST | 12472 F_FW_CMD_WRITE); 12473 devlog_cmd.level = level; 12474 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 12475 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, 12476 sizeof(devlog_cmd), &devlog_cmd); 12477 } 12478 12479 int t4_configure_add_smac(struct adapter *adap) 12480 { 12481 unsigned int param, val; 12482 int ret = 0; 12483 12484 adap->params.smac_add_support = 0; 12485 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 12486 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC)); 12487 /* Query FW to check if FW supports adding source mac address 12488 * to TCAM feature or not. 
12489 * If FW returns 1, driver can use this feature and driver need to send 12490 * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to 12491 * enable adding smac to TCAM. 12492 */ 12493 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); 12494 if (ret) 12495 return ret; 12496 12497 if (val == 1) { 12498 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, 12499 ¶m, &val); 12500 if (!ret) 12501 /* Firmware allows adding explicit TCAM entries. 12502 * Save this internally. 12503 */ 12504 adap->params.smac_add_support = 1; 12505 } 12506 12507 return ret; 12508 } 12509 12510 int t4_configure_ringbb(struct adapter *adap) 12511 { 12512 unsigned int param, val; 12513 int ret = 0; 12514 12515 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 12516 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE)); 12517 /* Query FW to check if FW supports ring switch feature or not. 12518 * If FW returns 1, driver can use this feature and driver need to send 12519 * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to 12520 * enable the ring backbone configuration. 
12521 */ 12522 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); 12523 if (ret < 0) { 12524 CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n", 12525 ret); 12526 goto out; 12527 } 12528 12529 if (val != 1) { 12530 CH_ERR(adap, "FW doesnot support ringbackbone features\n"); 12531 goto out; 12532 } 12533 12534 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); 12535 if (ret < 0) { 12536 CH_ERR(adap, "Could not set Ringbackbone, err= %d\n", 12537 ret); 12538 goto out; 12539 } 12540 12541 out: 12542 return ret; 12543 } 12544 12545 /* 12546 * t4_set_vlan_acl - Set a VLAN id for the specified VF 12547 * @adapter: the adapter 12548 * @mbox: mailbox to use for the FW command 12549 * @vf: one of the VFs instantiated by the specified PF 12550 * @vlan: The vlanid to be set 12551 * 12552 */ 12553 int t4_set_vlan_acl(struct adapter *adap, unsigned int pf, unsigned int vf, 12554 u16 vlan) 12555 { 12556 struct fw_acl_vlan_cmd vlan_cmd; 12557 unsigned int enable; 12558 12559 enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0); 12560 memset(&vlan_cmd, 0, sizeof(vlan_cmd)); 12561 vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) | 12562 F_FW_CMD_REQUEST | 12563 F_FW_CMD_WRITE | 12564 F_FW_CMD_EXEC | 12565 V_FW_ACL_VLAN_CMD_PFN(pf) | 12566 V_FW_ACL_VLAN_CMD_VFN(vf)); 12567 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd) | 12568 V_FW_ACL_VLAN_CMD_PMASK(1 << pf)); 12569 /* Drop all packets that donot match vlan id */ 12570 vlan_cmd.dropnovlan_fm = (enable 12571 ? 
(F_FW_ACL_VLAN_CMD_DROPNOVLAN | 12572 F_FW_ACL_VLAN_CMD_FM) 12573 : 0); 12574 if (enable != 0) { 12575 vlan_cmd.nvlan = 1; 12576 vlan_cmd.vlanid[0] = cpu_to_be16(vlan); 12577 } 12578 12579 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL); 12580 } 12581 12582 /** 12583 * t4_del_mac - Removes the exact-match filter for a MAC address 12584 * @adap: the adapter 12585 * @mbox: mailbox to use for the FW command 12586 * @viid: the VI id 12587 * @addr: the MAC address value 12588 * @smac: if true, delete from only the smac region of MPS 12589 * 12590 * Modifies an exact-match filter and sets it to the new MAC address if 12591 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 12592 * latter case the address is added persistently if @persist is %true. 12593 * 12594 * Returns a negative error number or the index of the filter with the new 12595 * MAC value. Note that this index may differ from @idx. 12596 */ 12597 int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 12598 const u8 *addr, bool smac) 12599 { 12600 int ret; 12601 struct fw_vi_mac_cmd c; 12602 struct fw_vi_mac_exact *p = c.u.exact; 12603 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size; 12604 12605 memset(&c, 0, sizeof(c)); 12606 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 12607 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 12608 V_FW_VI_MAC_CMD_VIID(viid)); 12609 c.freemacs_to_len16 = cpu_to_be32( 12610 V_FW_CMD_LEN16(1) | 12611 (smac ? 
F_FW_VI_MAC_CMD_IS_SMAC : 0)); 12612 12613 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 12614 p->valid_to_idx = cpu_to_be16( 12615 F_FW_VI_MAC_CMD_VALID | 12616 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE)); 12617 12618 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 12619 if (ret == 0) { 12620 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); 12621 if (ret < max_mac_addr) 12622 return -ENOMEM; 12623 } 12624 12625 return ret; 12626 } 12627 12628 /** 12629 * t4_add_mac - Adds an exact-match filter for a MAC address 12630 * @adap: the adapter 12631 * @mbox: mailbox to use for the FW command 12632 * @viid: the VI id 12633 * @idx: index of existing filter for old value of MAC address, or -1 12634 * @addr: the new MAC address value 12635 * @persist: whether a new MAC allocation should be persistent 12636 * @add_smt: if true also add the address to the HW SMT 12637 * @smac: if true, update only the smac region of MPS 12638 * 12639 * Modifies an exact-match filter and sets it to the new MAC address if 12640 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 12641 * latter case the address is added persistently if @persist is %true. 12642 * 12643 * Returns a negative error number or the index of the filter with the new 12644 * MAC value. Note that this index may differ from @idx. 12645 */ 12646 int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 12647 int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac) 12648 { 12649 int ret, mode; 12650 struct fw_vi_mac_cmd c; 12651 struct fw_vi_mac_exact *p = c.u.exact; 12652 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size; 12653 12654 if (idx < 0) /* new allocation */ 12655 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 12656 mode = smt_idx ? 
FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; 12657 12658 memset(&c, 0, sizeof(c)); 12659 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 12660 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 12661 V_FW_VI_MAC_CMD_VIID(viid)); 12662 c.freemacs_to_len16 = cpu_to_be32( 12663 V_FW_CMD_LEN16(1) | 12664 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0)); 12665 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | 12666 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | 12667 V_FW_VI_MAC_CMD_IDX(idx)); 12668 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 12669 12670 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 12671 if (ret == 0) { 12672 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); 12673 if (ret >= max_mac_addr) 12674 return -ENOMEM; 12675 if (smt_idx) { 12676 /* Does fw supports returning smt_idx? */ 12677 if (adap->params.viid_smt_extn_support) 12678 *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid)); 12679 else { 12680 /* In T4/T5, SMT contains 256 SMAC entries 12681 * organized in 128 rows of 2 entries each. 12682 * In T6, SMT contains 256 SMAC entries in 12683 * 256 rows. 12684 */ 12685 if (chip_id(adap) <= CHELSIO_T5) 12686 *smt_idx = ((viid & M_FW_VIID_VIN) << 1); 12687 else 12688 *smt_idx = (viid & M_FW_VIID_VIN); 12689 } 12690 } 12691 } 12692 12693 return ret; 12694 } 12695