1 /*- 2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/eventhandler.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

/*
 * Override msleep() with a variant that is safe to use before the scheduler
 * is running: busy-wait with DELAY() while the kernel is cold, otherwise
 * sleep via pause().  The argument is a duration in milliseconds.
 */
#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
62 */ 63 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 64 int polarity, int attempts, int delay, u32 *valp) 65 { 66 while (1) { 67 u32 val = t4_read_reg(adapter, reg); 68 69 if (!!(val & mask) == polarity) { 70 if (valp) 71 *valp = val; 72 return 0; 73 } 74 if (--attempts == 0) 75 return -EAGAIN; 76 if (delay) 77 udelay(delay); 78 } 79 } 80 81 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, 82 int polarity, int attempts, int delay) 83 { 84 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, 85 delay, NULL); 86 } 87 88 /** 89 * t4_set_reg_field - set a register field to a value 90 * @adapter: the adapter to program 91 * @addr: the register address 92 * @mask: specifies the portion of the register to modify 93 * @val: the new value for the register field 94 * 95 * Sets a register field specified by the supplied mask to the 96 * given value. 97 */ 98 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, 99 u32 val) 100 { 101 u32 v = t4_read_reg(adapter, addr) & ~mask; 102 103 t4_write_reg(adapter, addr, v | val); 104 (void) t4_read_reg(adapter, addr); /* flush */ 105 } 106 107 /** 108 * t4_read_indirect - read indirectly addressed registers 109 * @adap: the adapter 110 * @addr_reg: register holding the indirect address 111 * @data_reg: register holding the value of the indirect register 112 * @vals: where the read register values are stored 113 * @nregs: how many indirect registers to read 114 * @start_idx: index of first indirect register to read 115 * 116 * Reads registers that are accessed indirectly through an address/data 117 * register pair. 
118 */ 119 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 120 unsigned int data_reg, u32 *vals, 121 unsigned int nregs, unsigned int start_idx) 122 { 123 while (nregs--) { 124 t4_write_reg(adap, addr_reg, start_idx); 125 *vals++ = t4_read_reg(adap, data_reg); 126 start_idx++; 127 } 128 } 129 130 /** 131 * t4_write_indirect - write indirectly addressed registers 132 * @adap: the adapter 133 * @addr_reg: register holding the indirect addresses 134 * @data_reg: register holding the value for the indirect registers 135 * @vals: values to write 136 * @nregs: how many indirect registers to write 137 * @start_idx: address of first indirect register to write 138 * 139 * Writes a sequential block of registers that are accessed indirectly 140 * through an address/data register pair. 141 */ 142 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 143 unsigned int data_reg, const u32 *vals, 144 unsigned int nregs, unsigned int start_idx) 145 { 146 while (nregs--) { 147 t4_write_reg(adap, addr_reg, start_idx++); 148 t4_write_reg(adap, data_reg, *vals++); 149 } 150 } 151 152 /* 153 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor 154 * mechanism. This guarantees that we get the real value even if we're 155 * operating within a Virtual Machine and the Hypervisor is trapping our 156 * Configuration Space accesses. 157 * 158 * N.B. This routine should only be used as a last resort: the firmware uses 159 * the backdoor registers on a regular basis and we can end up 160 * conflicting with it's uses! 
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
	u32 val;

	/* The request-enable bit is in a different position on T6+. */
	if (chip_id(adap) <= CHELSIO_T5)
		req |= F_ENABLE;
	else
		req |= F_T6_ENABLE;

	if (is_t4(adap))
		req |= F_LOCALCFG;

	/* Issue the backdoor request, then read the returned data word. */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
	val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);

	/*
	 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * F_ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);

	return val;
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		/*
		 * NOTE(review): reason[] has 8 entries; this assumes
		 * G_PCIE_FW_EVAL() extracts a 3-bit code — confirm the field
		 * width in t4_regs.h.
		 */
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
219 */ 220 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, 221 u32 mbox_addr) 222 { 223 for ( ; nflit; nflit--, mbox_addr += 8) 224 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); 225 } 226 227 /* 228 * Handle a FW assertion reported in a mailbox. 229 */ 230 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt) 231 { 232 CH_ALERT(adap, 233 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", 234 asrt->u.assert.filename_0_7, 235 be32_to_cpu(asrt->u.assert.line), 236 be32_to_cpu(asrt->u.assert.x), 237 be32_to_cpu(asrt->u.assert.y)); 238 } 239 240 #define X_CIM_PF_NOACCESS 0xeeeeeeee 241 /** 242 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox 243 * @adap: the adapter 244 * @mbox: index of the mailbox to use 245 * @cmd: the command to write 246 * @size: command length in bytes 247 * @rpl: where to optionally store the reply 248 * @sleep_ok: if true we may sleep while awaiting command completion 249 * @timeout: time to wait for command to finish before timing out 250 * (negative implies @sleep_ok=false) 251 * 252 * Sends the given command to FW through the selected mailbox and waits 253 * for the FW to execute the command. If @rpl is not %NULL it is used to 254 * store the FW's reply to the command. The command and its optional 255 * reply are of the same length. Some FW commands like RESET and 256 * INITIALIZE can take a considerable amount of time to execute. 257 * @sleep_ok determines whether we may sleep while awaiting the response. 258 * If sleeping is allowed we use progressive backoff otherwise we spin. 259 * Note that passing in a negative @timeout is an alternate mechanism 260 * for specifying @sleep_ok=false. This is useful when a higher level 261 * interface allows for specification of @timeout but not @sleep_ok ... 262 * 263 * The return value is 0 on success or a negative errno on failure. 
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg);

	/* Hand ownership to the firmware; the read flushes the write. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	     i < timeout;
	     i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				/* Ownership returned without a reply yet. */
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg);

			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				/* FW assertion arrived instead of a reply. */
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}

/* Convenience wrapper that uses the default firmware command timeout. */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
				       sleep_ok, FW_CMD_MAX_TIMEOUT);
}

/*
 * Log the ECC error address and BIST status registers of EDC @idx.
 * T5 and later only (the T4 registers differ); always returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != 0 && idx != 1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	/* T4 has a single MC register block; T5+ use per-MC register sets. */
	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* Program a 64-byte-aligned read, then kick off the BIST command. */
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	/* Copy out the 16 data words (high index first) and optional ECC. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing in the t4_regs.h file.
 * Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* Program a 64-byte-aligned read, then kick off the BIST command. */
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	/* Copy out the 16 data words (high index first) and optional ECC. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = rounddown2(addr, 64);
	end = roundup2(addr + len, 64);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}

/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
 * since we prefer to let the firmware own all of these registers, but if that
 * fails we go for it directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{

	/*
	 * If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
			"Configuration Space register %d, err = %d\n",
			reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	return t4_hw_pci_read_cfg4(adap, reg);
}

/**
 *	t4_get_regs_len - return the size of the chips register set
 *	@adapter: the adapter
 *
 *	Returns the size of the chip's BAR0 register space.
 */
694 */ 695 unsigned int t4_get_regs_len(struct adapter *adapter) 696 { 697 unsigned int chip_version = chip_id(adapter); 698 699 switch (chip_version) { 700 case CHELSIO_T4: 701 return T4_REGMAP_SIZE; 702 703 case CHELSIO_T5: 704 case CHELSIO_T6: 705 return T5_REGMAP_SIZE; 706 } 707 708 CH_ERR(adapter, 709 "Unsupported chip version %d\n", chip_version); 710 return 0; 711 } 712 713 /** 714 * t4_get_regs - read chip registers into provided buffer 715 * @adap: the adapter 716 * @buf: register buffer 717 * @buf_size: size (in bytes) of register buffer 718 * 719 * If the provided register buffer isn't large enough for the chip's 720 * full register range, the register dump will be truncated to the 721 * register buffer's size. 722 */ 723 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size) 724 { 725 static const unsigned int t4_reg_ranges[] = { 726 0x1008, 0x1108, 727 0x1180, 0x1184, 728 0x1190, 0x1194, 729 0x11a0, 0x11a4, 730 0x11b0, 0x11b4, 731 0x11fc, 0x123c, 732 0x1300, 0x173c, 733 0x1800, 0x18fc, 734 0x3000, 0x30d8, 735 0x30e0, 0x30e4, 736 0x30ec, 0x5910, 737 0x5920, 0x5924, 738 0x5960, 0x5960, 739 0x5968, 0x5968, 740 0x5970, 0x5970, 741 0x5978, 0x5978, 742 0x5980, 0x5980, 743 0x5988, 0x5988, 744 0x5990, 0x5990, 745 0x5998, 0x5998, 746 0x59a0, 0x59d4, 747 0x5a00, 0x5ae0, 748 0x5ae8, 0x5ae8, 749 0x5af0, 0x5af0, 750 0x5af8, 0x5af8, 751 0x6000, 0x6098, 752 0x6100, 0x6150, 753 0x6200, 0x6208, 754 0x6240, 0x6248, 755 0x6280, 0x62b0, 756 0x62c0, 0x6338, 757 0x6370, 0x638c, 758 0x6400, 0x643c, 759 0x6500, 0x6524, 760 0x6a00, 0x6a04, 761 0x6a14, 0x6a38, 762 0x6a60, 0x6a70, 763 0x6a78, 0x6a78, 764 0x6b00, 0x6b0c, 765 0x6b1c, 0x6b84, 766 0x6bf0, 0x6bf8, 767 0x6c00, 0x6c0c, 768 0x6c1c, 0x6c84, 769 0x6cf0, 0x6cf8, 770 0x6d00, 0x6d0c, 771 0x6d1c, 0x6d84, 772 0x6df0, 0x6df8, 773 0x6e00, 0x6e0c, 774 0x6e1c, 0x6e84, 775 0x6ef0, 0x6ef8, 776 0x6f00, 0x6f0c, 777 0x6f1c, 0x6f84, 778 0x6ff0, 0x6ff8, 779 0x7000, 0x700c, 780 0x701c, 0x7084, 781 0x70f0, 0x70f8, 782 0x7100, 
0x710c, 783 0x711c, 0x7184, 784 0x71f0, 0x71f8, 785 0x7200, 0x720c, 786 0x721c, 0x7284, 787 0x72f0, 0x72f8, 788 0x7300, 0x730c, 789 0x731c, 0x7384, 790 0x73f0, 0x73f8, 791 0x7400, 0x7450, 792 0x7500, 0x7530, 793 0x7600, 0x760c, 794 0x7614, 0x761c, 795 0x7680, 0x76cc, 796 0x7700, 0x7798, 797 0x77c0, 0x77fc, 798 0x7900, 0x79fc, 799 0x7b00, 0x7b58, 800 0x7b60, 0x7b84, 801 0x7b8c, 0x7c38, 802 0x7d00, 0x7d38, 803 0x7d40, 0x7d80, 804 0x7d8c, 0x7ddc, 805 0x7de4, 0x7e04, 806 0x7e10, 0x7e1c, 807 0x7e24, 0x7e38, 808 0x7e40, 0x7e44, 809 0x7e4c, 0x7e78, 810 0x7e80, 0x7ea4, 811 0x7eac, 0x7edc, 812 0x7ee8, 0x7efc, 813 0x8dc0, 0x8e04, 814 0x8e10, 0x8e1c, 815 0x8e30, 0x8e78, 816 0x8ea0, 0x8eb8, 817 0x8ec0, 0x8f6c, 818 0x8fc0, 0x9008, 819 0x9010, 0x9058, 820 0x9060, 0x9060, 821 0x9068, 0x9074, 822 0x90fc, 0x90fc, 823 0x9400, 0x9408, 824 0x9410, 0x9458, 825 0x9600, 0x9600, 826 0x9608, 0x9638, 827 0x9640, 0x96bc, 828 0x9800, 0x9808, 829 0x9820, 0x983c, 830 0x9850, 0x9864, 831 0x9c00, 0x9c6c, 832 0x9c80, 0x9cec, 833 0x9d00, 0x9d6c, 834 0x9d80, 0x9dec, 835 0x9e00, 0x9e6c, 836 0x9e80, 0x9eec, 837 0x9f00, 0x9f6c, 838 0x9f80, 0x9fec, 839 0xd004, 0xd004, 840 0xd010, 0xd03c, 841 0xdfc0, 0xdfe0, 842 0xe000, 0xea7c, 843 0xf000, 0x11190, 844 0x19040, 0x1906c, 845 0x19078, 0x19080, 846 0x1908c, 0x190e4, 847 0x190f0, 0x190f8, 848 0x19100, 0x19110, 849 0x19120, 0x19124, 850 0x19150, 0x19194, 851 0x1919c, 0x191b0, 852 0x191d0, 0x191e8, 853 0x19238, 0x1924c, 854 0x193f8, 0x1943c, 855 0x1944c, 0x19474, 856 0x19490, 0x194e0, 857 0x194f0, 0x194f8, 858 0x19800, 0x19c08, 859 0x19c10, 0x19c90, 860 0x19ca0, 0x19ce4, 861 0x19cf0, 0x19d40, 862 0x19d50, 0x19d94, 863 0x19da0, 0x19de8, 864 0x19df0, 0x19e40, 865 0x19e50, 0x19e90, 866 0x19ea0, 0x19f4c, 867 0x1a000, 0x1a004, 868 0x1a010, 0x1a06c, 869 0x1a0b0, 0x1a0e4, 870 0x1a0ec, 0x1a0f4, 871 0x1a100, 0x1a108, 872 0x1a114, 0x1a120, 873 0x1a128, 0x1a130, 874 0x1a138, 0x1a138, 875 0x1a190, 0x1a1c4, 876 0x1a1fc, 0x1a1fc, 877 0x1e040, 0x1e04c, 878 0x1e284, 0x1e28c, 
879 0x1e2c0, 0x1e2c0, 880 0x1e2e0, 0x1e2e0, 881 0x1e300, 0x1e384, 882 0x1e3c0, 0x1e3c8, 883 0x1e440, 0x1e44c, 884 0x1e684, 0x1e68c, 885 0x1e6c0, 0x1e6c0, 886 0x1e6e0, 0x1e6e0, 887 0x1e700, 0x1e784, 888 0x1e7c0, 0x1e7c8, 889 0x1e840, 0x1e84c, 890 0x1ea84, 0x1ea8c, 891 0x1eac0, 0x1eac0, 892 0x1eae0, 0x1eae0, 893 0x1eb00, 0x1eb84, 894 0x1ebc0, 0x1ebc8, 895 0x1ec40, 0x1ec4c, 896 0x1ee84, 0x1ee8c, 897 0x1eec0, 0x1eec0, 898 0x1eee0, 0x1eee0, 899 0x1ef00, 0x1ef84, 900 0x1efc0, 0x1efc8, 901 0x1f040, 0x1f04c, 902 0x1f284, 0x1f28c, 903 0x1f2c0, 0x1f2c0, 904 0x1f2e0, 0x1f2e0, 905 0x1f300, 0x1f384, 906 0x1f3c0, 0x1f3c8, 907 0x1f440, 0x1f44c, 908 0x1f684, 0x1f68c, 909 0x1f6c0, 0x1f6c0, 910 0x1f6e0, 0x1f6e0, 911 0x1f700, 0x1f784, 912 0x1f7c0, 0x1f7c8, 913 0x1f840, 0x1f84c, 914 0x1fa84, 0x1fa8c, 915 0x1fac0, 0x1fac0, 916 0x1fae0, 0x1fae0, 917 0x1fb00, 0x1fb84, 918 0x1fbc0, 0x1fbc8, 919 0x1fc40, 0x1fc4c, 920 0x1fe84, 0x1fe8c, 921 0x1fec0, 0x1fec0, 922 0x1fee0, 0x1fee0, 923 0x1ff00, 0x1ff84, 924 0x1ffc0, 0x1ffc8, 925 0x20000, 0x2002c, 926 0x20100, 0x2013c, 927 0x20190, 0x201a0, 928 0x201a8, 0x201b8, 929 0x201c4, 0x201c8, 930 0x20200, 0x20318, 931 0x20400, 0x204b4, 932 0x204c0, 0x20528, 933 0x20540, 0x20614, 934 0x21000, 0x21040, 935 0x2104c, 0x21060, 936 0x210c0, 0x210ec, 937 0x21200, 0x21268, 938 0x21270, 0x21284, 939 0x212fc, 0x21388, 940 0x21400, 0x21404, 941 0x21500, 0x21500, 942 0x21510, 0x21518, 943 0x2152c, 0x21530, 944 0x2153c, 0x2153c, 945 0x21550, 0x21554, 946 0x21600, 0x21600, 947 0x21608, 0x2161c, 948 0x21624, 0x21628, 949 0x21630, 0x21634, 950 0x2163c, 0x2163c, 951 0x21700, 0x2171c, 952 0x21780, 0x2178c, 953 0x21800, 0x21818, 954 0x21820, 0x21828, 955 0x21830, 0x21848, 956 0x21850, 0x21854, 957 0x21860, 0x21868, 958 0x21870, 0x21870, 959 0x21878, 0x21898, 960 0x218a0, 0x218a8, 961 0x218b0, 0x218c8, 962 0x218d0, 0x218d4, 963 0x218e0, 0x218e8, 964 0x218f0, 0x218f0, 965 0x218f8, 0x21a18, 966 0x21a20, 0x21a28, 967 0x21a30, 0x21a48, 968 0x21a50, 0x21a54, 969 0x21a60, 
0x21a68, 970 0x21a70, 0x21a70, 971 0x21a78, 0x21a98, 972 0x21aa0, 0x21aa8, 973 0x21ab0, 0x21ac8, 974 0x21ad0, 0x21ad4, 975 0x21ae0, 0x21ae8, 976 0x21af0, 0x21af0, 977 0x21af8, 0x21c18, 978 0x21c20, 0x21c20, 979 0x21c28, 0x21c30, 980 0x21c38, 0x21c38, 981 0x21c80, 0x21c98, 982 0x21ca0, 0x21ca8, 983 0x21cb0, 0x21cc8, 984 0x21cd0, 0x21cd4, 985 0x21ce0, 0x21ce8, 986 0x21cf0, 0x21cf0, 987 0x21cf8, 0x21d7c, 988 0x21e00, 0x21e04, 989 0x22000, 0x2202c, 990 0x22100, 0x2213c, 991 0x22190, 0x221a0, 992 0x221a8, 0x221b8, 993 0x221c4, 0x221c8, 994 0x22200, 0x22318, 995 0x22400, 0x224b4, 996 0x224c0, 0x22528, 997 0x22540, 0x22614, 998 0x23000, 0x23040, 999 0x2304c, 0x23060, 1000 0x230c0, 0x230ec, 1001 0x23200, 0x23268, 1002 0x23270, 0x23284, 1003 0x232fc, 0x23388, 1004 0x23400, 0x23404, 1005 0x23500, 0x23500, 1006 0x23510, 0x23518, 1007 0x2352c, 0x23530, 1008 0x2353c, 0x2353c, 1009 0x23550, 0x23554, 1010 0x23600, 0x23600, 1011 0x23608, 0x2361c, 1012 0x23624, 0x23628, 1013 0x23630, 0x23634, 1014 0x2363c, 0x2363c, 1015 0x23700, 0x2371c, 1016 0x23780, 0x2378c, 1017 0x23800, 0x23818, 1018 0x23820, 0x23828, 1019 0x23830, 0x23848, 1020 0x23850, 0x23854, 1021 0x23860, 0x23868, 1022 0x23870, 0x23870, 1023 0x23878, 0x23898, 1024 0x238a0, 0x238a8, 1025 0x238b0, 0x238c8, 1026 0x238d0, 0x238d4, 1027 0x238e0, 0x238e8, 1028 0x238f0, 0x238f0, 1029 0x238f8, 0x23a18, 1030 0x23a20, 0x23a28, 1031 0x23a30, 0x23a48, 1032 0x23a50, 0x23a54, 1033 0x23a60, 0x23a68, 1034 0x23a70, 0x23a70, 1035 0x23a78, 0x23a98, 1036 0x23aa0, 0x23aa8, 1037 0x23ab0, 0x23ac8, 1038 0x23ad0, 0x23ad4, 1039 0x23ae0, 0x23ae8, 1040 0x23af0, 0x23af0, 1041 0x23af8, 0x23c18, 1042 0x23c20, 0x23c20, 1043 0x23c28, 0x23c30, 1044 0x23c38, 0x23c38, 1045 0x23c80, 0x23c98, 1046 0x23ca0, 0x23ca8, 1047 0x23cb0, 0x23cc8, 1048 0x23cd0, 0x23cd4, 1049 0x23ce0, 0x23ce8, 1050 0x23cf0, 0x23cf0, 1051 0x23cf8, 0x23d7c, 1052 0x23e00, 0x23e04, 1053 0x24000, 0x2402c, 1054 0x24100, 0x2413c, 1055 0x24190, 0x241a0, 1056 0x241a8, 0x241b8, 1057 0x241c4, 
0x241c8, 1058 0x24200, 0x24318, 1059 0x24400, 0x244b4, 1060 0x244c0, 0x24528, 1061 0x24540, 0x24614, 1062 0x25000, 0x25040, 1063 0x2504c, 0x25060, 1064 0x250c0, 0x250ec, 1065 0x25200, 0x25268, 1066 0x25270, 0x25284, 1067 0x252fc, 0x25388, 1068 0x25400, 0x25404, 1069 0x25500, 0x25500, 1070 0x25510, 0x25518, 1071 0x2552c, 0x25530, 1072 0x2553c, 0x2553c, 1073 0x25550, 0x25554, 1074 0x25600, 0x25600, 1075 0x25608, 0x2561c, 1076 0x25624, 0x25628, 1077 0x25630, 0x25634, 1078 0x2563c, 0x2563c, 1079 0x25700, 0x2571c, 1080 0x25780, 0x2578c, 1081 0x25800, 0x25818, 1082 0x25820, 0x25828, 1083 0x25830, 0x25848, 1084 0x25850, 0x25854, 1085 0x25860, 0x25868, 1086 0x25870, 0x25870, 1087 0x25878, 0x25898, 1088 0x258a0, 0x258a8, 1089 0x258b0, 0x258c8, 1090 0x258d0, 0x258d4, 1091 0x258e0, 0x258e8, 1092 0x258f0, 0x258f0, 1093 0x258f8, 0x25a18, 1094 0x25a20, 0x25a28, 1095 0x25a30, 0x25a48, 1096 0x25a50, 0x25a54, 1097 0x25a60, 0x25a68, 1098 0x25a70, 0x25a70, 1099 0x25a78, 0x25a98, 1100 0x25aa0, 0x25aa8, 1101 0x25ab0, 0x25ac8, 1102 0x25ad0, 0x25ad4, 1103 0x25ae0, 0x25ae8, 1104 0x25af0, 0x25af0, 1105 0x25af8, 0x25c18, 1106 0x25c20, 0x25c20, 1107 0x25c28, 0x25c30, 1108 0x25c38, 0x25c38, 1109 0x25c80, 0x25c98, 1110 0x25ca0, 0x25ca8, 1111 0x25cb0, 0x25cc8, 1112 0x25cd0, 0x25cd4, 1113 0x25ce0, 0x25ce8, 1114 0x25cf0, 0x25cf0, 1115 0x25cf8, 0x25d7c, 1116 0x25e00, 0x25e04, 1117 0x26000, 0x2602c, 1118 0x26100, 0x2613c, 1119 0x26190, 0x261a0, 1120 0x261a8, 0x261b8, 1121 0x261c4, 0x261c8, 1122 0x26200, 0x26318, 1123 0x26400, 0x264b4, 1124 0x264c0, 0x26528, 1125 0x26540, 0x26614, 1126 0x27000, 0x27040, 1127 0x2704c, 0x27060, 1128 0x270c0, 0x270ec, 1129 0x27200, 0x27268, 1130 0x27270, 0x27284, 1131 0x272fc, 0x27388, 1132 0x27400, 0x27404, 1133 0x27500, 0x27500, 1134 0x27510, 0x27518, 1135 0x2752c, 0x27530, 1136 0x2753c, 0x2753c, 1137 0x27550, 0x27554, 1138 0x27600, 0x27600, 1139 0x27608, 0x2761c, 1140 0x27624, 0x27628, 1141 0x27630, 0x27634, 1142 0x2763c, 0x2763c, 1143 0x27700, 0x2771c, 1144 
0x27780, 0x2778c, 1145 0x27800, 0x27818, 1146 0x27820, 0x27828, 1147 0x27830, 0x27848, 1148 0x27850, 0x27854, 1149 0x27860, 0x27868, 1150 0x27870, 0x27870, 1151 0x27878, 0x27898, 1152 0x278a0, 0x278a8, 1153 0x278b0, 0x278c8, 1154 0x278d0, 0x278d4, 1155 0x278e0, 0x278e8, 1156 0x278f0, 0x278f0, 1157 0x278f8, 0x27a18, 1158 0x27a20, 0x27a28, 1159 0x27a30, 0x27a48, 1160 0x27a50, 0x27a54, 1161 0x27a60, 0x27a68, 1162 0x27a70, 0x27a70, 1163 0x27a78, 0x27a98, 1164 0x27aa0, 0x27aa8, 1165 0x27ab0, 0x27ac8, 1166 0x27ad0, 0x27ad4, 1167 0x27ae0, 0x27ae8, 1168 0x27af0, 0x27af0, 1169 0x27af8, 0x27c18, 1170 0x27c20, 0x27c20, 1171 0x27c28, 0x27c30, 1172 0x27c38, 0x27c38, 1173 0x27c80, 0x27c98, 1174 0x27ca0, 0x27ca8, 1175 0x27cb0, 0x27cc8, 1176 0x27cd0, 0x27cd4, 1177 0x27ce0, 0x27ce8, 1178 0x27cf0, 0x27cf0, 1179 0x27cf8, 0x27d7c, 1180 0x27e00, 0x27e04, 1181 }; 1182 1183 static const unsigned int t5_reg_ranges[] = { 1184 0x1008, 0x10c0, 1185 0x10cc, 0x10f8, 1186 0x1100, 0x1100, 1187 0x110c, 0x1148, 1188 0x1180, 0x1184, 1189 0x1190, 0x1194, 1190 0x11a0, 0x11a4, 1191 0x11b0, 0x11b4, 1192 0x11fc, 0x123c, 1193 0x1280, 0x173c, 1194 0x1800, 0x18fc, 1195 0x3000, 0x3028, 1196 0x3060, 0x30b0, 1197 0x30b8, 0x30d8, 1198 0x30e0, 0x30fc, 1199 0x3140, 0x357c, 1200 0x35a8, 0x35cc, 1201 0x35ec, 0x35ec, 1202 0x3600, 0x5624, 1203 0x56cc, 0x56ec, 1204 0x56f4, 0x5720, 1205 0x5728, 0x575c, 1206 0x580c, 0x5814, 1207 0x5890, 0x589c, 1208 0x58a4, 0x58ac, 1209 0x58b8, 0x58bc, 1210 0x5940, 0x59c8, 1211 0x59d0, 0x59dc, 1212 0x59fc, 0x5a18, 1213 0x5a60, 0x5a70, 1214 0x5a80, 0x5a9c, 1215 0x5b94, 0x5bfc, 1216 0x6000, 0x6020, 1217 0x6028, 0x6040, 1218 0x6058, 0x609c, 1219 0x60a8, 0x614c, 1220 0x7700, 0x7798, 1221 0x77c0, 0x78fc, 1222 0x7b00, 0x7b58, 1223 0x7b60, 0x7b84, 1224 0x7b8c, 0x7c54, 1225 0x7d00, 0x7d38, 1226 0x7d40, 0x7d80, 1227 0x7d8c, 0x7ddc, 1228 0x7de4, 0x7e04, 1229 0x7e10, 0x7e1c, 1230 0x7e24, 0x7e38, 1231 0x7e40, 0x7e44, 1232 0x7e4c, 0x7e78, 1233 0x7e80, 0x7edc, 1234 0x7ee8, 0x7efc, 1235 0x8dc0, 
0x8de0, 1236 0x8df8, 0x8e04, 1237 0x8e10, 0x8e84, 1238 0x8ea0, 0x8f84, 1239 0x8fc0, 0x9058, 1240 0x9060, 0x9060, 1241 0x9068, 0x90f8, 1242 0x9400, 0x9408, 1243 0x9410, 0x9470, 1244 0x9600, 0x9600, 1245 0x9608, 0x9638, 1246 0x9640, 0x96f4, 1247 0x9800, 0x9808, 1248 0x9820, 0x983c, 1249 0x9850, 0x9864, 1250 0x9c00, 0x9c6c, 1251 0x9c80, 0x9cec, 1252 0x9d00, 0x9d6c, 1253 0x9d80, 0x9dec, 1254 0x9e00, 0x9e6c, 1255 0x9e80, 0x9eec, 1256 0x9f00, 0x9f6c, 1257 0x9f80, 0xa020, 1258 0xd004, 0xd004, 1259 0xd010, 0xd03c, 1260 0xdfc0, 0xdfe0, 1261 0xe000, 0x1106c, 1262 0x11074, 0x11088, 1263 0x1109c, 0x1117c, 1264 0x11190, 0x11204, 1265 0x19040, 0x1906c, 1266 0x19078, 0x19080, 1267 0x1908c, 0x190e8, 1268 0x190f0, 0x190f8, 1269 0x19100, 0x19110, 1270 0x19120, 0x19124, 1271 0x19150, 0x19194, 1272 0x1919c, 0x191b0, 1273 0x191d0, 0x191e8, 1274 0x19238, 0x19290, 1275 0x193f8, 0x19428, 1276 0x19430, 0x19444, 1277 0x1944c, 0x1946c, 1278 0x19474, 0x19474, 1279 0x19490, 0x194cc, 1280 0x194f0, 0x194f8, 1281 0x19c00, 0x19c08, 1282 0x19c10, 0x19c60, 1283 0x19c94, 0x19ce4, 1284 0x19cf0, 0x19d40, 1285 0x19d50, 0x19d94, 1286 0x19da0, 0x19de8, 1287 0x19df0, 0x19e10, 1288 0x19e50, 0x19e90, 1289 0x19ea0, 0x19f24, 1290 0x19f34, 0x19f34, 1291 0x19f40, 0x19f50, 1292 0x19f90, 0x19fb4, 1293 0x19fc4, 0x19fe4, 1294 0x1a000, 0x1a004, 1295 0x1a010, 0x1a06c, 1296 0x1a0b0, 0x1a0e4, 1297 0x1a0ec, 0x1a0f8, 1298 0x1a100, 0x1a108, 1299 0x1a114, 0x1a120, 1300 0x1a128, 0x1a130, 1301 0x1a138, 0x1a138, 1302 0x1a190, 0x1a1c4, 1303 0x1a1fc, 0x1a1fc, 1304 0x1e008, 0x1e00c, 1305 0x1e040, 0x1e044, 1306 0x1e04c, 0x1e04c, 1307 0x1e284, 0x1e290, 1308 0x1e2c0, 0x1e2c0, 1309 0x1e2e0, 0x1e2e0, 1310 0x1e300, 0x1e384, 1311 0x1e3c0, 0x1e3c8, 1312 0x1e408, 0x1e40c, 1313 0x1e440, 0x1e444, 1314 0x1e44c, 0x1e44c, 1315 0x1e684, 0x1e690, 1316 0x1e6c0, 0x1e6c0, 1317 0x1e6e0, 0x1e6e0, 1318 0x1e700, 0x1e784, 1319 0x1e7c0, 0x1e7c8, 1320 0x1e808, 0x1e80c, 1321 0x1e840, 0x1e844, 1322 0x1e84c, 0x1e84c, 1323 0x1ea84, 0x1ea90, 1324 0x1eac0, 
0x1eac0, 1325 0x1eae0, 0x1eae0, 1326 0x1eb00, 0x1eb84, 1327 0x1ebc0, 0x1ebc8, 1328 0x1ec08, 0x1ec0c, 1329 0x1ec40, 0x1ec44, 1330 0x1ec4c, 0x1ec4c, 1331 0x1ee84, 0x1ee90, 1332 0x1eec0, 0x1eec0, 1333 0x1eee0, 0x1eee0, 1334 0x1ef00, 0x1ef84, 1335 0x1efc0, 0x1efc8, 1336 0x1f008, 0x1f00c, 1337 0x1f040, 0x1f044, 1338 0x1f04c, 0x1f04c, 1339 0x1f284, 0x1f290, 1340 0x1f2c0, 0x1f2c0, 1341 0x1f2e0, 0x1f2e0, 1342 0x1f300, 0x1f384, 1343 0x1f3c0, 0x1f3c8, 1344 0x1f408, 0x1f40c, 1345 0x1f440, 0x1f444, 1346 0x1f44c, 0x1f44c, 1347 0x1f684, 0x1f690, 1348 0x1f6c0, 0x1f6c0, 1349 0x1f6e0, 0x1f6e0, 1350 0x1f700, 0x1f784, 1351 0x1f7c0, 0x1f7c8, 1352 0x1f808, 0x1f80c, 1353 0x1f840, 0x1f844, 1354 0x1f84c, 0x1f84c, 1355 0x1fa84, 0x1fa90, 1356 0x1fac0, 0x1fac0, 1357 0x1fae0, 0x1fae0, 1358 0x1fb00, 0x1fb84, 1359 0x1fbc0, 0x1fbc8, 1360 0x1fc08, 0x1fc0c, 1361 0x1fc40, 0x1fc44, 1362 0x1fc4c, 0x1fc4c, 1363 0x1fe84, 0x1fe90, 1364 0x1fec0, 0x1fec0, 1365 0x1fee0, 0x1fee0, 1366 0x1ff00, 0x1ff84, 1367 0x1ffc0, 0x1ffc8, 1368 0x30000, 0x30030, 1369 0x30038, 0x30038, 1370 0x30040, 0x30040, 1371 0x30100, 0x30144, 1372 0x30190, 0x301a0, 1373 0x301a8, 0x301b8, 1374 0x301c4, 0x301c8, 1375 0x301d0, 0x301d0, 1376 0x30200, 0x30318, 1377 0x30400, 0x304b4, 1378 0x304c0, 0x3052c, 1379 0x30540, 0x3061c, 1380 0x30800, 0x30828, 1381 0x30834, 0x30834, 1382 0x308c0, 0x30908, 1383 0x30910, 0x309ac, 1384 0x30a00, 0x30a14, 1385 0x30a1c, 0x30a2c, 1386 0x30a44, 0x30a50, 1387 0x30a74, 0x30a74, 1388 0x30a7c, 0x30afc, 1389 0x30b08, 0x30c24, 1390 0x30d00, 0x30d00, 1391 0x30d08, 0x30d14, 1392 0x30d1c, 0x30d20, 1393 0x30d3c, 0x30d3c, 1394 0x30d48, 0x30d50, 1395 0x31200, 0x3120c, 1396 0x31220, 0x31220, 1397 0x31240, 0x31240, 1398 0x31600, 0x3160c, 1399 0x31a00, 0x31a1c, 1400 0x31e00, 0x31e20, 1401 0x31e38, 0x31e3c, 1402 0x31e80, 0x31e80, 1403 0x31e88, 0x31ea8, 1404 0x31eb0, 0x31eb4, 1405 0x31ec8, 0x31ed4, 1406 0x31fb8, 0x32004, 1407 0x32200, 0x32200, 1408 0x32208, 0x32240, 1409 0x32248, 0x32280, 1410 0x32288, 0x322c0, 1411 
0x322c8, 0x322fc, 1412 0x32600, 0x32630, 1413 0x32a00, 0x32abc, 1414 0x32b00, 0x32b10, 1415 0x32b20, 0x32b30, 1416 0x32b40, 0x32b50, 1417 0x32b60, 0x32b70, 1418 0x33000, 0x33028, 1419 0x33030, 0x33048, 1420 0x33060, 0x33068, 1421 0x33070, 0x3309c, 1422 0x330f0, 0x33128, 1423 0x33130, 0x33148, 1424 0x33160, 0x33168, 1425 0x33170, 0x3319c, 1426 0x331f0, 0x33238, 1427 0x33240, 0x33240, 1428 0x33248, 0x33250, 1429 0x3325c, 0x33264, 1430 0x33270, 0x332b8, 1431 0x332c0, 0x332e4, 1432 0x332f8, 0x33338, 1433 0x33340, 0x33340, 1434 0x33348, 0x33350, 1435 0x3335c, 0x33364, 1436 0x33370, 0x333b8, 1437 0x333c0, 0x333e4, 1438 0x333f8, 0x33428, 1439 0x33430, 0x33448, 1440 0x33460, 0x33468, 1441 0x33470, 0x3349c, 1442 0x334f0, 0x33528, 1443 0x33530, 0x33548, 1444 0x33560, 0x33568, 1445 0x33570, 0x3359c, 1446 0x335f0, 0x33638, 1447 0x33640, 0x33640, 1448 0x33648, 0x33650, 1449 0x3365c, 0x33664, 1450 0x33670, 0x336b8, 1451 0x336c0, 0x336e4, 1452 0x336f8, 0x33738, 1453 0x33740, 0x33740, 1454 0x33748, 0x33750, 1455 0x3375c, 0x33764, 1456 0x33770, 0x337b8, 1457 0x337c0, 0x337e4, 1458 0x337f8, 0x337fc, 1459 0x33814, 0x33814, 1460 0x3382c, 0x3382c, 1461 0x33880, 0x3388c, 1462 0x338e8, 0x338ec, 1463 0x33900, 0x33928, 1464 0x33930, 0x33948, 1465 0x33960, 0x33968, 1466 0x33970, 0x3399c, 1467 0x339f0, 0x33a38, 1468 0x33a40, 0x33a40, 1469 0x33a48, 0x33a50, 1470 0x33a5c, 0x33a64, 1471 0x33a70, 0x33ab8, 1472 0x33ac0, 0x33ae4, 1473 0x33af8, 0x33b10, 1474 0x33b28, 0x33b28, 1475 0x33b3c, 0x33b50, 1476 0x33bf0, 0x33c10, 1477 0x33c28, 0x33c28, 1478 0x33c3c, 0x33c50, 1479 0x33cf0, 0x33cfc, 1480 0x34000, 0x34030, 1481 0x34038, 0x34038, 1482 0x34040, 0x34040, 1483 0x34100, 0x34144, 1484 0x34190, 0x341a0, 1485 0x341a8, 0x341b8, 1486 0x341c4, 0x341c8, 1487 0x341d0, 0x341d0, 1488 0x34200, 0x34318, 1489 0x34400, 0x344b4, 1490 0x344c0, 0x3452c, 1491 0x34540, 0x3461c, 1492 0x34800, 0x34828, 1493 0x34834, 0x34834, 1494 0x348c0, 0x34908, 1495 0x34910, 0x349ac, 1496 0x34a00, 0x34a14, 1497 0x34a1c, 0x34a2c, 
1498 0x34a44, 0x34a50, 1499 0x34a74, 0x34a74, 1500 0x34a7c, 0x34afc, 1501 0x34b08, 0x34c24, 1502 0x34d00, 0x34d00, 1503 0x34d08, 0x34d14, 1504 0x34d1c, 0x34d20, 1505 0x34d3c, 0x34d3c, 1506 0x34d48, 0x34d50, 1507 0x35200, 0x3520c, 1508 0x35220, 0x35220, 1509 0x35240, 0x35240, 1510 0x35600, 0x3560c, 1511 0x35a00, 0x35a1c, 1512 0x35e00, 0x35e20, 1513 0x35e38, 0x35e3c, 1514 0x35e80, 0x35e80, 1515 0x35e88, 0x35ea8, 1516 0x35eb0, 0x35eb4, 1517 0x35ec8, 0x35ed4, 1518 0x35fb8, 0x36004, 1519 0x36200, 0x36200, 1520 0x36208, 0x36240, 1521 0x36248, 0x36280, 1522 0x36288, 0x362c0, 1523 0x362c8, 0x362fc, 1524 0x36600, 0x36630, 1525 0x36a00, 0x36abc, 1526 0x36b00, 0x36b10, 1527 0x36b20, 0x36b30, 1528 0x36b40, 0x36b50, 1529 0x36b60, 0x36b70, 1530 0x37000, 0x37028, 1531 0x37030, 0x37048, 1532 0x37060, 0x37068, 1533 0x37070, 0x3709c, 1534 0x370f0, 0x37128, 1535 0x37130, 0x37148, 1536 0x37160, 0x37168, 1537 0x37170, 0x3719c, 1538 0x371f0, 0x37238, 1539 0x37240, 0x37240, 1540 0x37248, 0x37250, 1541 0x3725c, 0x37264, 1542 0x37270, 0x372b8, 1543 0x372c0, 0x372e4, 1544 0x372f8, 0x37338, 1545 0x37340, 0x37340, 1546 0x37348, 0x37350, 1547 0x3735c, 0x37364, 1548 0x37370, 0x373b8, 1549 0x373c0, 0x373e4, 1550 0x373f8, 0x37428, 1551 0x37430, 0x37448, 1552 0x37460, 0x37468, 1553 0x37470, 0x3749c, 1554 0x374f0, 0x37528, 1555 0x37530, 0x37548, 1556 0x37560, 0x37568, 1557 0x37570, 0x3759c, 1558 0x375f0, 0x37638, 1559 0x37640, 0x37640, 1560 0x37648, 0x37650, 1561 0x3765c, 0x37664, 1562 0x37670, 0x376b8, 1563 0x376c0, 0x376e4, 1564 0x376f8, 0x37738, 1565 0x37740, 0x37740, 1566 0x37748, 0x37750, 1567 0x3775c, 0x37764, 1568 0x37770, 0x377b8, 1569 0x377c0, 0x377e4, 1570 0x377f8, 0x377fc, 1571 0x37814, 0x37814, 1572 0x3782c, 0x3782c, 1573 0x37880, 0x3788c, 1574 0x378e8, 0x378ec, 1575 0x37900, 0x37928, 1576 0x37930, 0x37948, 1577 0x37960, 0x37968, 1578 0x37970, 0x3799c, 1579 0x379f0, 0x37a38, 1580 0x37a40, 0x37a40, 1581 0x37a48, 0x37a50, 1582 0x37a5c, 0x37a64, 1583 0x37a70, 0x37ab8, 1584 0x37ac0, 
0x37ae4, 1585 0x37af8, 0x37b10, 1586 0x37b28, 0x37b28, 1587 0x37b3c, 0x37b50, 1588 0x37bf0, 0x37c10, 1589 0x37c28, 0x37c28, 1590 0x37c3c, 0x37c50, 1591 0x37cf0, 0x37cfc, 1592 0x38000, 0x38030, 1593 0x38038, 0x38038, 1594 0x38040, 0x38040, 1595 0x38100, 0x38144, 1596 0x38190, 0x381a0, 1597 0x381a8, 0x381b8, 1598 0x381c4, 0x381c8, 1599 0x381d0, 0x381d0, 1600 0x38200, 0x38318, 1601 0x38400, 0x384b4, 1602 0x384c0, 0x3852c, 1603 0x38540, 0x3861c, 1604 0x38800, 0x38828, 1605 0x38834, 0x38834, 1606 0x388c0, 0x38908, 1607 0x38910, 0x389ac, 1608 0x38a00, 0x38a14, 1609 0x38a1c, 0x38a2c, 1610 0x38a44, 0x38a50, 1611 0x38a74, 0x38a74, 1612 0x38a7c, 0x38afc, 1613 0x38b08, 0x38c24, 1614 0x38d00, 0x38d00, 1615 0x38d08, 0x38d14, 1616 0x38d1c, 0x38d20, 1617 0x38d3c, 0x38d3c, 1618 0x38d48, 0x38d50, 1619 0x39200, 0x3920c, 1620 0x39220, 0x39220, 1621 0x39240, 0x39240, 1622 0x39600, 0x3960c, 1623 0x39a00, 0x39a1c, 1624 0x39e00, 0x39e20, 1625 0x39e38, 0x39e3c, 1626 0x39e80, 0x39e80, 1627 0x39e88, 0x39ea8, 1628 0x39eb0, 0x39eb4, 1629 0x39ec8, 0x39ed4, 1630 0x39fb8, 0x3a004, 1631 0x3a200, 0x3a200, 1632 0x3a208, 0x3a240, 1633 0x3a248, 0x3a280, 1634 0x3a288, 0x3a2c0, 1635 0x3a2c8, 0x3a2fc, 1636 0x3a600, 0x3a630, 1637 0x3aa00, 0x3aabc, 1638 0x3ab00, 0x3ab10, 1639 0x3ab20, 0x3ab30, 1640 0x3ab40, 0x3ab50, 1641 0x3ab60, 0x3ab70, 1642 0x3b000, 0x3b028, 1643 0x3b030, 0x3b048, 1644 0x3b060, 0x3b068, 1645 0x3b070, 0x3b09c, 1646 0x3b0f0, 0x3b128, 1647 0x3b130, 0x3b148, 1648 0x3b160, 0x3b168, 1649 0x3b170, 0x3b19c, 1650 0x3b1f0, 0x3b238, 1651 0x3b240, 0x3b240, 1652 0x3b248, 0x3b250, 1653 0x3b25c, 0x3b264, 1654 0x3b270, 0x3b2b8, 1655 0x3b2c0, 0x3b2e4, 1656 0x3b2f8, 0x3b338, 1657 0x3b340, 0x3b340, 1658 0x3b348, 0x3b350, 1659 0x3b35c, 0x3b364, 1660 0x3b370, 0x3b3b8, 1661 0x3b3c0, 0x3b3e4, 1662 0x3b3f8, 0x3b428, 1663 0x3b430, 0x3b448, 1664 0x3b460, 0x3b468, 1665 0x3b470, 0x3b49c, 1666 0x3b4f0, 0x3b528, 1667 0x3b530, 0x3b548, 1668 0x3b560, 0x3b568, 1669 0x3b570, 0x3b59c, 1670 0x3b5f0, 0x3b638, 1671 
0x3b640, 0x3b640, 1672 0x3b648, 0x3b650, 1673 0x3b65c, 0x3b664, 1674 0x3b670, 0x3b6b8, 1675 0x3b6c0, 0x3b6e4, 1676 0x3b6f8, 0x3b738, 1677 0x3b740, 0x3b740, 1678 0x3b748, 0x3b750, 1679 0x3b75c, 0x3b764, 1680 0x3b770, 0x3b7b8, 1681 0x3b7c0, 0x3b7e4, 1682 0x3b7f8, 0x3b7fc, 1683 0x3b814, 0x3b814, 1684 0x3b82c, 0x3b82c, 1685 0x3b880, 0x3b88c, 1686 0x3b8e8, 0x3b8ec, 1687 0x3b900, 0x3b928, 1688 0x3b930, 0x3b948, 1689 0x3b960, 0x3b968, 1690 0x3b970, 0x3b99c, 1691 0x3b9f0, 0x3ba38, 1692 0x3ba40, 0x3ba40, 1693 0x3ba48, 0x3ba50, 1694 0x3ba5c, 0x3ba64, 1695 0x3ba70, 0x3bab8, 1696 0x3bac0, 0x3bae4, 1697 0x3baf8, 0x3bb10, 1698 0x3bb28, 0x3bb28, 1699 0x3bb3c, 0x3bb50, 1700 0x3bbf0, 0x3bc10, 1701 0x3bc28, 0x3bc28, 1702 0x3bc3c, 0x3bc50, 1703 0x3bcf0, 0x3bcfc, 1704 0x3c000, 0x3c030, 1705 0x3c038, 0x3c038, 1706 0x3c040, 0x3c040, 1707 0x3c100, 0x3c144, 1708 0x3c190, 0x3c1a0, 1709 0x3c1a8, 0x3c1b8, 1710 0x3c1c4, 0x3c1c8, 1711 0x3c1d0, 0x3c1d0, 1712 0x3c200, 0x3c318, 1713 0x3c400, 0x3c4b4, 1714 0x3c4c0, 0x3c52c, 1715 0x3c540, 0x3c61c, 1716 0x3c800, 0x3c828, 1717 0x3c834, 0x3c834, 1718 0x3c8c0, 0x3c908, 1719 0x3c910, 0x3c9ac, 1720 0x3ca00, 0x3ca14, 1721 0x3ca1c, 0x3ca2c, 1722 0x3ca44, 0x3ca50, 1723 0x3ca74, 0x3ca74, 1724 0x3ca7c, 0x3cafc, 1725 0x3cb08, 0x3cc24, 1726 0x3cd00, 0x3cd00, 1727 0x3cd08, 0x3cd14, 1728 0x3cd1c, 0x3cd20, 1729 0x3cd3c, 0x3cd3c, 1730 0x3cd48, 0x3cd50, 1731 0x3d200, 0x3d20c, 1732 0x3d220, 0x3d220, 1733 0x3d240, 0x3d240, 1734 0x3d600, 0x3d60c, 1735 0x3da00, 0x3da1c, 1736 0x3de00, 0x3de20, 1737 0x3de38, 0x3de3c, 1738 0x3de80, 0x3de80, 1739 0x3de88, 0x3dea8, 1740 0x3deb0, 0x3deb4, 1741 0x3dec8, 0x3ded4, 1742 0x3dfb8, 0x3e004, 1743 0x3e200, 0x3e200, 1744 0x3e208, 0x3e240, 1745 0x3e248, 0x3e280, 1746 0x3e288, 0x3e2c0, 1747 0x3e2c8, 0x3e2fc, 1748 0x3e600, 0x3e630, 1749 0x3ea00, 0x3eabc, 1750 0x3eb00, 0x3eb10, 1751 0x3eb20, 0x3eb30, 1752 0x3eb40, 0x3eb50, 1753 0x3eb60, 0x3eb70, 1754 0x3f000, 0x3f028, 1755 0x3f030, 0x3f048, 1756 0x3f060, 0x3f068, 1757 0x3f070, 0x3f09c, 
1758 0x3f0f0, 0x3f128, 1759 0x3f130, 0x3f148, 1760 0x3f160, 0x3f168, 1761 0x3f170, 0x3f19c, 1762 0x3f1f0, 0x3f238, 1763 0x3f240, 0x3f240, 1764 0x3f248, 0x3f250, 1765 0x3f25c, 0x3f264, 1766 0x3f270, 0x3f2b8, 1767 0x3f2c0, 0x3f2e4, 1768 0x3f2f8, 0x3f338, 1769 0x3f340, 0x3f340, 1770 0x3f348, 0x3f350, 1771 0x3f35c, 0x3f364, 1772 0x3f370, 0x3f3b8, 1773 0x3f3c0, 0x3f3e4, 1774 0x3f3f8, 0x3f428, 1775 0x3f430, 0x3f448, 1776 0x3f460, 0x3f468, 1777 0x3f470, 0x3f49c, 1778 0x3f4f0, 0x3f528, 1779 0x3f530, 0x3f548, 1780 0x3f560, 0x3f568, 1781 0x3f570, 0x3f59c, 1782 0x3f5f0, 0x3f638, 1783 0x3f640, 0x3f640, 1784 0x3f648, 0x3f650, 1785 0x3f65c, 0x3f664, 1786 0x3f670, 0x3f6b8, 1787 0x3f6c0, 0x3f6e4, 1788 0x3f6f8, 0x3f738, 1789 0x3f740, 0x3f740, 1790 0x3f748, 0x3f750, 1791 0x3f75c, 0x3f764, 1792 0x3f770, 0x3f7b8, 1793 0x3f7c0, 0x3f7e4, 1794 0x3f7f8, 0x3f7fc, 1795 0x3f814, 0x3f814, 1796 0x3f82c, 0x3f82c, 1797 0x3f880, 0x3f88c, 1798 0x3f8e8, 0x3f8ec, 1799 0x3f900, 0x3f928, 1800 0x3f930, 0x3f948, 1801 0x3f960, 0x3f968, 1802 0x3f970, 0x3f99c, 1803 0x3f9f0, 0x3fa38, 1804 0x3fa40, 0x3fa40, 1805 0x3fa48, 0x3fa50, 1806 0x3fa5c, 0x3fa64, 1807 0x3fa70, 0x3fab8, 1808 0x3fac0, 0x3fae4, 1809 0x3faf8, 0x3fb10, 1810 0x3fb28, 0x3fb28, 1811 0x3fb3c, 0x3fb50, 1812 0x3fbf0, 0x3fc10, 1813 0x3fc28, 0x3fc28, 1814 0x3fc3c, 0x3fc50, 1815 0x3fcf0, 0x3fcfc, 1816 0x40000, 0x4000c, 1817 0x40040, 0x40050, 1818 0x40060, 0x40068, 1819 0x4007c, 0x4008c, 1820 0x40094, 0x400b0, 1821 0x400c0, 0x40144, 1822 0x40180, 0x4018c, 1823 0x40200, 0x40254, 1824 0x40260, 0x40264, 1825 0x40270, 0x40288, 1826 0x40290, 0x40298, 1827 0x402ac, 0x402c8, 1828 0x402d0, 0x402e0, 1829 0x402f0, 0x402f0, 1830 0x40300, 0x4033c, 1831 0x403f8, 0x403fc, 1832 0x41304, 0x413c4, 1833 0x41400, 0x4140c, 1834 0x41414, 0x4141c, 1835 0x41480, 0x414d0, 1836 0x44000, 0x44054, 1837 0x4405c, 0x44078, 1838 0x440c0, 0x44174, 1839 0x44180, 0x441ac, 1840 0x441b4, 0x441b8, 1841 0x441c0, 0x44254, 1842 0x4425c, 0x44278, 1843 0x442c0, 0x44374, 1844 0x44380, 
0x443ac, 1845 0x443b4, 0x443b8, 1846 0x443c0, 0x44454, 1847 0x4445c, 0x44478, 1848 0x444c0, 0x44574, 1849 0x44580, 0x445ac, 1850 0x445b4, 0x445b8, 1851 0x445c0, 0x44654, 1852 0x4465c, 0x44678, 1853 0x446c0, 0x44774, 1854 0x44780, 0x447ac, 1855 0x447b4, 0x447b8, 1856 0x447c0, 0x44854, 1857 0x4485c, 0x44878, 1858 0x448c0, 0x44974, 1859 0x44980, 0x449ac, 1860 0x449b4, 0x449b8, 1861 0x449c0, 0x449fc, 1862 0x45000, 0x45004, 1863 0x45010, 0x45030, 1864 0x45040, 0x45060, 1865 0x45068, 0x45068, 1866 0x45080, 0x45084, 1867 0x450a0, 0x450b0, 1868 0x45200, 0x45204, 1869 0x45210, 0x45230, 1870 0x45240, 0x45260, 1871 0x45268, 0x45268, 1872 0x45280, 0x45284, 1873 0x452a0, 0x452b0, 1874 0x460c0, 0x460e4, 1875 0x47000, 0x4703c, 1876 0x47044, 0x4708c, 1877 0x47200, 0x47250, 1878 0x47400, 0x47408, 1879 0x47414, 0x47420, 1880 0x47600, 0x47618, 1881 0x47800, 0x47814, 1882 0x48000, 0x4800c, 1883 0x48040, 0x48050, 1884 0x48060, 0x48068, 1885 0x4807c, 0x4808c, 1886 0x48094, 0x480b0, 1887 0x480c0, 0x48144, 1888 0x48180, 0x4818c, 1889 0x48200, 0x48254, 1890 0x48260, 0x48264, 1891 0x48270, 0x48288, 1892 0x48290, 0x48298, 1893 0x482ac, 0x482c8, 1894 0x482d0, 0x482e0, 1895 0x482f0, 0x482f0, 1896 0x48300, 0x4833c, 1897 0x483f8, 0x483fc, 1898 0x49304, 0x493c4, 1899 0x49400, 0x4940c, 1900 0x49414, 0x4941c, 1901 0x49480, 0x494d0, 1902 0x4c000, 0x4c054, 1903 0x4c05c, 0x4c078, 1904 0x4c0c0, 0x4c174, 1905 0x4c180, 0x4c1ac, 1906 0x4c1b4, 0x4c1b8, 1907 0x4c1c0, 0x4c254, 1908 0x4c25c, 0x4c278, 1909 0x4c2c0, 0x4c374, 1910 0x4c380, 0x4c3ac, 1911 0x4c3b4, 0x4c3b8, 1912 0x4c3c0, 0x4c454, 1913 0x4c45c, 0x4c478, 1914 0x4c4c0, 0x4c574, 1915 0x4c580, 0x4c5ac, 1916 0x4c5b4, 0x4c5b8, 1917 0x4c5c0, 0x4c654, 1918 0x4c65c, 0x4c678, 1919 0x4c6c0, 0x4c774, 1920 0x4c780, 0x4c7ac, 1921 0x4c7b4, 0x4c7b8, 1922 0x4c7c0, 0x4c854, 1923 0x4c85c, 0x4c878, 1924 0x4c8c0, 0x4c974, 1925 0x4c980, 0x4c9ac, 1926 0x4c9b4, 0x4c9b8, 1927 0x4c9c0, 0x4c9fc, 1928 0x4d000, 0x4d004, 1929 0x4d010, 0x4d030, 1930 0x4d040, 0x4d060, 1931 
0x4d068, 0x4d068, 1932 0x4d080, 0x4d084, 1933 0x4d0a0, 0x4d0b0, 1934 0x4d200, 0x4d204, 1935 0x4d210, 0x4d230, 1936 0x4d240, 0x4d260, 1937 0x4d268, 0x4d268, 1938 0x4d280, 0x4d284, 1939 0x4d2a0, 0x4d2b0, 1940 0x4e0c0, 0x4e0e4, 1941 0x4f000, 0x4f03c, 1942 0x4f044, 0x4f08c, 1943 0x4f200, 0x4f250, 1944 0x4f400, 0x4f408, 1945 0x4f414, 0x4f420, 1946 0x4f600, 0x4f618, 1947 0x4f800, 0x4f814, 1948 0x50000, 0x50084, 1949 0x50090, 0x500cc, 1950 0x50400, 0x50400, 1951 0x50800, 0x50884, 1952 0x50890, 0x508cc, 1953 0x50c00, 0x50c00, 1954 0x51000, 0x5101c, 1955 0x51300, 0x51308, 1956 }; 1957 1958 static const unsigned int t6_reg_ranges[] = { 1959 0x1008, 0x101c, 1960 0x1024, 0x10a8, 1961 0x10b4, 0x10f8, 1962 0x1100, 0x1114, 1963 0x111c, 0x112c, 1964 0x1138, 0x113c, 1965 0x1144, 0x114c, 1966 0x1180, 0x1184, 1967 0x1190, 0x1194, 1968 0x11a0, 0x11a4, 1969 0x11b0, 0x11b4, 1970 0x11fc, 0x1274, 1971 0x1280, 0x133c, 1972 0x1800, 0x18fc, 1973 0x3000, 0x302c, 1974 0x3060, 0x30b0, 1975 0x30b8, 0x30d8, 1976 0x30e0, 0x30fc, 1977 0x3140, 0x357c, 1978 0x35a8, 0x35cc, 1979 0x35ec, 0x35ec, 1980 0x3600, 0x5624, 1981 0x56cc, 0x56ec, 1982 0x56f4, 0x5720, 1983 0x5728, 0x575c, 1984 0x580c, 0x5814, 1985 0x5890, 0x589c, 1986 0x58a4, 0x58ac, 1987 0x58b8, 0x58bc, 1988 0x5940, 0x595c, 1989 0x5980, 0x598c, 1990 0x59b0, 0x59c8, 1991 0x59d0, 0x59dc, 1992 0x59fc, 0x5a18, 1993 0x5a60, 0x5a6c, 1994 0x5a80, 0x5a8c, 1995 0x5a94, 0x5a9c, 1996 0x5b94, 0x5bfc, 1997 0x5c10, 0x5e48, 1998 0x5e50, 0x5e94, 1999 0x5ea0, 0x5eb0, 2000 0x5ec0, 0x5ec0, 2001 0x5ec8, 0x5ed0, 2002 0x5ee0, 0x5ee0, 2003 0x5ef0, 0x5ef0, 2004 0x5f00, 0x5f00, 2005 0x6000, 0x6020, 2006 0x6028, 0x6040, 2007 0x6058, 0x609c, 2008 0x60a8, 0x619c, 2009 0x7700, 0x7798, 2010 0x77c0, 0x7880, 2011 0x78cc, 0x78fc, 2012 0x7b00, 0x7b58, 2013 0x7b60, 0x7b84, 2014 0x7b8c, 0x7c54, 2015 0x7d00, 0x7d38, 2016 0x7d40, 0x7d84, 2017 0x7d8c, 0x7ddc, 2018 0x7de4, 0x7e04, 2019 0x7e10, 0x7e1c, 2020 0x7e24, 0x7e38, 2021 0x7e40, 0x7e44, 2022 0x7e4c, 0x7e78, 2023 0x7e80, 0x7edc, 
2024 0x7ee8, 0x7efc, 2025 0x8dc0, 0x8de4, 2026 0x8df8, 0x8e04, 2027 0x8e10, 0x8e84, 2028 0x8ea0, 0x8f88, 2029 0x8fb8, 0x9058, 2030 0x9060, 0x9060, 2031 0x9068, 0x90f8, 2032 0x9100, 0x9124, 2033 0x9400, 0x9470, 2034 0x9600, 0x9600, 2035 0x9608, 0x9638, 2036 0x9640, 0x9704, 2037 0x9710, 0x971c, 2038 0x9800, 0x9808, 2039 0x9820, 0x983c, 2040 0x9850, 0x9864, 2041 0x9c00, 0x9c6c, 2042 0x9c80, 0x9cec, 2043 0x9d00, 0x9d6c, 2044 0x9d80, 0x9dec, 2045 0x9e00, 0x9e6c, 2046 0x9e80, 0x9eec, 2047 0x9f00, 0x9f6c, 2048 0x9f80, 0xa020, 2049 0xd004, 0xd03c, 2050 0xd100, 0xd118, 2051 0xd200, 0xd214, 2052 0xd220, 0xd234, 2053 0xd240, 0xd254, 2054 0xd260, 0xd274, 2055 0xd280, 0xd294, 2056 0xd2a0, 0xd2b4, 2057 0xd2c0, 0xd2d4, 2058 0xd2e0, 0xd2f4, 2059 0xd300, 0xd31c, 2060 0xdfc0, 0xdfe0, 2061 0xe000, 0xf008, 2062 0xf010, 0xf018, 2063 0xf020, 0xf028, 2064 0x11000, 0x11014, 2065 0x11048, 0x1106c, 2066 0x11074, 0x11088, 2067 0x11098, 0x11120, 2068 0x1112c, 0x1117c, 2069 0x11190, 0x112e0, 2070 0x11300, 0x1130c, 2071 0x12000, 0x1206c, 2072 0x19040, 0x1906c, 2073 0x19078, 0x19080, 2074 0x1908c, 0x190e8, 2075 0x190f0, 0x190f8, 2076 0x19100, 0x19110, 2077 0x19120, 0x19124, 2078 0x19150, 0x19194, 2079 0x1919c, 0x191b0, 2080 0x191d0, 0x191e8, 2081 0x19238, 0x19290, 2082 0x192a4, 0x192b0, 2083 0x192bc, 0x192bc, 2084 0x19348, 0x1934c, 2085 0x193f8, 0x19418, 2086 0x19420, 0x19428, 2087 0x19430, 0x19444, 2088 0x1944c, 0x1946c, 2089 0x19474, 0x19474, 2090 0x19490, 0x194cc, 2091 0x194f0, 0x194f8, 2092 0x19c00, 0x19c48, 2093 0x19c50, 0x19c80, 2094 0x19c94, 0x19c98, 2095 0x19ca0, 0x19cbc, 2096 0x19ce4, 0x19ce4, 2097 0x19cf0, 0x19cf8, 2098 0x19d00, 0x19d28, 2099 0x19d50, 0x19d78, 2100 0x19d94, 0x19d98, 2101 0x19da0, 0x19dc8, 2102 0x19df0, 0x19e10, 2103 0x19e50, 0x19e6c, 2104 0x19ea0, 0x19ebc, 2105 0x19ec4, 0x19ef4, 2106 0x19f04, 0x19f2c, 2107 0x19f34, 0x19f34, 2108 0x19f40, 0x19f50, 2109 0x19f90, 0x19fac, 2110 0x19fc4, 0x19fc8, 2111 0x19fd0, 0x19fe4, 2112 0x1a000, 0x1a004, 2113 0x1a010, 0x1a06c, 2114 
0x1a0b0, 0x1a0e4, 2115 0x1a0ec, 0x1a0f8, 2116 0x1a100, 0x1a108, 2117 0x1a114, 0x1a120, 2118 0x1a128, 0x1a130, 2119 0x1a138, 0x1a138, 2120 0x1a190, 0x1a1c4, 2121 0x1a1fc, 0x1a1fc, 2122 0x1e008, 0x1e00c, 2123 0x1e040, 0x1e044, 2124 0x1e04c, 0x1e04c, 2125 0x1e284, 0x1e290, 2126 0x1e2c0, 0x1e2c0, 2127 0x1e2e0, 0x1e2e0, 2128 0x1e300, 0x1e384, 2129 0x1e3c0, 0x1e3c8, 2130 0x1e408, 0x1e40c, 2131 0x1e440, 0x1e444, 2132 0x1e44c, 0x1e44c, 2133 0x1e684, 0x1e690, 2134 0x1e6c0, 0x1e6c0, 2135 0x1e6e0, 0x1e6e0, 2136 0x1e700, 0x1e784, 2137 0x1e7c0, 0x1e7c8, 2138 0x1e808, 0x1e80c, 2139 0x1e840, 0x1e844, 2140 0x1e84c, 0x1e84c, 2141 0x1ea84, 0x1ea90, 2142 0x1eac0, 0x1eac0, 2143 0x1eae0, 0x1eae0, 2144 0x1eb00, 0x1eb84, 2145 0x1ebc0, 0x1ebc8, 2146 0x1ec08, 0x1ec0c, 2147 0x1ec40, 0x1ec44, 2148 0x1ec4c, 0x1ec4c, 2149 0x1ee84, 0x1ee90, 2150 0x1eec0, 0x1eec0, 2151 0x1eee0, 0x1eee0, 2152 0x1ef00, 0x1ef84, 2153 0x1efc0, 0x1efc8, 2154 0x1f008, 0x1f00c, 2155 0x1f040, 0x1f044, 2156 0x1f04c, 0x1f04c, 2157 0x1f284, 0x1f290, 2158 0x1f2c0, 0x1f2c0, 2159 0x1f2e0, 0x1f2e0, 2160 0x1f300, 0x1f384, 2161 0x1f3c0, 0x1f3c8, 2162 0x1f408, 0x1f40c, 2163 0x1f440, 0x1f444, 2164 0x1f44c, 0x1f44c, 2165 0x1f684, 0x1f690, 2166 0x1f6c0, 0x1f6c0, 2167 0x1f6e0, 0x1f6e0, 2168 0x1f700, 0x1f784, 2169 0x1f7c0, 0x1f7c8, 2170 0x1f808, 0x1f80c, 2171 0x1f840, 0x1f844, 2172 0x1f84c, 0x1f84c, 2173 0x1fa84, 0x1fa90, 2174 0x1fac0, 0x1fac0, 2175 0x1fae0, 0x1fae0, 2176 0x1fb00, 0x1fb84, 2177 0x1fbc0, 0x1fbc8, 2178 0x1fc08, 0x1fc0c, 2179 0x1fc40, 0x1fc44, 2180 0x1fc4c, 0x1fc4c, 2181 0x1fe84, 0x1fe90, 2182 0x1fec0, 0x1fec0, 2183 0x1fee0, 0x1fee0, 2184 0x1ff00, 0x1ff84, 2185 0x1ffc0, 0x1ffc8, 2186 0x30000, 0x30030, 2187 0x30038, 0x30038, 2188 0x30040, 0x30040, 2189 0x30048, 0x30048, 2190 0x30050, 0x30050, 2191 0x3005c, 0x30060, 2192 0x30068, 0x30068, 2193 0x30070, 0x30070, 2194 0x30100, 0x30168, 2195 0x30190, 0x301a0, 2196 0x301a8, 0x301b8, 2197 0x301c4, 0x301c8, 2198 0x301d0, 0x301d0, 2199 0x30200, 0x30320, 2200 0x30400, 0x304b4, 
2201 0x304c0, 0x3052c, 2202 0x30540, 0x3061c, 2203 0x30800, 0x308a0, 2204 0x308c0, 0x30908, 2205 0x30910, 0x309b8, 2206 0x30a00, 0x30a04, 2207 0x30a0c, 0x30a14, 2208 0x30a1c, 0x30a2c, 2209 0x30a44, 0x30a50, 2210 0x30a74, 0x30a74, 2211 0x30a7c, 0x30afc, 2212 0x30b08, 0x30c24, 2213 0x30d00, 0x30d14, 2214 0x30d1c, 0x30d3c, 2215 0x30d44, 0x30d4c, 2216 0x30d54, 0x30d74, 2217 0x30d7c, 0x30d7c, 2218 0x30de0, 0x30de0, 2219 0x30e00, 0x30ed4, 2220 0x30f00, 0x30fa4, 2221 0x30fc0, 0x30fc4, 2222 0x31000, 0x31004, 2223 0x31080, 0x310fc, 2224 0x31208, 0x31220, 2225 0x3123c, 0x31254, 2226 0x31300, 0x31300, 2227 0x31308, 0x3131c, 2228 0x31338, 0x3133c, 2229 0x31380, 0x31380, 2230 0x31388, 0x313a8, 2231 0x313b4, 0x313b4, 2232 0x31400, 0x31420, 2233 0x31438, 0x3143c, 2234 0x31480, 0x31480, 2235 0x314a8, 0x314a8, 2236 0x314b0, 0x314b4, 2237 0x314c8, 0x314d4, 2238 0x31a40, 0x31a4c, 2239 0x31af0, 0x31b20, 2240 0x31b38, 0x31b3c, 2241 0x31b80, 0x31b80, 2242 0x31ba8, 0x31ba8, 2243 0x31bb0, 0x31bb4, 2244 0x31bc8, 0x31bd4, 2245 0x32140, 0x3218c, 2246 0x321f0, 0x321f4, 2247 0x32200, 0x32200, 2248 0x32218, 0x32218, 2249 0x32400, 0x32400, 2250 0x32408, 0x3241c, 2251 0x32618, 0x32620, 2252 0x32664, 0x32664, 2253 0x326a8, 0x326a8, 2254 0x326ec, 0x326ec, 2255 0x32a00, 0x32abc, 2256 0x32b00, 0x32b38, 2257 0x32b40, 0x32b58, 2258 0x32b60, 0x32b78, 2259 0x32c00, 0x32c00, 2260 0x32c08, 0x32c3c, 2261 0x32e00, 0x32e2c, 2262 0x32f00, 0x32f2c, 2263 0x33000, 0x3302c, 2264 0x33034, 0x33050, 2265 0x33058, 0x33058, 2266 0x33060, 0x3308c, 2267 0x3309c, 0x330ac, 2268 0x330c0, 0x330c0, 2269 0x330c8, 0x330d0, 2270 0x330d8, 0x330e0, 2271 0x330ec, 0x3312c, 2272 0x33134, 0x33150, 2273 0x33158, 0x33158, 2274 0x33160, 0x3318c, 2275 0x3319c, 0x331ac, 2276 0x331c0, 0x331c0, 2277 0x331c8, 0x331d0, 2278 0x331d8, 0x331e0, 2279 0x331ec, 0x33290, 2280 0x33298, 0x332c4, 2281 0x332e4, 0x33390, 2282 0x33398, 0x333c4, 2283 0x333e4, 0x3342c, 2284 0x33434, 0x33450, 2285 0x33458, 0x33458, 2286 0x33460, 0x3348c, 2287 0x3349c, 
0x334ac, 2288 0x334c0, 0x334c0, 2289 0x334c8, 0x334d0, 2290 0x334d8, 0x334e0, 2291 0x334ec, 0x3352c, 2292 0x33534, 0x33550, 2293 0x33558, 0x33558, 2294 0x33560, 0x3358c, 2295 0x3359c, 0x335ac, 2296 0x335c0, 0x335c0, 2297 0x335c8, 0x335d0, 2298 0x335d8, 0x335e0, 2299 0x335ec, 0x33690, 2300 0x33698, 0x336c4, 2301 0x336e4, 0x33790, 2302 0x33798, 0x337c4, 2303 0x337e4, 0x337fc, 2304 0x33814, 0x33814, 2305 0x33854, 0x33868, 2306 0x33880, 0x3388c, 2307 0x338c0, 0x338d0, 2308 0x338e8, 0x338ec, 2309 0x33900, 0x3392c, 2310 0x33934, 0x33950, 2311 0x33958, 0x33958, 2312 0x33960, 0x3398c, 2313 0x3399c, 0x339ac, 2314 0x339c0, 0x339c0, 2315 0x339c8, 0x339d0, 2316 0x339d8, 0x339e0, 2317 0x339ec, 0x33a90, 2318 0x33a98, 0x33ac4, 2319 0x33ae4, 0x33b10, 2320 0x33b24, 0x33b28, 2321 0x33b38, 0x33b50, 2322 0x33bf0, 0x33c10, 2323 0x33c24, 0x33c28, 2324 0x33c38, 0x33c50, 2325 0x33cf0, 0x33cfc, 2326 0x34000, 0x34030, 2327 0x34038, 0x34038, 2328 0x34040, 0x34040, 2329 0x34048, 0x34048, 2330 0x34050, 0x34050, 2331 0x3405c, 0x34060, 2332 0x34068, 0x34068, 2333 0x34070, 0x34070, 2334 0x34100, 0x34168, 2335 0x34190, 0x341a0, 2336 0x341a8, 0x341b8, 2337 0x341c4, 0x341c8, 2338 0x341d0, 0x341d0, 2339 0x34200, 0x34320, 2340 0x34400, 0x344b4, 2341 0x344c0, 0x3452c, 2342 0x34540, 0x3461c, 2343 0x34800, 0x348a0, 2344 0x348c0, 0x34908, 2345 0x34910, 0x349b8, 2346 0x34a00, 0x34a04, 2347 0x34a0c, 0x34a14, 2348 0x34a1c, 0x34a2c, 2349 0x34a44, 0x34a50, 2350 0x34a74, 0x34a74, 2351 0x34a7c, 0x34afc, 2352 0x34b08, 0x34c24, 2353 0x34d00, 0x34d14, 2354 0x34d1c, 0x34d3c, 2355 0x34d44, 0x34d4c, 2356 0x34d54, 0x34d74, 2357 0x34d7c, 0x34d7c, 2358 0x34de0, 0x34de0, 2359 0x34e00, 0x34ed4, 2360 0x34f00, 0x34fa4, 2361 0x34fc0, 0x34fc4, 2362 0x35000, 0x35004, 2363 0x35080, 0x350fc, 2364 0x35208, 0x35220, 2365 0x3523c, 0x35254, 2366 0x35300, 0x35300, 2367 0x35308, 0x3531c, 2368 0x35338, 0x3533c, 2369 0x35380, 0x35380, 2370 0x35388, 0x353a8, 2371 0x353b4, 0x353b4, 2372 0x35400, 0x35420, 2373 0x35438, 0x3543c, 2374 
0x35480, 0x35480, 2375 0x354a8, 0x354a8, 2376 0x354b0, 0x354b4, 2377 0x354c8, 0x354d4, 2378 0x35a40, 0x35a4c, 2379 0x35af0, 0x35b20, 2380 0x35b38, 0x35b3c, 2381 0x35b80, 0x35b80, 2382 0x35ba8, 0x35ba8, 2383 0x35bb0, 0x35bb4, 2384 0x35bc8, 0x35bd4, 2385 0x36140, 0x3618c, 2386 0x361f0, 0x361f4, 2387 0x36200, 0x36200, 2388 0x36218, 0x36218, 2389 0x36400, 0x36400, 2390 0x36408, 0x3641c, 2391 0x36618, 0x36620, 2392 0x36664, 0x36664, 2393 0x366a8, 0x366a8, 2394 0x366ec, 0x366ec, 2395 0x36a00, 0x36abc, 2396 0x36b00, 0x36b38, 2397 0x36b40, 0x36b58, 2398 0x36b60, 0x36b78, 2399 0x36c00, 0x36c00, 2400 0x36c08, 0x36c3c, 2401 0x36e00, 0x36e2c, 2402 0x36f00, 0x36f2c, 2403 0x37000, 0x3702c, 2404 0x37034, 0x37050, 2405 0x37058, 0x37058, 2406 0x37060, 0x3708c, 2407 0x3709c, 0x370ac, 2408 0x370c0, 0x370c0, 2409 0x370c8, 0x370d0, 2410 0x370d8, 0x370e0, 2411 0x370ec, 0x3712c, 2412 0x37134, 0x37150, 2413 0x37158, 0x37158, 2414 0x37160, 0x3718c, 2415 0x3719c, 0x371ac, 2416 0x371c0, 0x371c0, 2417 0x371c8, 0x371d0, 2418 0x371d8, 0x371e0, 2419 0x371ec, 0x37290, 2420 0x37298, 0x372c4, 2421 0x372e4, 0x37390, 2422 0x37398, 0x373c4, 2423 0x373e4, 0x3742c, 2424 0x37434, 0x37450, 2425 0x37458, 0x37458, 2426 0x37460, 0x3748c, 2427 0x3749c, 0x374ac, 2428 0x374c0, 0x374c0, 2429 0x374c8, 0x374d0, 2430 0x374d8, 0x374e0, 2431 0x374ec, 0x3752c, 2432 0x37534, 0x37550, 2433 0x37558, 0x37558, 2434 0x37560, 0x3758c, 2435 0x3759c, 0x375ac, 2436 0x375c0, 0x375c0, 2437 0x375c8, 0x375d0, 2438 0x375d8, 0x375e0, 2439 0x375ec, 0x37690, 2440 0x37698, 0x376c4, 2441 0x376e4, 0x37790, 2442 0x37798, 0x377c4, 2443 0x377e4, 0x377fc, 2444 0x37814, 0x37814, 2445 0x37854, 0x37868, 2446 0x37880, 0x3788c, 2447 0x378c0, 0x378d0, 2448 0x378e8, 0x378ec, 2449 0x37900, 0x3792c, 2450 0x37934, 0x37950, 2451 0x37958, 0x37958, 2452 0x37960, 0x3798c, 2453 0x3799c, 0x379ac, 2454 0x379c0, 0x379c0, 2455 0x379c8, 0x379d0, 2456 0x379d8, 0x379e0, 2457 0x379ec, 0x37a90, 2458 0x37a98, 0x37ac4, 2459 0x37ae4, 0x37b10, 2460 0x37b24, 0x37b28, 
2461 0x37b38, 0x37b50, 2462 0x37bf0, 0x37c10, 2463 0x37c24, 0x37c28, 2464 0x37c38, 0x37c50, 2465 0x37cf0, 0x37cfc, 2466 0x40040, 0x40040, 2467 0x40080, 0x40084, 2468 0x40100, 0x40100, 2469 0x40140, 0x401bc, 2470 0x40200, 0x40214, 2471 0x40228, 0x40228, 2472 0x40240, 0x40258, 2473 0x40280, 0x40280, 2474 0x40304, 0x40304, 2475 0x40330, 0x4033c, 2476 0x41304, 0x413c8, 2477 0x413d0, 0x413dc, 2478 0x413f0, 0x413f0, 2479 0x41400, 0x4140c, 2480 0x41414, 0x4141c, 2481 0x41480, 0x414d0, 2482 0x44000, 0x4407c, 2483 0x440c0, 0x441ac, 2484 0x441b4, 0x4427c, 2485 0x442c0, 0x443ac, 2486 0x443b4, 0x4447c, 2487 0x444c0, 0x445ac, 2488 0x445b4, 0x4467c, 2489 0x446c0, 0x447ac, 2490 0x447b4, 0x4487c, 2491 0x448c0, 0x449ac, 2492 0x449b4, 0x44a7c, 2493 0x44ac0, 0x44bac, 2494 0x44bb4, 0x44c7c, 2495 0x44cc0, 0x44dac, 2496 0x44db4, 0x44e7c, 2497 0x44ec0, 0x44fac, 2498 0x44fb4, 0x4507c, 2499 0x450c0, 0x451ac, 2500 0x451b4, 0x451fc, 2501 0x45800, 0x45804, 2502 0x45810, 0x45830, 2503 0x45840, 0x45860, 2504 0x45868, 0x45868, 2505 0x45880, 0x45884, 2506 0x458a0, 0x458b0, 2507 0x45a00, 0x45a04, 2508 0x45a10, 0x45a30, 2509 0x45a40, 0x45a60, 2510 0x45a68, 0x45a68, 2511 0x45a80, 0x45a84, 2512 0x45aa0, 0x45ab0, 2513 0x460c0, 0x460e4, 2514 0x47000, 0x4703c, 2515 0x47044, 0x4708c, 2516 0x47200, 0x47250, 2517 0x47400, 0x47408, 2518 0x47414, 0x47420, 2519 0x47600, 0x47618, 2520 0x47800, 0x47814, 2521 0x47820, 0x4782c, 2522 0x50000, 0x50084, 2523 0x50090, 0x500cc, 2524 0x50300, 0x50384, 2525 0x50400, 0x50400, 2526 0x50800, 0x50884, 2527 0x50890, 0x508cc, 2528 0x50b00, 0x50b84, 2529 0x50c00, 0x50c00, 2530 0x51000, 0x51020, 2531 0x51028, 0x510b0, 2532 0x51300, 0x51324, 2533 }; 2534 2535 u32 *buf_end = (u32 *)(buf + buf_size); 2536 const unsigned int *reg_ranges; 2537 int reg_ranges_size, range; 2538 unsigned int chip_version = chip_id(adap); 2539 2540 /* 2541 * Select the right set of register ranges to dump depending on the 2542 * adapter chip type. 
 */
	switch (chip_version) {
	case CHELSIO_T4:
		reg_ranges = t4_reg_ranges;
		reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
		break;

	case CHELSIO_T5:
		reg_ranges = t5_reg_ranges;
		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
		break;

	case CHELSIO_T6:
		reg_ranges = t6_reg_ranges;
		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
		break;

	default:
		CH_ERR(adap,
			"Unsupported chip version %d\n", chip_version);
		return;
	}

	/*
	 * Clear the register buffer and insert the appropriate register
	 * values selected by the above register ranges.
	 */
	memset(buf, 0, buf_size);
	for (range = 0; range < reg_ranges_size; range += 2) {
		unsigned int reg = reg_ranges[range];
		unsigned int last_reg = reg_ranges[range + 1];
		u32 *bufp = (u32 *)(buf + reg);

		/*
		 * Iterate across the register range filling in the register
		 * buffer but don't write past the end of the register buffer.
		 */
		while (reg <= last_reg && bufp < buf_end) {
			*bufp++ = t4_read_reg(adap, reg);
			reg += sizeof(u32);
		}
	}
}

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;		/* tag of the ID string section */
	u8  id_len[2];		/* length of id_data that follows */
	u8  id_data[ID_LEN];	/* adapter ID string */
	u8  vpdr_tag;		/* tag of the VPD-R section */
	u8  vpdr_len[2];	/* length of the VPD-R data that follows */
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_DELAY		10		/* 10us per poll spin */
#define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */

#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
#define CHELSIO_VPD_UNIQUE_ID	0x82

/*
 * Small utility function to wait till any outstanding VPD Access is complete.
2614 * We have a per-adapter state variable "VPD Busy" to indicate when we have a 2615 * VPD Access in flight. This allows us to handle the problem of having a 2616 * previous VPD Access time out and prevent an attempt to inject a new VPD 2617 * Request before any in-flight VPD reguest has completed. 2618 */ 2619 static int t4_seeprom_wait(struct adapter *adapter) 2620 { 2621 unsigned int base = adapter->params.pci.vpd_cap_addr; 2622 int max_poll; 2623 2624 /* 2625 * If no VPD Access is in flight, we can just return success right 2626 * away. 2627 */ 2628 if (!adapter->vpd_busy) 2629 return 0; 2630 2631 /* 2632 * Poll the VPD Capability Address/Flag register waiting for it 2633 * to indicate that the operation is complete. 2634 */ 2635 max_poll = EEPROM_MAX_POLL; 2636 do { 2637 u16 val; 2638 2639 udelay(EEPROM_DELAY); 2640 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val); 2641 2642 /* 2643 * If the operation is complete, mark the VPD as no longer 2644 * busy and return success. 2645 */ 2646 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) { 2647 adapter->vpd_busy = 0; 2648 return 0; 2649 } 2650 } while (--max_poll); 2651 2652 /* 2653 * Failure! Note that we leave the VPD Busy status set in order to 2654 * avoid pushing a new VPD Access request into the VPD Capability till 2655 * the current operation eventually succeeds. It's a bug to issue a 2656 * new request when an existing request is in flight and will result 2657 * in corrupt hardware state. 2658 */ 2659 return -ETIMEDOUT; 2660 } 2661 2662 /** 2663 * t4_seeprom_read - read a serial EEPROM location 2664 * @adapter: adapter to read 2665 * @addr: EEPROM virtual address 2666 * @data: where to store the read data 2667 * 2668 * Read a 32-bit word from a location in serial EEPROM using the card's PCI 2669 * VPD capability. Note that this function must be called with a virtual 2670 * address. 
2671 */ 2672 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data) 2673 { 2674 unsigned int base = adapter->params.pci.vpd_cap_addr; 2675 int ret; 2676 2677 /* 2678 * VPD Accesses must alway be 4-byte aligned! 2679 */ 2680 if (addr >= EEPROMVSIZE || (addr & 3)) 2681 return -EINVAL; 2682 2683 /* 2684 * Wait for any previous operation which may still be in flight to 2685 * complete. 2686 */ 2687 ret = t4_seeprom_wait(adapter); 2688 if (ret) { 2689 CH_ERR(adapter, "VPD still busy from previous operation\n"); 2690 return ret; 2691 } 2692 2693 /* 2694 * Issue our new VPD Read request, mark the VPD as being busy and wait 2695 * for our request to complete. If it doesn't complete, note the 2696 * error and return it to our caller. Note that we do not reset the 2697 * VPD Busy status! 2698 */ 2699 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr); 2700 adapter->vpd_busy = 1; 2701 adapter->vpd_flag = PCI_VPD_ADDR_F; 2702 ret = t4_seeprom_wait(adapter); 2703 if (ret) { 2704 CH_ERR(adapter, "VPD read of address %#x failed\n", addr); 2705 return ret; 2706 } 2707 2708 /* 2709 * Grab the returned data, swizzle it into our endianess and 2710 * return success. 2711 */ 2712 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data); 2713 *data = le32_to_cpu(*data); 2714 return 0; 2715 } 2716 2717 /** 2718 * t4_seeprom_write - write a serial EEPROM location 2719 * @adapter: adapter to write 2720 * @addr: virtual EEPROM address 2721 * @data: value to write 2722 * 2723 * Write a 32-bit word to a location in serial EEPROM using the card's PCI 2724 * VPD capability. Note that this function must be called with a virtual 2725 * address. 2726 */ 2727 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data) 2728 { 2729 unsigned int base = adapter->params.pci.vpd_cap_addr; 2730 int ret; 2731 u32 stats_reg; 2732 int max_poll; 2733 2734 /* 2735 * VPD Accesses must alway be 4-byte aligned! 
2736 */ 2737 if (addr >= EEPROMVSIZE || (addr & 3)) 2738 return -EINVAL; 2739 2740 /* 2741 * Wait for any previous operation which may still be in flight to 2742 * complete. 2743 */ 2744 ret = t4_seeprom_wait(adapter); 2745 if (ret) { 2746 CH_ERR(adapter, "VPD still busy from previous operation\n"); 2747 return ret; 2748 } 2749 2750 /* 2751 * Issue our new VPD Read request, mark the VPD as being busy and wait 2752 * for our request to complete. If it doesn't complete, note the 2753 * error and return it to our caller. Note that we do not reset the 2754 * VPD Busy status! 2755 */ 2756 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 2757 cpu_to_le32(data)); 2758 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, 2759 (u16)addr | PCI_VPD_ADDR_F); 2760 adapter->vpd_busy = 1; 2761 adapter->vpd_flag = 0; 2762 ret = t4_seeprom_wait(adapter); 2763 if (ret) { 2764 CH_ERR(adapter, "VPD write of address %#x failed\n", addr); 2765 return ret; 2766 } 2767 2768 /* 2769 * Reset PCI_VPD_DATA register after a transaction and wait for our 2770 * request to complete. If it doesn't complete, return error. 2771 */ 2772 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0); 2773 max_poll = EEPROM_MAX_POLL; 2774 do { 2775 udelay(EEPROM_DELAY); 2776 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg); 2777 } while ((stats_reg & 0x1) && --max_poll); 2778 if (!max_poll) 2779 return -ETIMEDOUT; 2780 2781 /* Return success! */ 2782 return 0; 2783 } 2784 2785 /** 2786 * t4_eeprom_ptov - translate a physical EEPROM address to virtual 2787 * @phys_addr: the physical EEPROM address 2788 * @fn: the PCI function number 2789 * @sz: size of function-specific area 2790 * 2791 * Translate a physical EEPROM address to virtual. The first 1K is 2792 * accessed through virtual addresses starting at 31K, the rest is 2793 * accessed through virtual addresses starting at 0. 
2794 * 2795 * The mapping is as follows: 2796 * [0..1K) -> [31K..32K) 2797 * [1K..1K+A) -> [ES-A..ES) 2798 * [1K+A..ES) -> [0..ES-A-1K) 2799 * 2800 * where A = @fn * @sz, and ES = EEPROM size. 2801 */ 2802 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) 2803 { 2804 fn *= sz; 2805 if (phys_addr < 1024) 2806 return phys_addr + (31 << 10); 2807 if (phys_addr < 1024 + fn) 2808 return EEPROMSIZE - fn + phys_addr - 1024; 2809 if (phys_addr < EEPROMSIZE) 2810 return phys_addr - 1024 - fn; 2811 return -EINVAL; 2812 } 2813 2814 /** 2815 * t4_seeprom_wp - enable/disable EEPROM write protection 2816 * @adapter: the adapter 2817 * @enable: whether to enable or disable write protection 2818 * 2819 * Enables or disables write protection on the serial EEPROM. 2820 */ 2821 int t4_seeprom_wp(struct adapter *adapter, int enable) 2822 { 2823 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 2824 } 2825 2826 /** 2827 * get_vpd_keyword_val - Locates an information field keyword in the VPD 2828 * @v: Pointer to buffered vpd data structure 2829 * @kw: The keyword to search for 2830 * 2831 * Returns the value of the information field keyword or 2832 * -ENOENT otherwise. 
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset , len;
	const u8 *buf = (const u8 *)v;
	const u8 *vpdr_len = &v->vpdr_len[0];

	/* VPD-R data starts immediately after the partial header we declared. */
	offset = sizeof(struct t4_vpd_hdr);
	/* The VPD-R section length is stored little-endian in two bytes. */
	len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);

	/* Refuse to walk past the VPD_LEN bytes the caller buffered. */
	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
		return -ENOENT;
	}

	/*
	 * Each information field is a 3-byte header (2-byte keyword plus a
	 * 1-byte data length) followed by the data itself.  Scan the fields
	 * sequentially for a matching keyword.
	 */
	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if(memcmp(buf + i , kw , 2) == 0){
			i += VPD_INFO_FLD_HDR_SIZE;
			/* Return the offset of the field's data within @v. */
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -ENOENT;
}


/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *	@vpd: caller provided temporary space to read the VPD into
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
			  u8 *vpd)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return (ret);

	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Read the whole VPD_LEN-byte image, 32 bits at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

/*
 * Look up a keyword's data offset in the VPD-R section; treat a missing
 * mandatory keyword as a fatal parse error.
 */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(v , name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	/*
	 * The "RV" field holds a checksum byte: all bytes from the start of
	 * the VPD up to and including the RV byte must sum to zero (mod 256).
	 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/* Copy each field out and trim trailing whitespace. */
	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	/*
	 * For SN/PN/NA the actual data length is the byte just before the
	 * field's data (offset - VPD_INFO_FLD_HDR_SIZE + 2), capped to the
	 * destination size.
	 */
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,	/* program page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID	= 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
chained 2960 * @lock: whether to lock SF for PL access only 2961 * @valp: where to store the read data 2962 * 2963 * Reads up to 4 bytes of data from the serial flash. The location of 2964 * the read needs to be specified prior to calling this by issuing the 2965 * appropriate commands to the serial flash. 2966 */ 2967 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, 2968 int lock, u32 *valp) 2969 { 2970 int ret; 2971 2972 if (!byte_cnt || byte_cnt > 4) 2973 return -EINVAL; 2974 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) 2975 return -EBUSY; 2976 t4_write_reg(adapter, A_SF_OP, 2977 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); 2978 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); 2979 if (!ret) 2980 *valp = t4_read_reg(adapter, A_SF_DATA); 2981 return ret; 2982 } 2983 2984 /** 2985 * sf1_write - write data to the serial flash 2986 * @adapter: the adapter 2987 * @byte_cnt: number of bytes to write 2988 * @cont: whether another operation will be chained 2989 * @lock: whether to lock SF for PL access only 2990 * @val: value to write 2991 * 2992 * Writes up to 4 bytes of data to the serial flash. The location of 2993 * the write needs to be specified prior to calling this by issuing the 2994 * appropriate commands to the serial flash. 
2995 */ 2996 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, 2997 int lock, u32 val) 2998 { 2999 if (!byte_cnt || byte_cnt > 4) 3000 return -EINVAL; 3001 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) 3002 return -EBUSY; 3003 t4_write_reg(adapter, A_SF_DATA, val); 3004 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) | 3005 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); 3006 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); 3007 } 3008 3009 /** 3010 * flash_wait_op - wait for a flash operation to complete 3011 * @adapter: the adapter 3012 * @attempts: max number of polls of the status register 3013 * @delay: delay between polls in ms 3014 * 3015 * Wait for a flash operation to complete by polling the status register. 3016 */ 3017 static int flash_wait_op(struct adapter *adapter, int attempts, int delay) 3018 { 3019 int ret; 3020 u32 status; 3021 3022 while (1) { 3023 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || 3024 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) 3025 return ret; 3026 if (!(status & 1)) 3027 return 0; 3028 if (--attempts == 0) 3029 return -EAGAIN; 3030 if (delay) 3031 msleep(delay); 3032 } 3033 } 3034 3035 /** 3036 * t4_read_flash - read words from serial flash 3037 * @adapter: the adapter 3038 * @addr: the start address for the read 3039 * @nwords: how many 32-bit words to read 3040 * @data: where to store the read data 3041 * @byte_oriented: whether to store data as bytes or as words 3042 * 3043 * Read the specified number of 32-bit words from the serial flash. 3044 * If @byte_oriented is set the read data is stored as a byte array 3045 * (i.e., big-endian), otherwise as 32-bit words in the platform's 3046 * natural endianness. 
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The read must lie within the flash and be 4-byte aligned. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/*
	 * Build the fast-read command word: the opcode goes in the low byte
	 * and the 24-bit flash address above it (hence the byte swap).
	 */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/*
	 * Issue the command, then a 1-byte chained read (presumably the
	 * fast-read dummy byte -- confirm against the flash datasheet).
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Keep the transfer chained until the final word. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as byte stream
 *	(i.e. matches what on disk), otherwise in big-endian.
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must lie within the flash and not cross a page boundary. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Program-page command: opcode in the low byte, 24-bit address above. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Push the payload out up to 4 bytes at a time, big-endian packed. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* @data was advanced past the payload above, hence "data - n". */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash (the fw_ver field of the FW header).
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
3160 */ 3161 int t4_get_tp_version(struct adapter *adapter, u32 *vers) 3162 { 3163 return t4_read_flash(adapter, FLASH_FW_START + 3164 offsetof(struct fw_hdr, tp_microcode_ver), 3165 1, vers, 0); 3166 } 3167 3168 /** 3169 * t4_get_exprom_version - return the Expansion ROM version (if any) 3170 * @adapter: the adapter 3171 * @vers: where to place the version 3172 * 3173 * Reads the Expansion ROM header from FLASH and returns the version 3174 * number (if present) through the @vers return value pointer. We return 3175 * this in the Firmware Version Format since it's convenient. Return 3176 * 0 on success, -ENOENT if no Expansion ROM is present. 3177 */ 3178 int t4_get_exprom_version(struct adapter *adap, u32 *vers) 3179 { 3180 struct exprom_header { 3181 unsigned char hdr_arr[16]; /* must start with 0x55aa */ 3182 unsigned char hdr_ver[4]; /* Expansion ROM version */ 3183 } *hdr; 3184 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header), 3185 sizeof(u32))]; 3186 int ret; 3187 3188 ret = t4_read_flash(adap, FLASH_EXP_ROM_START, 3189 ARRAY_SIZE(exprom_header_buf), exprom_header_buf, 3190 0); 3191 if (ret) 3192 return ret; 3193 3194 hdr = (struct exprom_header *)exprom_header_buf; 3195 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa) 3196 return -ENOENT; 3197 3198 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) | 3199 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) | 3200 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) | 3201 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3])); 3202 return 0; 3203 } 3204 3205 /** 3206 * t4_flash_erase_sectors - erase a range of flash sectors 3207 * @adapter: the adapter 3208 * @start: the first sector to erase 3209 * @end: the last sector to erase 3210 * 3211 * Erases the sectors in the given inclusive range. 
 */
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	/*
	 * NOTE(review): @start is not range-checked against 0; callers are
	 * trusted to pass a valid first sector.
	 */
	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		/* For each sector: write-enable, erase command, then wait. */
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/*
 * Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort ...
 */
static int t4_fw_matches_chip(struct adapter *adap,
			      const struct fw_hdr *hdr)
{
	/*
	 * The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
	 */
	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
		return 1;

	CH_ERR(adap,
		"FW image (%d) is not suitable for this adapter (%d)\n",
		hdr->chip, chip_id(adap));
	return 0;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap firmware images live in their own flash region. */
	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	/* Sanity-check the image before touching the flash at all. */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The big-endian 32-bit words of a valid image sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages (size is a multiple of SF_PAGE_SIZE). */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally, write the real version into the on-flash header. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
		       ret);
	return ret;
}

/**
 *	t4_fwcache - firmware cache operation
 *	@adap: the adapter
 *	@op  : the operation (flush or flush and invalidate)
 *
 *	Issues a FW PARAMS command to perform the requested firmware cache
 *	operation; returns the mailbox command's result.
 */
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
{
	struct fw_params_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn =
	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			    V_FW_PARAMS_CMD_PFN(adap->pf) |
			    V_FW_PARAMS_CMD_VFN(0));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.param[0].mnem =
	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
	c.param[0].val = (__force __be32)op;

	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}

/*
 * Dump the CIM PIF logic-analyzer request/response buffers into @pif_req and
 * @pif_rsp, optionally returning the write pointers.  LA capture (F_LADBGEN)
 * is temporarily disabled while reading and the previous debug configuration
 * is restored on exit.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* Skip two entries between groups -- presumably hardware
		 * layout; confirm against the CIM LA documentation. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/*
 * Dump the CIM MA logic-analyzer request/response buffers into @ma_req and
 * @ma_rsp, restoring the previous debug configuration on exit.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/*
 * Read the ULP RX logic analyzer into @la_buf as 8 interleaved streams
 * (stride 8) of ULPRX_LA_SIZE entries each.
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}

/* Port capabilities that may be advertised during autonegotiation. */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)

/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
	    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			FW_LEN16(c));

	/* No autoneg support: advertise everything we can do plus FC. */
	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
					     fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* Forced speed: request exactly what was asked for. */
		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
	    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			FW_LEN16(c));
	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/* Platform-specific handler invoked for a matched interrupt condition. */
typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};

/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is terminated
 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
 *	conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	/* Walk the table until the mask-0 terminator entry. */
	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg)
			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{
		  F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		/* Only entry in this table treated as non-fatal. */
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	/* T5 (and later) PCIE_INT_CAUSE bits; register layout differs from
	 * T4, hence the separate table. */
	static const struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		/* NOTE(review): bit name says write-request but the message
		 * says "count" — looks like a copy/paste slip from the T4
		 * table; confirm against the T5 register documentation. */
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		/* NOTE(review): F_VFIDPERR reported as "INTx clear" — message
		 * presumably should read "PCI VFID parity error"; verify. */
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		/* Non-fatal: outbound read error. */
		{ F_READRSPERR, "Outbound read error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	/* T4 exposes three separate PCIe cause registers; T5+ fold
	 * everything into PCIE_INT_CAUSE. */
	if (is_t4(adapter))
		fat = t4_handle_intr_status(adapter,
					    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
					    sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
					    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
					    pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    t5_pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;		/* accumulated error indication; nonzero => fatal */
	u32 err;

	/* Cause bits common to all chip generations (SGE_INT_CAUSE3). */
	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	/* Additional cause bits valid only on T4/T5. */
	static const struct intr_info
		t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/*
	 * For now, treat below interrupts as fatal so that we disable SGE and
	 * get better debug */
	static const struct intr_info t6_sge_intr_info[] = {
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1,
		  "SGE PCIe error for a DBP thread", -1, 1 },
		{ F_FATAL_WRE_LEN,
		  "SGE Actual WRE packet is less than advertized length",
		  -1, 1 },
		{ 0 }
	};

	/* SGE_INT_CAUSE1/2 together hold a 64-bit parity-error vector;
	 * any set bit is treated as a fatal parity error. */
	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
	if (v) {
		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
			 (unsigned long long)v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
	}

	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (chip_id(adapter) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
	else
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t6_sge_intr_info);

	/* Report (and clear) any captured per-queue error details. */
	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}

/* Aggregate CIM outbound/inbound queue parity-error masks. */
#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	/* uP access-violation causes (CIM_HOST_UPACC_INT_CAUSE). */
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	int fat;

	/* A firmware error may be the root cause; report it first. */
	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		/* PBL out-of-bounds conditions are reported but non-fatal. */
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	unsigned int chip_ver = chip_id(adap);
	/* Cause bits for T4/T5. */
	static const struct intr_info le_intr_info[] = {
		{ F_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_PARITYERR, "LE parity error", -1, 1 },
		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	/* T6 moved/renamed several LE cause bits. */
	static const struct intr_info t6_le_intr_info[] = {
		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
				  (chip_ver <= CHELSIO_T5) ?
				  le_intr_info : t6_le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	/* Sum the fatal counts from all seven MPS cause registers. */
	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
		      F_ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Indexed by idx (MEM_EDC0/MEM_EDC1/MEM_MC/MEM_MC1). */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Pick the cause/ECC-status register pair for the memory @idx. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter)) {
			addr = A_MC_INT_CAUSE;
			cnt_addr = A_MC_ECC_STATUS;
		} else {
			addr = A_MC_P_INT_CAUSE;
			cnt_addr = A_MC_P_ECC_STATUS;
		}
	} else {
		/* MC1 (second memory controller). */
		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n",
			  name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		t4_edc_err_read(adapter, idx);

		/* Reset the correctable-error counter after reading it. */
		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ?
				  "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter,
			 "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	/* Only parity and uncorrectable ECC errors are fatal; correctable
	 * ECC errors were already logged above. */
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.  Any MA interrupt handled here is treated as fatal.
 */
static void ma_intr_handler(struct adapter *adapter)
{
	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);

	if (status & F_MEM_PERR_INT_CAUSE) {
		CH_ALERT(adapter,
			 "MA parity error, parity status %#x\n",
			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
		/* T5 has a second parity status register. */
		if (is_t5(adapter))
			CH_ALERT(adapter,
				 "MA parity error, parity status %#x\n",
				 t4_read_reg(adapter,
					     A_MA_PARITY_ERROR_STATUS2));
	}
	if (status & F_MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
		CH_ALERT(adapter, "MA address wrap-around error by "
			 "client %u to address %#x\n",
			 G_MEM_WRAP_CLIENT_NUM(v),
			 G_MEM_WRAP_ADDRESS(v) << 4);
	}
	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
	t4_fatal_err(adapter);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}

/*
 * XGMAC interrupt handler.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v, int_cause_reg;

	/* The per-port MAC cause register moved between T4 and T5. */
	if (is_t4(adap))
		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
	else
		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);

	v = t4_read_reg(adap, int_cause_reg);

	/* Only the two FIFO parity-error bits are handled here. */
	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
	if (!v)
		return;

	if (v & F_TXFIFO_PRTY_ERR)
		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
			 port);
	if (v & F_RXFIFO_PRTY_ERR)
		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
			 port);
	t4_write_reg(adap, int_cause_reg, v);
	t4_fatal_err(adap);
}

/*
 * PL interrupt handler.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static const struct intr_info pl_intr_info[] = {
		{ F_FATALPERR, "Fatal parity error", -1, 1 },
		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	/* T5+ has no VFID_MAP parity bit in this register. */
	static const struct intr_info t5_pl_intr_info[] = {
		{ F_FATALPERR, "Fatal parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
				  is_t4(adap) ?
				  pl_intr_info : t5_pl_intr_info))
		t4_fatal_err(adap);
}

/* PF-level interrupt sources handled by this driver. */
#define PF_INTR_MASK (F_PFSW | F_PFCIM)

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 *
 *	Returns 0 if no global interrupt was pending, 1 otherwise.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch each pending top-level cause to its module handler. */
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_MAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_MAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_MAC2)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_MAC3)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC0)
		mem_intr_handler(adapter, MEM_MC);
	/* Only T5 has a second memory controller serviced here. */
	if (is_t5(adapter) && (cause & F_MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
	return 1;
}

/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts.  Interrupts are already
 *	enabled at each module, here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.  Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The PF field of PL_WHOAMI moved/widened on T6. */
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	/* Chip-generation-specific extra SGE cause bits to enable. */
	if (chip_id(adapter) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* Route global interrupts to this PF. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}

/**
 *	t4_intr_disable - disable interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.
  We only disable the top-level interrupt
 *	concentrators.  The caller must be a PCI function managing global
 *	interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The PF field of PL_WHOAMI moved/widened on T6. */
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	/* Stop routing global interrupts to this PF. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
}

/**
 *	t4_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.  The caller must be a PCI function managing
 *	global interrupts.
 */
void t4_intr_clear(struct adapter *adapter)
{
	/* Cause registers common to all chip generations. */
	static const unsigned int cause_reg[] = {
		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		MYPF_REG(A_PL_PF_INT_CAUSE),
		A_PL_PL_INT_CAUSE,
		A_LE_DB_INT_CAUSE,
	};

	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	t4_write_reg(adapter, is_t4(adapter) ?
		     A_MC_INT_CAUSE :
		     A_MC_P_INT_CAUSE, 0xffffffff);

	/* T4 has extra UTL status registers; T5+ has a second MA parity
	 * status register instead. */
	if (is_t4(adapter)) {
		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
			     0xffffffff);
		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
			     0xffffffff);
	} else
		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);

	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
}

/**
 *	hash_mac_addr - return the hash value of a MAC address
 *	@addr: the 48-bit Ethernet MAC address
 *
 *	Hashes a MAC address according to the hash function used by HW inexact
 *	(hash) address matching.  Returns a 6-bit hash (0..63): the two
 *	24-bit halves of the address are XORed together and folded down.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}

/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* one past the last entry */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				/* Wrap back to the start of @rspq when the
				 * caller supplied fewer entries than @n. */
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 *	t4_config_glbl_rss - configure the global RSS mode
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@mode: global RSS mode
 *	@flags: mode-specific flags
 *
 *	Sets the global RSS mode.  Returns -EINVAL for modes other than
 *	MANUAL and BASICVIRTUAL.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_pkd =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		/* @flags is meaningful only in basic-virtual mode. */
		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
	} else
		return -EINVAL;
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_config_vi_rss - configure per VI RSS settings
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: the VI id
 *	@flags: RSS flags
 *	@defq: id of the default RSS queue for the VI.
 *
 * Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}

/**
 * t4_read_rss - read the contents of the RSS mapping table
 * @adapter: the adapter
 * @map: holds the contents of the RSS mapping table
 *
 * Reads the contents of the RSS hash->queue mapping table.
 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 val;
	int i, ret;

	/* Each table row holds two queue IDs, hence RSS_NENTRIES / 2 reads. */
	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
		ret = rd_rss_row(adapter, i, &val);
		if (ret)
			return ret;
		*map++ = G_LKPTBLQUEUE0(val);
		*map++ = G_LKPTBLQUEUE1(val);
	}
	return 0;
}

/**
 * t4_fw_tp_pio_rw - Access TP PIO through LDST
 * @adap: the adapter
 * @vals: where the indirect register values are stored/written
 * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
 * @rw: Read (1) or Write (0)
 *
 * Access TP PIO registers through LDST firmware commands, one mailbox
 * command per register.
 *
 * NOTE(review): mailbox errors are silently ignored here; on a failed
 * read the corresponding vals[i] entry is left unmodified — confirm
 * callers can tolerate that.
 */
void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
		unsigned int start_index, unsigned int rw)
{
	int ret, i;
	int cmd = FW_LDST_ADDRSPC_TP_PIO;
	struct fw_ldst_cmd c;

	for (i = 0 ; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
		if (ret == 0) {
			if (rw)
				vals[i] = be32_to_cpu(c.u.addrval.val);
		}
	}
}

/**
 * t4_read_rss_key - read the global RSS key
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 *
 * Reads the global 320-bit RSS key.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
	if (t4_use_ldst(adap))
		t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
	else
		t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
				 A_TP_RSS_SECRET_KEY0);
}

/**
 * t4_write_rss_key - program one of the RSS keys
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 * @idx: which RSS key to write
 *
 * Writes one of the RSS keys with the given 320-bit value.  If @idx is
 * 0..15 the corresponding entry in the RSS key table is written,
 * otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/*
	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((chip_id(adap) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Write the key material first, then latch it into the slot below. */
	if (t4_use_ldst(adap))
		t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
	else
		t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
				  A_TP_RSS_SECRET_KEY0);

	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     V_KEYWRADDR(idx) | F_KEYWREN);
	}
}

/**
 * t4_read_rss_pf_config - read PF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the PF RSS table to read
 * @valp: where to store the returned value
 *
 * Reads the PF RSS Configuration Table at the specified index and returns
 * the value found there.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, valp, 1,
				A_TP_RSS_PF0_CONFIG + index, 1);
	else
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 valp, 1, A_TP_RSS_PF0_CONFIG + index);
}

/**
 * t4_write_rss_pf_config - write PF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the PF RSS table to write
 * @val: the value to store
 *
 * Writes the PF RSS Configuration Table at the specified index with the
 * specified value.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &val, 1,
				A_TP_RSS_PF0_CONFIG + index, 0);
	else
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &val, 1, A_TP_RSS_PF0_CONFIG + index);
}

/**
 * t4_read_rss_vf_config - read VF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the VF RSS table to read
 * @vfl: where to store the returned VFL
 * @vfh: where to store the returned VFH
 *
 * Reads the VF RSS Configuration Table at the specified index and returns
 * the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh)
{
	u32 vrt, mask, data;

	/* T6 uses a different VFWRADDR field layout in TP_RSS_CONFIG_VRT. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}
	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	if (t4_use_ldst(adapter)) {
		t4_fw_tp_pio_rw(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, 1);
		t4_fw_tp_pio_rw(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, 1);
	} else {
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 vfl, 1, A_TP_RSS_VFL_CONFIG);
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 vfh, 1, A_TP_RSS_VFH_CONFIG);
	}
}

/**
 * t4_write_rss_vf_config - write VF RSS Configuration Table
 *
 * @adapter: the adapter
 * @index: the entry in the VF RSS table to write
 * @vfl: the VFL to store
 * @vfh: the VFH to store
 *
 * Writes the VF RSS Configuration Table at the specified index with the
 * specified (VFL, VFH) values.
 */
void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
			    u32 vfl, u32 vfh)
{
	u32 vrt, mask, data;

	/* T6 uses a different VFWRADDR field layout in TP_RSS_CONFIG_VRT. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}

	/*
	 * Load up VFL/VFH with the values to be written ...
	 */
	if (t4_use_ldst(adapter)) {
		t4_fw_tp_pio_rw(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, 0);
		t4_fw_tp_pio_rw(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, 0);
	} else {
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &vfl, 1, A_TP_RSS_VFL_CONFIG);
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &vfh, 1, A_TP_RSS_VFH_CONFIG);
	}

	/*
	 * Write the VFL/VFH into the VF Table at index'th location.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
}

/**
 * t4_read_rss_pf_map - read PF RSS Map
 * @adapter: the adapter
 *
 * Reads the PF RSS Map register and returns its value.
 */
u32 t4_read_rss_pf_map(struct adapter *adapter)
{
	u32 pfmap;

	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 1);
	else
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 &pfmap, 1, A_TP_RSS_PF_MAP);
	return pfmap;
}

/**
 * t4_write_rss_pf_map - write PF RSS Map
 * @adapter: the adapter
 * @pfmap: PF RSS Map value
 *
 * Writes the specified value to the PF RSS Map register.
 */
void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 0);
	else
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &pfmap, 1, A_TP_RSS_PF_MAP);
}

/**
 * t4_read_rss_pf_mask - read PF RSS Mask
 * @adapter: the adapter
 *
 * Reads the PF RSS Mask register and returns its value.
 */
u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
	u32 pfmask;

	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 1);
	else
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 &pfmask, 1, A_TP_RSS_PF_MSK);
	return pfmask;
}

/**
 * t4_write_rss_pf_mask - write PF RSS Mask
 * @adapter: the adapter
 * @pfmask: PF RSS Mask value
 *
 * Writes the specified value to the PF RSS Mask register.
 */
void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 0);
	else
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &pfmask, 1, A_TP_RSS_PF_MSK);
}

/**
 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
 * @adap: the adapter
 * @v4: holds the TCP/IP counter values
 * @v6: holds the TCP/IPv6 counter values
 *
 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

	/* Map a MIB register name to its slot in the val[] block read. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x) val[STAT_IDX(x)]
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* Same register layout, based at the IPv6 counter block. */
		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}

/**
 * t4_tp_get_err_stats - read TP's error MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 *
 * Returns the values of TP's error counters.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
	int nchan = adap->chip_params->nchan;

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
}

/**
 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 *
 * Returns the values of TP's proxy counters.
 */
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
{
	int nchan = adap->chip_params->nchan;

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
			 nchan, A_TP_MIB_TNL_LPBK_0);
}

/**
 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 *
 * Returns the values of TP's CPL counters.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
{
	int nchan = adap->chip_params->nchan;

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
			 nchan, A_TP_MIB_CPL_IN_REQ_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp,
			 nchan, A_TP_MIB_CPL_OUT_RSP_0);
}

/**
 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 *
 * Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
{
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_pkt,
			 2, A_TP_MIB_RQE_DFR_PKT);
}

/**
 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 * @adap: the adapter
 * @idx: the port index
 * @st: holds the counter values
 *
 * Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st)
{
	u32 val[2];

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
			 1, A_TP_MIB_FCOE_DDP_0 + idx);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
			 1, A_TP_MIB_FCOE_DROP_0 + idx);
	/* Byte counter is a HI/LO register pair, two registers per port. */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}

/**
 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 *
 * Returns the values of TP's counters for non-TCP directly-placed packets.
 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
{
	u32 val[4];

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
			 A_TP_MIB_USM_PKTS);
	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}

/**
 * t4_read_mtu_tbl - returns the values in the HW path MTU table
 * @adap: the adapter
 * @mtus: where to store the MTU values
 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 * Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/* Select entry i (index 0xff means read-back mode). */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}

/**
 * t4_read_cong_tbl - reads the congestion control table
 * @adap: the adapter
 * @incr: where to store the alpha values
 *
 * Reads the additive increments programmed into the HW congestion
 * control table.
 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
			incr[mtu][w] = (u16)t4_read_reg(adap,
						A_TP_CCTRL_TABLE) & 0x1fff;
		}
}

/**
 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @adap: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	/* read-modify-write: preserve the bits outside @mask */
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}

/**
 * init_cong_ctrl - initialize congestion control parameters
 * @a: the alpha values for congestion control
 * @b: the beta values for congestion control
 *
 * Initialize the congestion control parameters.  Fills 32-entry tables:
 * alpha (additive increment) grows from 1 to 500, beta (multiplicative
 * decrease shift) from 0 to 7.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = 6;
	b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* 40 = IP + TCP header bytes excluded from payload */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/**
 * t4_set_pace_tbl - set the pace table
 * @adap: the adapter
 * @pace_vals: the pace values in microseconds
 * @start: index of the first entry in the HW pace table to set
 * @n: how many entries to set
 *
 * Sets (a subset of the) HW pace table.
 */
int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
		    unsigned int start, unsigned int n)
{
	unsigned int vals[NTX_SCHED], i;
	/* usec per 1000 DACK ticks == nanoseconds per DACK tick */
	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);

	if (n > NTX_SCHED)
		return -ERANGE;

	/* convert values from us to dack ticks, rounding to closest value */
	for (i = 0; i < n; i++, pace_vals++) {
		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
		if (vals[i] > 0x7ff)
			return -ERANGE;	/* exceeds the HW field width */
		if (*pace_vals && vals[i] == 0)
			return -ERANGE;	/* non-zero value rounded to zero */
	}
	for (i = 0; i < n; i++, start++)
		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
	return 0;
}

/**
 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
 * @adap: the adapter
 * @sched: the scheduler index
 * @kbps: target rate in Kbps
 *
 * Configure a Tx HW scheduler for the target rate.
 */
int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;     /* -> bytes */
		/*
		 * Search all clocks-per-tick (cpt) / bytes-per-tick (bpt)
		 * combinations for the one closest to the requested rate.
		 */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* ticks per second */
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta < mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* Each TM PIO register holds the settings for a scheduler pair. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}

/**
 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 * @adap: the adapter
 * @sched: the scheduler index
 * @ipg: the interpacket delay in tenths of nanoseconds
 *
 * Set the interpacket delay for a HW packet rate scheduler.
 */
int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
{
	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;

	/* convert ipg to nearest number of core clocks */
	ipg *= core_ticks_per_usec(adap);
	ipg = (ipg + 5000) / 10000;
	if (ipg > M_TXTIMERSEPQ0)
		return -EINVAL;

	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
	else
		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	/* read back — presumably posts/flushes the write; confirm intent */
	t4_read_reg(adap, A_TP_TM_PIO_DATA);
	return 0;
}

/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 * clocks.  The formula is
 *
 *	bytes/s = bytes256 * 256 * ClkFreq / 4096
 *
 * which is equivalent to
 *
 *	bytes/s = 62.5 * bytes256 * ClkFreq_ms
 */
static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
{
	u64 v = bytes256 * adap->params.vpd.cclk;

	/* v * 62.5, kept in integer arithmetic */
	return v * 62 + v / 2;
}

/**
 * t4_get_chan_txrate - get the current per channel Tx rates
 * @adap: the adapter
 * @nic_rate: rates for NIC traffic
 * @ofld_rate: rates for offloaded traffic
 *
 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
 * for each channel.
 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	if (adap->chip_params->nchan > 2) {
		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
	}

	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	if (adap->chip_params->nchan > 2) {
		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
	}
}

/**
 * t4_set_trace_filter - configure one of the tracing filters
 * @adap: the adapter
 * @tp: the desired trace filter parameters
 * @idx: which filter to configure
 * @enable: whether to enable or disable the filter
 *
 * Configures one of the tracing filters available in HW.  If @tp is %NULL
 * it indicates that the filter is already written in the register and it
 * just needs to be enabled or disabled.
5364 */ 5365 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, 5366 int idx, int enable) 5367 { 5368 int i, ofst = idx * 4; 5369 u32 data_reg, mask_reg, cfg; 5370 u32 multitrc = F_TRCMULTIFILTER; 5371 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN; 5372 5373 if (idx < 0 || idx >= NTRACE) 5374 return -EINVAL; 5375 5376 if (tp == NULL || !enable) { 5377 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 5378 enable ? en : 0); 5379 return 0; 5380 } 5381 5382 /* 5383 * TODO - After T4 data book is updated, specify the exact 5384 * section below. 5385 * 5386 * See T4 data book - MPS section for a complete description 5387 * of the below if..else handling of A_MPS_TRC_CFG register 5388 * value. 5389 */ 5390 cfg = t4_read_reg(adap, A_MPS_TRC_CFG); 5391 if (cfg & F_TRCMULTIFILTER) { 5392 /* 5393 * If multiple tracers are enabled, then maximum 5394 * capture size is 2.5KB (FIFO size of a single channel) 5395 * minus 2 flits for CPL_TRACE_PKT header. 5396 */ 5397 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8))) 5398 return -EINVAL; 5399 } else { 5400 /* 5401 * If multiple tracers are disabled, to avoid deadlocks 5402 * maximum packet capture size of 9600 bytes is recommended. 5403 * Also in this mode, only trace0 can be enabled and running. 5404 */ 5405 multitrc = 0; 5406 if (tp->snap_len > 9600 || idx) 5407 return -EINVAL; 5408 } 5409 5410 if (tp->port > (is_t4(adap) ? 
11 : 19) || tp->invert > 1 || 5411 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET || 5412 tp->min_len > M_TFMINPKTSIZE) 5413 return -EINVAL; 5414 5415 /* stop the tracer we'll be changing */ 5416 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0); 5417 5418 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH); 5419 data_reg = A_MPS_TRC_FILTER0_MATCH + idx; 5420 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx; 5421 5422 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 5423 t4_write_reg(adap, data_reg, tp->data[i]); 5424 t4_write_reg(adap, mask_reg, ~tp->mask[i]); 5425 } 5426 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst, 5427 V_TFCAPTUREMAX(tp->snap_len) | 5428 V_TFMINPKTSIZE(tp->min_len)); 5429 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 5430 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en | 5431 (is_t4(adap) ? 5432 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) : 5433 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert))); 5434 5435 return 0; 5436 } 5437 5438 /** 5439 * t4_get_trace_filter - query one of the tracing filters 5440 * @adap: the adapter 5441 * @tp: the current trace filter parameters 5442 * @idx: which trace filter to query 5443 * @enabled: non-zero if the filter is enabled 5444 * 5445 * Returns the current settings of one of the HW tracing filters. 
5446 */ 5447 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, 5448 int *enabled) 5449 { 5450 u32 ctla, ctlb; 5451 int i, ofst = idx * 4; 5452 u32 data_reg, mask_reg; 5453 5454 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst); 5455 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst); 5456 5457 if (is_t4(adap)) { 5458 *enabled = !!(ctla & F_TFEN); 5459 tp->port = G_TFPORT(ctla); 5460 tp->invert = !!(ctla & F_TFINVERTMATCH); 5461 } else { 5462 *enabled = !!(ctla & F_T5_TFEN); 5463 tp->port = G_T5_TFPORT(ctla); 5464 tp->invert = !!(ctla & F_T5_TFINVERTMATCH); 5465 } 5466 tp->snap_len = G_TFCAPTUREMAX(ctlb); 5467 tp->min_len = G_TFMINPKTSIZE(ctlb); 5468 tp->skip_ofst = G_TFOFFSET(ctla); 5469 tp->skip_len = G_TFLENGTH(ctla); 5470 5471 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx; 5472 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst; 5473 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst; 5474 5475 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 5476 tp->mask[i] = ~t4_read_reg(adap, mask_reg); 5477 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; 5478 } 5479 } 5480 5481 /** 5482 * t4_pmtx_get_stats - returns the HW stats from PMTX 5483 * @adap: the adapter 5484 * @cnt: where to store the count statistics 5485 * @cycles: where to store the cycle statistics 5486 * 5487 * Returns performance statistics from PMTX. 
 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	/* cnt[] and cycles[] must each hold pm_stats_cnt entries. */
	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Hardware stat selector is i + 1. */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap))
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		else {
			/* T5+: cycle counts are behind an indirect window. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 * t4_pmrx_get_stats - returns the HW stats from PMRX
 * @adap: the adapter
 * @cnt: where to store the count statistics
 * @cycles: where to store the cycle statistics
 *
 * Returns performance statistics from PMRX.
 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	/* Mirrors t4_pmtx_get_stats but for the RX pagepod manager. */
	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (is_t4(adap)) {
			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		} else {
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
					 A_PM_RX_DBG_DATA, data, 2,
					 A_PM_RX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 * t4_get_mps_bg_map - return the buffer groups associated with a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Returns a bitmap indicating which MPS buffer groups are associated
 * with the given port. Bit i is set if buffer group i is used by the
 * port.
 */
static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));

	if (n == 0)
		/* Only port 0 exists and it uses all four buffer groups. */
		return idx == 0 ? 0xf : 0;
	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
		/* Two ports, two buffer groups each (T4/T5 only). */
		return idx < 2 ? (3 << (2 * idx)) : 0;
	/* One buffer group per port. */
	return 1 << idx;
}

/**
 * t4_get_port_type_description - return Port Type string description
 * @port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Indexed directly by the firmware's enum fw_port_type values. */
	static const char *const port_type_description[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
	};

	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
	return "UNKNOWN";
}

/**
 * t4_get_port_stats_offset - collect port stats relative to a previous
 * snapshot
 * @adap: The adapter
 * @idx: The port
 * @stats: Current stats to fill
 * @offset: Previous stats snapshot
 */
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset)
{
	u64 *s, *o;
	int i;

	t4_get_port_stats(adap, idx, stats);
	/*
	 * port_stats is treated as a flat array of u64 counters; subtract
	 * the snapshot element-wise to get the delta since @offset.
	 */
	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
	     i < (sizeof(struct port_stats)/sizeof(u64)) ;
	     i++, s++, o++)
		*s -= *o;
}

/**
 * t4_get_port_stats - collect port statistics
 * @adap: the adapter
 * @idx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl;

/* Per-port 64-bit stat; the register block moved between T4 and T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
/* Common (non-per-port) 64-bit stat. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	/*
	 * If the hardware is counting TX pause frames in the frame/byte/
	 * mcast counters, back them out so those counters reflect data
	 * traffic only (a pause frame is 64 bytes on the wire).
	 */
	if (stat_ctl & F_COUNTPAUSESTATTX) {
		p->tx_frames -= p->tx_pause;
		p->tx_octets -= p->tx_pause * 64;
		p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment for the RX direction. */
	if (stat_ctl & F_COUNTPAUSESTATRX) {
		p->rx_frames -= p->rx_pause;
		p->rx_octets -= p->rx_pause * 64;
		p->rx_mcast_frames -= p->rx_pause;
	}

	/* Drop/truncate counters exist only for this port's buffer groups. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 * t4_get_lb_stats - collect loopback port statistics
 * @adap: the adapter
 * @idx: the loopback port index
 * @p: the stats structure to fill
 *
 * Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

/* Per-loopback-port 64-bit stat; register block differs on T4 vs T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
/* Common (non-per-port) 64-bit stat. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets = GET_STAT(BYTES);
	p->frames = GET_STAT(FRAMES);
	p->bcast_frames = GET_STAT(BCAST);
	p->mcast_frames = GET_STAT(MCAST);
	p->ucast_frames = GET_STAT(UCAST);
	p->error_frames = GET_STAT(ERROR);

	p->frames_64 = GET_STAT(64B);
	p->frames_65_127 = GET_STAT(65B_127B);
	p->frames_128_255 = GET_STAT(128B_255B);
	p->frames_256_511 = GET_STAT(256B_511B);
	p->frames_512_1023 = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max = GET_STAT(1519B_MAX);
	p->drop = GET_STAT(DROP_FRAMES);

	/* Only read counters for buffer groups this port actually uses. */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 * t4_wol_magic_enable - enable/disable magic packet WoL
 * @adap: the adapter
 * @port: the physical port index
 * @addr: MAC address expected in magic packets, %NULL to disable
 *
 * Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	/* The magic-MACID and port-config registers moved on T5+. */
	if (is_t4(adap)) {
		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	} else {
		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	}

	if (addr) {
		/* Pack the 6-byte MAC address into LO (last 4 bytes) and
		 * HI (first 2 bytes) registers. */
		t4_write_reg(adap, mag_id_reg_l,
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, mag_id_reg_h,
			     (addr[0] << 8) | addr[1]);
	}
	/* Enable magic-packet matching iff an address was supplied. */
	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
			 V_MAGICEN(addr != NULL));
}

/**
 * t4_wol_pat_enable - enable/disable pattern-based WoL
 * @adap: the adapter
 * @port: the physical port index
 * @map: bitmap of which HW pattern filters to set
 * @mask0: byte mask for bytes 0-63 of a packet
 * @mask1: byte mask for bytes 64-127 of a packet
 * @crc: Ethernet CRC for selected bytes
 * @enable: enable/disable switch
 *
 * Sets the pattern filters indicated in @map to mask out the bytes
 * specified in @mask0/@mask1 in received packets and compare the CRC of
 * the resulting packet against @crc. If @enable is %true pattern-based
 * WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	if (is_t4(adap))
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	else
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);

	/* Disable is a simple field clear; nothing else to program. */
	if (!enable) {
		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
		return 0;
	}
	/* Only NWOL_PAT (8) pattern filters exist. */
	if (map > 0xff)
		return -EINVAL;

/* EPIO register addresses differ between T4 and T5+. */
#define EPIO_REG(name) \
	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))

	/* DATA1-3 hold the upper 96 bits of the 128-bit byte mask. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;

		/* write CRC (stored at EPIO address i + 32) */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	/* All requested patterns programmed; turn pattern matching on. */
	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
	return 0;
}

/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter. If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* NOREPLY suppresses the delete notification when qid < 0. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
		    cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/*
 * Fill in the common header of a firmware command: opcode, REQUEST flag,
 * READ/WRITE direction, and the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)

/*
 * t4_fwaddrspace_write - write a 32-bit value into the firmware's own
 * address space via a LDST command on the given mailbox.  Returns the
 * mailbox command status (0 on success, negative errno on failure).
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_mdio_rd - read a PHY register through MDIO
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @phy_addr: the PHY address
 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 * @reg: the register to read
 * @valp: where to store the value
 *
 * Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int *valp)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	/* MDIO access goes through a firmware LDST command, not direct
	 * register I/O. */
	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		/* PHY register values are 16 bits. */
		*valp = be16_to_cpu(c.u.mdio.rval);
	return ret;
}

/**
 * t4_mdio_wr - write a PHY register through MDIO
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @phy_addr: the PHY address
 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 * @reg: the register to write
 * @valp: value to write
 *
 * Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	/* Same LDST/MDIO command as t4_mdio_rd, but WRITE with the value
	 * supplied in rval. */
	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	c.u.mdio.rval = cpu_to_be16(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_sge_decode_idma_state - decode the idma state
 * @adapter: the adapter
 * @state: the state idma is stuck in
 *
 * Logs a human-readable name for a stuck SGE ingress-DMA state machine
 * state, plus a few SGE debug registers, to aid hang diagnosis.
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* State names indexed by IDMA state value; each chip generation
	 * has its own state machine encoding. */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* SGE debug registers dumped alongside the state for context. */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char * const *sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = chip_id(adapter);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char * const *)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char * const *)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
		sge_idma_decode = (const char * const *)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
		return;
	}

	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}

/**
 * t4_sge_ctxt_flush - flush the SGE context cache
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a FW command through the given mailbox to flush the
 * SGE context cache.
 */
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	/* The CTXTFLUSH flag in the idctxt sub-command triggers the flush. */
	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	return ret;
}

/**
 * t4_fw_hello - establish communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @evt_mbox: mailbox to receive async FW events
 * @master: specifies the caller's willingness to be the device master
 * @state: returns the current device state (if non-NULL)
 *
 * Issues a command to establish communication with FW. Returns either
 * an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware. If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit. If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly. (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time). In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

/**
 * t4_fw_bye - end communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_reset - issue a reset to FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @reset: specifies the type of reset to perform
 *
 * Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state. The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * M_PCIE_FW_MASTER).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing. The only way out of this state is to RESTART the firmware
 * ...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 * t4_fw_restart - restart the firmware by taking the uP out of RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by t4_fw_halt(). On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 * the chip's microprocessor out of RESET. This will cause the
 * firmware to start up from its start vector. And then we'll loop
 * until the firmware indicates it's started again (PCIE_FW.HALT
 * reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 * the chip since older firmware won't recognize the PCIE_FW.HALT
 * flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		/* Last resort: hit the whole chip with a PL-level reset. */
		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for the firmware to
		 * clear its HALT flag, up to FW_CMD_MAX_TIMEOUT ms. */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image. Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state. On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	/* Bootstrap images are just loaded; no halt/restart dance. */
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int reset, ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			return ret;
	}

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0 || bootstrap)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart. So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate. We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return t4_fw_restart(adap, mbox, reset);
}

/**
 * t4_fw_initialize - ask FW to initialize the device
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to FW to partially initialize the device. This
 * performs initialization that generally doesn't depend on user input.
6495 */ 6496 int t4_fw_initialize(struct adapter *adap, unsigned int mbox) 6497 { 6498 struct fw_initialize_cmd c; 6499 6500 memset(&c, 0, sizeof(c)); 6501 INIT_CMD(c, INITIALIZE, WRITE); 6502 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 6503 } 6504 6505 /** 6506 * t4_query_params_rw - query FW or device parameters 6507 * @adap: the adapter 6508 * @mbox: mailbox to use for the FW command 6509 * @pf: the PF 6510 * @vf: the VF 6511 * @nparams: the number of parameters 6512 * @params: the parameter names 6513 * @val: the parameter values 6514 * @rw: Write and read flag 6515 * 6516 * Reads the value of FW or device parameters. Up to 7 parameters can be 6517 * queried at once. 6518 */ 6519 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf, 6520 unsigned int vf, unsigned int nparams, const u32 *params, 6521 u32 *val, int rw) 6522 { 6523 int i, ret; 6524 struct fw_params_cmd c; 6525 __be32 *p = &c.param[0].mnem; 6526 6527 if (nparams > 7) 6528 return -EINVAL; 6529 6530 memset(&c, 0, sizeof(c)); 6531 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | 6532 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6533 V_FW_PARAMS_CMD_PFN(pf) | 6534 V_FW_PARAMS_CMD_VFN(vf)); 6535 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 6536 6537 for (i = 0; i < nparams; i++) { 6538 *p++ = cpu_to_be32(*params++); 6539 if (rw) 6540 *p = cpu_to_be32(*(val + i)); 6541 p++; 6542 } 6543 6544 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 6545 if (ret == 0) 6546 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) 6547 *val++ = be32_to_cpu(*p); 6548 return ret; 6549 } 6550 6551 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 6552 unsigned int vf, unsigned int nparams, const u32 *params, 6553 u32 *val) 6554 { 6555 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0); 6556 } 6557 6558 /** 6559 * t4_set_params_timeout - sets FW or device parameters 6560 * @adap: the adapter 6561 * @mbox: mailbox to use for the FW command 6562 
* @pf: the PF 6563 * @vf: the VF 6564 * @nparams: the number of parameters 6565 * @params: the parameter names 6566 * @val: the parameter values 6567 * @timeout: the timeout time 6568 * 6569 * Sets the value of FW or device parameters. Up to 7 parameters can be 6570 * specified at once. 6571 */ 6572 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, 6573 unsigned int pf, unsigned int vf, 6574 unsigned int nparams, const u32 *params, 6575 const u32 *val, int timeout) 6576 { 6577 struct fw_params_cmd c; 6578 __be32 *p = &c.param[0].mnem; 6579 6580 if (nparams > 7) 6581 return -EINVAL; 6582 6583 memset(&c, 0, sizeof(c)); 6584 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | 6585 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 6586 V_FW_PARAMS_CMD_PFN(pf) | 6587 V_FW_PARAMS_CMD_VFN(vf)); 6588 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 6589 6590 while (nparams--) { 6591 *p++ = cpu_to_be32(*params++); 6592 *p++ = cpu_to_be32(*val++); 6593 } 6594 6595 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout); 6596 } 6597 6598 /** 6599 * t4_set_params - sets FW or device parameters 6600 * @adap: the adapter 6601 * @mbox: mailbox to use for the FW command 6602 * @pf: the PF 6603 * @vf: the VF 6604 * @nparams: the number of parameters 6605 * @params: the parameter names 6606 * @val: the parameter values 6607 * 6608 * Sets the value of FW or device parameters. Up to 7 parameters can be 6609 * specified at once. 
6610 */ 6611 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 6612 unsigned int vf, unsigned int nparams, const u32 *params, 6613 const u32 *val) 6614 { 6615 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val, 6616 FW_CMD_MAX_TIMEOUT); 6617 } 6618 6619 /** 6620 * t4_cfg_pfvf - configure PF/VF resource limits 6621 * @adap: the adapter 6622 * @mbox: mailbox to use for the FW command 6623 * @pf: the PF being configured 6624 * @vf: the VF being configured 6625 * @txq: the max number of egress queues 6626 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 6627 * @rxqi: the max number of interrupt-capable ingress queues 6628 * @rxq: the max number of interruptless ingress queues 6629 * @tc: the PCI traffic class 6630 * @vi: the max number of virtual interfaces 6631 * @cmask: the channel access rights mask for the PF/VF 6632 * @pmask: the port access rights mask for the PF/VF 6633 * @nexact: the maximum number of exact MPS filters 6634 * @rcaps: read capabilities 6635 * @wxcaps: write/execute capabilities 6636 * 6637 * Configures resource limits and capabilities for a physical or virtual 6638 * function. 
6639 */ 6640 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 6641 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 6642 unsigned int rxqi, unsigned int rxq, unsigned int tc, 6643 unsigned int vi, unsigned int cmask, unsigned int pmask, 6644 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 6645 { 6646 struct fw_pfvf_cmd c; 6647 6648 memset(&c, 0, sizeof(c)); 6649 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST | 6650 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | 6651 V_FW_PFVF_CMD_VFN(vf)); 6652 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 6653 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) | 6654 V_FW_PFVF_CMD_NIQ(rxq)); 6655 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) | 6656 V_FW_PFVF_CMD_PMASK(pmask) | 6657 V_FW_PFVF_CMD_NEQ(txq)); 6658 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) | 6659 V_FW_PFVF_CMD_NVI(vi) | 6660 V_FW_PFVF_CMD_NEXACTF(nexact)); 6661 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) | 6662 V_FW_PFVF_CMD_WX_CAPS(wxcaps) | 6663 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); 6664 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 6665 } 6666 6667 /** 6668 * t4_alloc_vi_func - allocate a virtual interface 6669 * @adap: the adapter 6670 * @mbox: mailbox to use for the FW command 6671 * @port: physical port associated with the VI 6672 * @pf: the PF owning the VI 6673 * @vf: the VF owning the VI 6674 * @nmac: number of MAC addresses needed (1 to 5) 6675 * @mac: the MAC addresses of the VI 6676 * @rss_size: size of RSS table slice associated with this VI 6677 * @portfunc: which Port Application Function MAC Address is desired 6678 * @idstype: Intrusion Detection Type 6679 * 6680 * Allocates a virtual interface for the given physical port. If @mac is 6681 * not %NULL it contains the MAC addresses of the VI as assigned by FW. 6682 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW. 
6683 * @mac should be large enough to hold @nmac Ethernet addresses, they are 6684 * stored consecutively so the space needed is @nmac * 6 bytes. 6685 * Returns a negative error number or the non-negative VI id. 6686 */ 6687 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, 6688 unsigned int port, unsigned int pf, unsigned int vf, 6689 unsigned int nmac, u8 *mac, u16 *rss_size, 6690 unsigned int portfunc, unsigned int idstype) 6691 { 6692 int ret; 6693 struct fw_vi_cmd c; 6694 6695 memset(&c, 0, sizeof(c)); 6696 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST | 6697 F_FW_CMD_WRITE | F_FW_CMD_EXEC | 6698 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf)); 6699 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c)); 6700 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) | 6701 V_FW_VI_CMD_FUNC(portfunc)); 6702 c.portid_pkd = V_FW_VI_CMD_PORTID(port); 6703 c.nmac = nmac - 1; 6704 if(!rss_size) 6705 c.norss_rsssize = F_FW_VI_CMD_NORSS; 6706 6707 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 6708 if (ret) 6709 return ret; 6710 6711 if (mac) { 6712 memcpy(mac, c.mac, sizeof(c.mac)); 6713 switch (nmac) { 6714 case 5: 6715 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); 6716 case 4: 6717 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); 6718 case 3: 6719 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); 6720 case 2: 6721 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); 6722 } 6723 } 6724 if (rss_size) 6725 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize)); 6726 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid)); 6727 } 6728 6729 /** 6730 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface 6731 * @adap: the adapter 6732 * @mbox: mailbox to use for the FW command 6733 * @port: physical port associated with the VI 6734 * @pf: the PF owning the VI 6735 * @vf: the VF owning the VI 6736 * @nmac: number of MAC addresses needed (1 to 5) 6737 * @mac: the MAC addresses of the VI 6738 * @rss_size: size of RSS table slice associated with 
this VI 6739 * 6740 * backwards compatible and convieniance routine to allocate a Virtual 6741 * Interface with a Ethernet Port Application Function and Intrustion 6742 * Detection System disabled. 6743 */ 6744 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 6745 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 6746 u16 *rss_size) 6747 { 6748 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size, 6749 FW_VI_FUNC_ETH, 0); 6750 } 6751 6752 /** 6753 * t4_free_vi - free a virtual interface 6754 * @adap: the adapter 6755 * @mbox: mailbox to use for the FW command 6756 * @pf: the PF owning the VI 6757 * @vf: the VF owning the VI 6758 * @viid: virtual interface identifiler 6759 * 6760 * Free a previously allocated virtual interface. 6761 */ 6762 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, 6763 unsigned int vf, unsigned int viid) 6764 { 6765 struct fw_vi_cmd c; 6766 6767 memset(&c, 0, sizeof(c)); 6768 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | 6769 F_FW_CMD_REQUEST | 6770 F_FW_CMD_EXEC | 6771 V_FW_VI_CMD_PFN(pf) | 6772 V_FW_VI_CMD_VFN(vf)); 6773 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c)); 6774 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid)); 6775 6776 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 6777 } 6778 6779 /** 6780 * t4_set_rxmode - set Rx properties of a virtual interface 6781 * @adap: the adapter 6782 * @mbox: mailbox to use for the FW command 6783 * @viid: the VI id 6784 * @mtu: the new MTU or -1 6785 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 6786 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 6787 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 6788 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change 6789 * @sleep_ok: if true we may sleep while awaiting command completion 6790 * 6791 * Sets Rx properties of a virtual interface. 
6792 */ 6793 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 6794 int mtu, int promisc, int all_multi, int bcast, int vlanex, 6795 bool sleep_ok) 6796 { 6797 struct fw_vi_rxmode_cmd c; 6798 6799 /* convert to FW values */ 6800 if (mtu < 0) 6801 mtu = M_FW_VI_RXMODE_CMD_MTU; 6802 if (promisc < 0) 6803 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; 6804 if (all_multi < 0) 6805 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; 6806 if (bcast < 0) 6807 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; 6808 if (vlanex < 0) 6809 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; 6810 6811 memset(&c, 0, sizeof(c)); 6812 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | 6813 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 6814 V_FW_VI_RXMODE_CMD_VIID(viid)); 6815 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 6816 c.mtu_to_vlanexen = 6817 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) | 6818 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 6819 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 6820 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | 6821 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); 6822 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 6823 } 6824 6825 /** 6826 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 6827 * @adap: the adapter 6828 * @mbox: mailbox to use for the FW command 6829 * @viid: the VI id 6830 * @free: if true any existing filters for this VI id are first removed 6831 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 6832 * @addr: the MAC address(es) 6833 * @idx: where to store the index of each allocated filter 6834 * @hash: pointer to hash address filter bitmap 6835 * @sleep_ok: call is allowed to sleep 6836 * 6837 * Allocates an exact-match filter for each of the supplied addresses and 6838 * sets it to the corresponding address. If @idx is not %NULL it should 6839 * have at least @naddr entries, each of which will be set to the index of 6840 * the filter allocated for the corresponding MAC address. 
If a filter 6841 * could not be allocated for an address its index is set to 0xffff. 6842 * If @hash is not %NULL addresses that fail to allocate an exact filter 6843 * are hashed and update the hash filter bitmap pointed at by @hash. 6844 * 6845 * Returns a negative error number or the number of filters allocated. 6846 */ 6847 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, 6848 unsigned int viid, bool free, unsigned int naddr, 6849 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) 6850 { 6851 int offset, ret = 0; 6852 struct fw_vi_mac_cmd c; 6853 unsigned int nfilters = 0; 6854 unsigned int max_naddr = adap->chip_params->mps_tcam_size; 6855 unsigned int rem = naddr; 6856 6857 if (naddr > max_naddr) 6858 return -EINVAL; 6859 6860 for (offset = 0; offset < naddr ; /**/) { 6861 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) 6862 ? rem 6863 : ARRAY_SIZE(c.u.exact)); 6864 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 6865 u.exact[fw_naddr]), 16); 6866 struct fw_vi_mac_exact *p; 6867 int i; 6868 6869 memset(&c, 0, sizeof(c)); 6870 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 6871 F_FW_CMD_REQUEST | 6872 F_FW_CMD_WRITE | 6873 V_FW_CMD_EXEC(free) | 6874 V_FW_VI_MAC_CMD_VIID(viid)); 6875 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) | 6876 V_FW_CMD_LEN16(len16)); 6877 6878 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 6879 p->valid_to_idx = 6880 cpu_to_be16(F_FW_VI_MAC_CMD_VALID | 6881 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); 6882 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); 6883 } 6884 6885 /* 6886 * It's okay if we run out of space in our MAC address arena. 6887 * Some of the addresses we submit may get stored so we need 6888 * to run through the reply to see what the results were ... 
6889 */ 6890 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); 6891 if (ret && ret != -FW_ENOMEM) 6892 break; 6893 6894 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 6895 u16 index = G_FW_VI_MAC_CMD_IDX( 6896 be16_to_cpu(p->valid_to_idx)); 6897 6898 if (idx) 6899 idx[offset+i] = (index >= max_naddr 6900 ? 0xffff 6901 : index); 6902 if (index < max_naddr) 6903 nfilters++; 6904 else if (hash) 6905 *hash |= (1ULL << hash_mac_addr(addr[offset+i])); 6906 } 6907 6908 free = false; 6909 offset += fw_naddr; 6910 rem -= fw_naddr; 6911 } 6912 6913 if (ret == 0 || ret == -FW_ENOMEM) 6914 ret = nfilters; 6915 return ret; 6916 } 6917 6918 /** 6919 * t4_change_mac - modifies the exact-match filter for a MAC address 6920 * @adap: the adapter 6921 * @mbox: mailbox to use for the FW command 6922 * @viid: the VI id 6923 * @idx: index of existing filter for old value of MAC address, or -1 6924 * @addr: the new MAC address value 6925 * @persist: whether a new MAC allocation should be persistent 6926 * @add_smt: if true also add the address to the HW SMT 6927 * 6928 * Modifies an exact-match filter and sets it to the new MAC address if 6929 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 6930 * latter case the address is added persistently if @persist is %true. 6931 * 6932 * Note that in general it is not possible to modify the value of a given 6933 * filter so the generic way to modify an address filter is to free the one 6934 * being used by the old address value and allocate a new filter for the 6935 * new address value. 6936 * 6937 * Returns a negative error number or the index of the filter with the new 6938 * MAC value. Note that this index may differ from @idx. 
6939 */ 6940 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 6941 int idx, const u8 *addr, bool persist, bool add_smt) 6942 { 6943 int ret, mode; 6944 struct fw_vi_mac_cmd c; 6945 struct fw_vi_mac_exact *p = c.u.exact; 6946 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size; 6947 6948 if (idx < 0) /* new allocation */ 6949 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 6950 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; 6951 6952 memset(&c, 0, sizeof(c)); 6953 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 6954 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 6955 V_FW_VI_MAC_CMD_VIID(viid)); 6956 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1)); 6957 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | 6958 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | 6959 V_FW_VI_MAC_CMD_IDX(idx)); 6960 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 6961 6962 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 6963 if (ret == 0) { 6964 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); 6965 if (ret >= max_mac_addr) 6966 ret = -ENOMEM; 6967 } 6968 return ret; 6969 } 6970 6971 /** 6972 * t4_set_addr_hash - program the MAC inexact-match hash filter 6973 * @adap: the adapter 6974 * @mbox: mailbox to use for the FW command 6975 * @viid: the VI id 6976 * @ucast: whether the hash filter should also match unicast addresses 6977 * @vec: the value to be written to the hash filter 6978 * @sleep_ok: call is allowed to sleep 6979 * 6980 * Sets the 64-bit inexact-match hash filter for a virtual interface. 
6981 */ 6982 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 6983 bool ucast, u64 vec, bool sleep_ok) 6984 { 6985 struct fw_vi_mac_cmd c; 6986 u32 val; 6987 6988 memset(&c, 0, sizeof(c)); 6989 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 6990 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 6991 V_FW_VI_ENABLE_CMD_VIID(viid)); 6992 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) | 6993 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1); 6994 c.freemacs_to_len16 = cpu_to_be32(val); 6995 c.u.hash.hashvec = cpu_to_be64(vec); 6996 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 6997 } 6998 6999 /** 7000 * t4_enable_vi_params - enable/disable a virtual interface 7001 * @adap: the adapter 7002 * @mbox: mailbox to use for the FW command 7003 * @viid: the VI id 7004 * @rx_en: 1=enable Rx, 0=disable Rx 7005 * @tx_en: 1=enable Tx, 0=disable Tx 7006 * @dcb_en: 1=enable delivery of Data Center Bridging messages. 7007 * 7008 * Enables/disables a virtual interface. Note that setting DCB Enable 7009 * only makes sense when enabling a Virtual Interface ... 
7010 */ 7011 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, 7012 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) 7013 { 7014 struct fw_vi_enable_cmd c; 7015 7016 memset(&c, 0, sizeof(c)); 7017 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | 7018 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7019 V_FW_VI_ENABLE_CMD_VIID(viid)); 7020 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) | 7021 V_FW_VI_ENABLE_CMD_EEN(tx_en) | 7022 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | 7023 FW_LEN16(c)); 7024 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); 7025 } 7026 7027 /** 7028 * t4_enable_vi - enable/disable a virtual interface 7029 * @adap: the adapter 7030 * @mbox: mailbox to use for the FW command 7031 * @viid: the VI id 7032 * @rx_en: 1=enable Rx, 0=disable Rx 7033 * @tx_en: 1=enable Tx, 0=disable Tx 7034 * 7035 * Enables/disables a virtual interface. Note that setting DCB Enable 7036 * only makes sense when enabling a Virtual Interface ... 7037 */ 7038 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 7039 bool rx_en, bool tx_en) 7040 { 7041 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); 7042 } 7043 7044 /** 7045 * t4_identify_port - identify a VI's port by blinking its LED 7046 * @adap: the adapter 7047 * @mbox: mailbox to use for the FW command 7048 * @viid: the VI id 7049 * @nblinks: how many times to blink LED at 2.5 Hz 7050 * 7051 * Identifies a VI's port by blinking its LED. 
7052 */ 7053 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, 7054 unsigned int nblinks) 7055 { 7056 struct fw_vi_enable_cmd c; 7057 7058 memset(&c, 0, sizeof(c)); 7059 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | 7060 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7061 V_FW_VI_ENABLE_CMD_VIID(viid)); 7062 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); 7063 c.blinkdur = cpu_to_be16(nblinks); 7064 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7065 } 7066 7067 /** 7068 * t4_iq_stop - stop an ingress queue and its FLs 7069 * @adap: the adapter 7070 * @mbox: mailbox to use for the FW command 7071 * @pf: the PF owning the queues 7072 * @vf: the VF owning the queues 7073 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 7074 * @iqid: ingress queue id 7075 * @fl0id: FL0 queue id or 0xffff if no attached FL0 7076 * @fl1id: FL1 queue id or 0xffff if no attached FL1 7077 * 7078 * Stops an ingress queue and its associated FLs, if any. This causes 7079 * any current or future data/messages destined for these queues to be 7080 * tossed. 
7081 */ 7082 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 7083 unsigned int vf, unsigned int iqtype, unsigned int iqid, 7084 unsigned int fl0id, unsigned int fl1id) 7085 { 7086 struct fw_iq_cmd c; 7087 7088 memset(&c, 0, sizeof(c)); 7089 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 7090 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 7091 V_FW_IQ_CMD_VFN(vf)); 7092 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c)); 7093 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 7094 c.iqid = cpu_to_be16(iqid); 7095 c.fl0id = cpu_to_be16(fl0id); 7096 c.fl1id = cpu_to_be16(fl1id); 7097 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7098 } 7099 7100 /** 7101 * t4_iq_free - free an ingress queue and its FLs 7102 * @adap: the adapter 7103 * @mbox: mailbox to use for the FW command 7104 * @pf: the PF owning the queues 7105 * @vf: the VF owning the queues 7106 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 7107 * @iqid: ingress queue id 7108 * @fl0id: FL0 queue id or 0xffff if no attached FL0 7109 * @fl1id: FL1 queue id or 0xffff if no attached FL1 7110 * 7111 * Frees an ingress queue and its associated FLs, if any. 
7112 */ 7113 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 7114 unsigned int vf, unsigned int iqtype, unsigned int iqid, 7115 unsigned int fl0id, unsigned int fl1id) 7116 { 7117 struct fw_iq_cmd c; 7118 7119 memset(&c, 0, sizeof(c)); 7120 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 7121 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 7122 V_FW_IQ_CMD_VFN(vf)); 7123 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); 7124 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 7125 c.iqid = cpu_to_be16(iqid); 7126 c.fl0id = cpu_to_be16(fl0id); 7127 c.fl1id = cpu_to_be16(fl1id); 7128 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7129 } 7130 7131 /** 7132 * t4_eth_eq_free - free an Ethernet egress queue 7133 * @adap: the adapter 7134 * @mbox: mailbox to use for the FW command 7135 * @pf: the PF owning the queue 7136 * @vf: the VF owning the queue 7137 * @eqid: egress queue id 7138 * 7139 * Frees an Ethernet egress queue. 7140 */ 7141 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 7142 unsigned int vf, unsigned int eqid) 7143 { 7144 struct fw_eq_eth_cmd c; 7145 7146 memset(&c, 0, sizeof(c)); 7147 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | 7148 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7149 V_FW_EQ_ETH_CMD_PFN(pf) | 7150 V_FW_EQ_ETH_CMD_VFN(vf)); 7151 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); 7152 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); 7153 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7154 } 7155 7156 /** 7157 * t4_ctrl_eq_free - free a control egress queue 7158 * @adap: the adapter 7159 * @mbox: mailbox to use for the FW command 7160 * @pf: the PF owning the queue 7161 * @vf: the VF owning the queue 7162 * @eqid: egress queue id 7163 * 7164 * Frees a control egress queue. 
7165 */ 7166 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 7167 unsigned int vf, unsigned int eqid) 7168 { 7169 struct fw_eq_ctrl_cmd c; 7170 7171 memset(&c, 0, sizeof(c)); 7172 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | 7173 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7174 V_FW_EQ_CTRL_CMD_PFN(pf) | 7175 V_FW_EQ_CTRL_CMD_VFN(vf)); 7176 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); 7177 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid)); 7178 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7179 } 7180 7181 /** 7182 * t4_ofld_eq_free - free an offload egress queue 7183 * @adap: the adapter 7184 * @mbox: mailbox to use for the FW command 7185 * @pf: the PF owning the queue 7186 * @vf: the VF owning the queue 7187 * @eqid: egress queue id 7188 * 7189 * Frees a control egress queue. 7190 */ 7191 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 7192 unsigned int vf, unsigned int eqid) 7193 { 7194 struct fw_eq_ofld_cmd c; 7195 7196 memset(&c, 0, sizeof(c)); 7197 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | 7198 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7199 V_FW_EQ_OFLD_CMD_PFN(pf) | 7200 V_FW_EQ_OFLD_CMD_VFN(vf)); 7201 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); 7202 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid)); 7203 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7204 } 7205 7206 /** 7207 * t4_link_down_rc_str - return a string for a Link Down Reason Code 7208 * @link_down_rc: Link Down Reason Code 7209 * 7210 * Returns a string representation of the Link Down Reason Code. 
7211 */ 7212 const char *t4_link_down_rc_str(unsigned char link_down_rc) 7213 { 7214 static const char *reason[] = { 7215 "Link Down", 7216 "Remote Fault", 7217 "Auto-negotiation Failure", 7218 "Reserved3", 7219 "Insufficient Airflow", 7220 "Unable To Determine Reason", 7221 "No RX Signal Detected", 7222 "Reserved7", 7223 }; 7224 7225 if (link_down_rc >= ARRAY_SIZE(reason)) 7226 return "Bad Reason Code"; 7227 7228 return reason[link_down_rc]; 7229 } 7230 7231 /** 7232 * t4_handle_fw_rpl - process a FW reply message 7233 * @adap: the adapter 7234 * @rpl: start of the FW message 7235 * 7236 * Processes a FW message, such as link state change messages. 7237 */ 7238 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) 7239 { 7240 u8 opcode = *(const u8 *)rpl; 7241 const struct fw_port_cmd *p = (const void *)rpl; 7242 unsigned int action = 7243 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16)); 7244 7245 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) { 7246 /* link/module state change message */ 7247 int speed = 0, fc = 0, i; 7248 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid)); 7249 struct port_info *pi = NULL; 7250 struct link_config *lc; 7251 u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); 7252 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0; 7253 u32 mod = G_FW_PORT_CMD_MODTYPE(stat); 7254 7255 if (stat & F_FW_PORT_CMD_RXPAUSE) 7256 fc |= PAUSE_RX; 7257 if (stat & F_FW_PORT_CMD_TXPAUSE) 7258 fc |= PAUSE_TX; 7259 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) 7260 speed = 100; 7261 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) 7262 speed = 1000; 7263 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) 7264 speed = 10000; 7265 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) 7266 speed = 40000; 7267 7268 for_each_port(adap, i) { 7269 pi = adap2pinfo(adap, i); 7270 if (pi->tx_chan == chan) 7271 break; 7272 } 7273 lc = &pi->link_cfg; 7274 7275 if (mod != pi->mod_type) { 
7276 pi->mod_type = mod; 7277 t4_os_portmod_changed(adap, i); 7278 } 7279 if (link_ok != lc->link_ok || speed != lc->speed || 7280 fc != lc->fc) { /* something changed */ 7281 int reason; 7282 7283 if (!link_ok && lc->link_ok) 7284 reason = G_FW_PORT_CMD_LINKDNRC(stat); 7285 else 7286 reason = -1; 7287 7288 lc->link_ok = link_ok; 7289 lc->speed = speed; 7290 lc->fc = fc; 7291 lc->supported = be16_to_cpu(p->u.info.pcap); 7292 t4_os_link_changed(adap, i, link_ok, reason); 7293 } 7294 } else { 7295 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode); 7296 return -EINVAL; 7297 } 7298 return 0; 7299 } 7300 7301 /** 7302 * get_pci_mode - determine a card's PCI mode 7303 * @adapter: the adapter 7304 * @p: where to store the PCI settings 7305 * 7306 * Determines a card's PCI mode and associated parameters, such as speed 7307 * and width. 7308 */ 7309 static void get_pci_mode(struct adapter *adapter, 7310 struct pci_params *p) 7311 { 7312 u16 val; 7313 u32 pcie_cap; 7314 7315 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 7316 if (pcie_cap) { 7317 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val); 7318 p->speed = val & PCI_EXP_LNKSTA_CLS; 7319 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; 7320 } 7321 } 7322 7323 /** 7324 * init_link_config - initialize a link's SW state 7325 * @lc: structure holding the link state 7326 * @caps: link capabilities 7327 * 7328 * Initializes the SW state maintained for each link, including the link's 7329 * capabilities and default speed/flow-control/autonegotiation settings. 
7330 */ 7331 static void init_link_config(struct link_config *lc, unsigned int caps) 7332 { 7333 lc->supported = caps; 7334 lc->requested_speed = 0; 7335 lc->speed = 0; 7336 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; 7337 if (lc->supported & FW_PORT_CAP_ANEG) { 7338 lc->advertising = lc->supported & ADVERT_MASK; 7339 lc->autoneg = AUTONEG_ENABLE; 7340 lc->requested_fc |= PAUSE_AUTONEG; 7341 } else { 7342 lc->advertising = 0; 7343 lc->autoneg = AUTONEG_DISABLE; 7344 } 7345 } 7346 7347 struct flash_desc { 7348 u32 vendor_and_model_id; 7349 u32 size_mb; 7350 }; 7351 7352 int t4_get_flash_params(struct adapter *adapter) 7353 { 7354 /* 7355 * Table for non-Numonix supported flash parts. Numonix parts are left 7356 * to the preexisting well-tested code. All flash parts have 64KB 7357 * sectors. 7358 */ 7359 static struct flash_desc supported_flash[] = { 7360 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ 7361 }; 7362 7363 int ret; 7364 u32 info = 0; 7365 7366 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID); 7367 if (!ret) 7368 ret = sf1_read(adapter, 3, 0, 1, &info); 7369 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 7370 if (ret < 0) 7371 return ret; 7372 7373 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret) 7374 if (supported_flash[ret].vendor_and_model_id == info) { 7375 adapter->params.sf_size = supported_flash[ret].size_mb; 7376 adapter->params.sf_nsec = 7377 adapter->params.sf_size / SF_SEC_SIZE; 7378 return 0; 7379 } 7380 7381 if ((info & 0xff) != 0x20) /* not a Numonix flash */ 7382 return -EINVAL; 7383 info >>= 16; /* log2 of size */ 7384 if (info >= 0x14 && info < 0x18) 7385 adapter->params.sf_nsec = 1 << (info - 16); 7386 else if (info == 0x18) 7387 adapter->params.sf_nsec = 64; 7388 else 7389 return -EINVAL; 7390 adapter->params.sf_size = 1 << info; 7391 7392 /* 7393 * We should ~probably~ reject adapters with FLASHes which are too 7394 * small but we have some legacy FPGAs with small FLASHes that we'd 7395 * still like to use. 
So instead we emit a scary message ... 7396 */ 7397 if (adapter->params.sf_size < FLASH_MIN_SIZE) 7398 CH_WARN(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n", 7399 adapter->params.sf_size, FLASH_MIN_SIZE); 7400 7401 return 0; 7402 } 7403 7404 static void set_pcie_completion_timeout(struct adapter *adapter, 7405 u8 range) 7406 { 7407 u16 val; 7408 u32 pcie_cap; 7409 7410 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 7411 if (pcie_cap) { 7412 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val); 7413 val &= 0xfff0; 7414 val |= range ; 7415 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val); 7416 } 7417 } 7418 7419 static const struct chip_params *get_chip_params(int chipid) 7420 { 7421 static const struct chip_params chip_params[] = { 7422 { 7423 /* T4 */ 7424 .nchan = NCHAN, 7425 .pm_stats_cnt = PM_NSTATS, 7426 .cng_ch_bits_log = 2, 7427 .nsched_cls = 15, 7428 .cim_num_obq = CIM_NUM_OBQ, 7429 .mps_rplc_size = 128, 7430 .vfcount = 128, 7431 .sge_fl_db = F_DBPRIO, 7432 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES, 7433 }, 7434 { 7435 /* T5 */ 7436 .nchan = NCHAN, 7437 .pm_stats_cnt = PM_NSTATS, 7438 .cng_ch_bits_log = 2, 7439 .nsched_cls = 16, 7440 .cim_num_obq = CIM_NUM_OBQ_T5, 7441 .mps_rplc_size = 128, 7442 .vfcount = 128, 7443 .sge_fl_db = F_DBPRIO | F_DBTYPE, 7444 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES, 7445 }, 7446 { 7447 /* T6 */ 7448 .nchan = T6_NCHAN, 7449 .pm_stats_cnt = T6_PM_NSTATS, 7450 .cng_ch_bits_log = 3, 7451 .nsched_cls = 16, 7452 .cim_num_obq = CIM_NUM_OBQ_T5, 7453 .mps_rplc_size = 256, 7454 .vfcount = 256, 7455 .sge_fl_db = 0, 7456 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES, 7457 }, 7458 }; 7459 7460 chipid -= CHELSIO_T4; 7461 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params)) 7462 return NULL; 7463 7464 return &chip_params[chipid]; 7465 } 7466 7467 /** 7468 * t4_prep_adapter - prepare SW and HW for operation 7469 * @adapter: the adapter 7470 * @buf: temporary space of at least VPD_LEN size 
 *	provided by the caller.
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter, u8 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	/* Identify the chip generation and revision from PL_REV. */
	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	adapter->chip_params = get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = CIMLA_SIZE;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}

/**
 *	t4_shutdown_adapter - shut down adapter, host & wire
 *	@adapter: the adapter
 *
 *	Perform an emergency shutdown of the adapter and stop it from
 *	continuing any further communication on the ports or DMA to the
 *	host.  This is typically used when the adapter and/or firmware
 *	have crashed and we want to prevent any further accidental
 *	communication with the rest of the world.  This will also force
 *	the port Link Status to go down -- if register writes work --
 *	which should help our peers figure out that we're down.
 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;

	t4_intr_disable(adapter);
	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
	for_each_port(adapter, port) {
		u32 a_port_cfg = PORT_REG(port,
					  is_t4(adapter)
					  ? A_XGMAC_PORT_CFG
					  : A_MAC_PORT_CFG);

		/* Clear SIGNAL_DET to force the link down. */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~V_SIGNAL_DET(1));
	}
	/* Stop all SGE DMA to/from the host. */
	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);

	return 0;
}

/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
	    t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* Register encodes the entry count in units of 128, less 1. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
	    be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}

/**
 *	t4_init_sge_params - initialize adap->params.sge
 *	@adapter: the adapter
 *
 *	Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;

	/* Interrupt holdoff packet-count thresholds. */
	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* Interrupt holdoff timers, converted from core clock ticks to us. */
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r));
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r));
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r));
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r));
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r));
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r));

	/* Free-list starvation thresholds (T4 has a single threshold). */
	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* Host page size for this PF; register field is log2(size) - 10. */
	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	/* Status page size, packet shift, and pad/pack boundaries. */
	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5);
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		r = t4_read_reg(adapter, A_SGE_CONTROL2);
		if (G_INGPACKBOUNDARY(r) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}

	return 0;
}

/*
 * Read and cache the adapter's compressed filter mode and ingress config.
 */
static void read_filter_mode_and_ingress_config(struct adapter *adap)
{
	struct tp_params *tpp = &adap->params.tp;

	/* Go through the firmware (LDST) when required; otherwise read the
	 * TP indirect registers directly.
	 */
	if (t4_use_ldst(adap)) {
		t4_fw_tp_pio_rw(adap, &tpp->vlan_pri_map, 1,
				A_TP_VLAN_PRI_MAP, 1);
		t4_fw_tp_pio_rw(adap, &tpp->ingress_config, 1,
				A_TP_INGRESS_CONFIG, 1);
	} else {
		t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
		t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG);
	}

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
	tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
	tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
	tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
	tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
	tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);

	/*
	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((tpp->ingress_config & F_VNIC) == 0)
		tpp->vnic_shift = -1;
}

/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *
 *	Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;
	struct tp_params *tpp = &adap->params.tp;

	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	tpp->tre = G_TIMERRESOLUTION(v);
	tpp->dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < MAX_NCHAN; chan++)
		tpp->tx_modq[chan] = chan;

	read_filter_mode_and_ingress_config(adap);

	/*
	 * For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (chip_id(adap) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		tpp->rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
	}

	return 0;
}

/**
 *	t4_filter_field_shift - calculate filter field shift
 *	@adap: the adapter
 *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 *	Return the shift position of a filter field within the Compressed
 *	Filter Tuple.  The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	/* The field isn't part of the current filter mode. */
	if ((filter_mode & filter_sel) == 0)
		return -1;

	/* Sum the widths of every enabled field below the requested one. */
	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE:
			field_shift += W_FT_FCOE;
			break;
		case F_PORT:
			field_shift += W_FT_PORT;
			break;
		case F_VNIC_ID:
			field_shift += W_FT_VNIC_ID;
			break;
		case F_VLAN:
			field_shift += W_FT_VLAN;
			break;
		case F_TOS:
			field_shift += W_FT_TOS;
			break;
		case F_PROTOCOL:
			field_shift += W_FT_PROTOCOL;
			break;
		case F_ETHERTYPE:
			field_shift += W_FT_ETHERTYPE;
			break;
		case F_MACMATCH:
			field_shift += W_FT_MACMATCH;
			break;
		case F_MPSHITTYPE:
			field_shift += W_FT_MPSHITTYPE;
			break;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
			break;
		}
	}
	return field_shift;
}

/*
 * Initialize the software state of @port_id's port_info: query the firmware
 * for the port's capabilities, allocate its first virtual interface, and
 * record the port's channel, MAC address, module type, and RSS information.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
{
	u8 addr[6];
	int ret, i, j;
	struct fw_port_cmd c;
	u16 rss_size;
	struct port_info *p = adap2pinfo(adap, port_id);
	u32 param, val;

	memset(&c, 0, sizeof(c));

	/* Map port_id to j, the index of its bit in the portvec bitmap. */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
			       V_FW_PORT_CMD_PORTID(j));
	c.action_to_len16 = htonl(
		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
		FW_LEN16(c));
	/* Ask the firmware for the port's current information. */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	/* Allocate the port's first virtual interface. */
	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return ret;

	p->vi[0].viid = ret;
	p->tx_chan = j;
	p->rx_chan_map = t4_get_mps_bg_map(adap, j);
	p->lport = j;
	p->vi[0].rss_size = rss_size;
	t4_os_set_hw_addr(adap, p->port_id, addr);

	ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);

	init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));

	/* Query the base of this VI's slice of the RSS table. */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
	if (ret)
		p->vi[0].rss_base = 0xffff;
	else {
		/* MPASS((val >> 16) == rss_size); */
		p->vi[0].rss_base = val & 0xffff;
	}

	return 0;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);	/* disable debug access */
	return i;
}

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);	/* disable debug access */
	return i;
}

/* Base offsets of the regions within the CIM internal address space. */
enum {
	CIM_QCTL_BASE = 0,
	CIM_CTL_BASE = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};
/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	/* Bail out if the CIM host access interface is already busy. */
	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	/* Bail out if the CIM host access interface is already busy. */
	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Data must be staged before the address/write strobe. */
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

/* Convenience wrapper: write a single word into CIM internal space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}

/**
 *	t4_cim_ctl_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	/* Walk the whole buffer one entry at a time via the read pointer. */
	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		/* Hardware clears UPDBGLARDEN when the read completes. */
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap))
			while ((idx & 0xf) > 9)
				idx = (idx + 1) % M_UPDBGLARDPTR;
	}
restart:
	/* Restore the LA to the running state we found it in. */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)	/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* In modes >= 2 a cleared DBGLAWHLF means the last entry is partial. */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	/* Read out the whole buffer, one entry per read-pointer write. */
	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)	/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}

/*
 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300

/**
 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *
 *	Initialize the state of an SGE Ingress DMA Monitor.
 */
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
{
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang.  The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick.  The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds.  For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s.  So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
}

/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers.  These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock.  The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}

/**
 *	t4_read_pace_tbl - read the pace table
 *	@adap: the adapter
 *	@pace_vals: holds the returned values
 *
 *	Returns the values of TP's pace table in microseconds.
 */
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
{
	unsigned int i, v;

	for (i = 0; i < NTX_SCHED; i++) {
		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
		v = t4_read_reg(adap, A_TP_PACE_TABLE);
		pace_vals[i] = dack_ticks_to_usec(adap, v);
	}
}

/**
 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: the byte rate in Kbps
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *
 *	Return the current configuration of a HW Tx scheduler.
 */
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
		     unsigned int *ipg)
{
	unsigned int v, addr, bpt, cpt;

	if (kbps) {
		/* Two schedulers share each rate-limit word; odd index is
		 * in the upper half.
		 */
		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
		if (sched & 1)
			v >>= 16;
		bpt = (v >> 8) & 0xff;	/* bytes per tick */
		cpt = v & 0xff;		/* clocks per tick */
		if (!cpt)
			*kbps = 0;	/* scheduler disabled */
		else {
			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
			*kbps = (v * bpt) / 125;
		}
	}
	if (ipg) {
		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
		if (sched & 1)
			v >>= 16;
		v &= 0xffff;
		*ipg = (10000 * v) / core_ticks_per_usec(adap);
	}
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	/* Always erase the whole config region, not just @size bytes. */
	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t5_fw_init_extern_mem - initialize the external memory
 *	@adap: the adapter
 *
 *	Initializes the external memory on T5.
8463 */ 8464 int t5_fw_init_extern_mem(struct adapter *adap) 8465 { 8466 u32 params[1], val[1]; 8467 int ret; 8468 8469 if (!is_t5(adap)) 8470 return 0; 8471 8472 val[0] = 0xff; /* Initialize all MCs */ 8473 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 8474 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT)); 8475 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val, 8476 FW_CMD_MAX_TIMEOUT); 8477 8478 return ret; 8479 } 8480 8481 /* BIOS boot headers */ 8482 typedef struct pci_expansion_rom_header { 8483 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */ 8484 u8 reserved[22]; /* Reserved per processor Architecture data */ 8485 u8 pcir_offset[2]; /* Offset to PCI Data Structure */ 8486 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */ 8487 8488 /* Legacy PCI Expansion ROM Header */ 8489 typedef struct legacy_pci_expansion_rom_header { 8490 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */ 8491 u8 size512; /* Current Image Size in units of 512 bytes */ 8492 u8 initentry_point[4]; 8493 u8 cksum; /* Checksum computed on the entire Image */ 8494 u8 reserved[16]; /* Reserved */ 8495 u8 pcir_offset[2]; /* Offset to PCI Data Struture */ 8496 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */ 8497 8498 /* EFI PCI Expansion ROM Header */ 8499 typedef struct efi_pci_expansion_rom_header { 8500 u8 signature[2]; // ROM signature. The value 0xaa55 8501 u8 initialization_size[2]; /* Units 512. Includes this header */ 8502 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */ 8503 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */ 8504 u8 efi_machine_type[2]; /* Machine type from EFI image header */ 8505 u8 compression_type[2]; /* Compression type. 
*/ 8506 /* 8507 * Compression type definition 8508 * 0x0: uncompressed 8509 * 0x1: Compressed 8510 * 0x2-0xFFFF: Reserved 8511 */ 8512 u8 reserved[8]; /* Reserved */ 8513 u8 efi_image_header_offset[2]; /* Offset to EFI Image */ 8514 u8 pcir_offset[2]; /* Offset to PCI Data Structure */ 8515 } efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */ 8516 8517 /* PCI Data Structure Format */ 8518 typedef struct pcir_data_structure { /* PCI Data Structure */ 8519 u8 signature[4]; /* Signature. The string "PCIR" */ 8520 u8 vendor_id[2]; /* Vendor Identification */ 8521 u8 device_id[2]; /* Device Identification */ 8522 u8 vital_product[2]; /* Pointer to Vital Product Data */ 8523 u8 length[2]; /* PCIR Data Structure Length */ 8524 u8 revision; /* PCIR Data Structure Revision */ 8525 u8 class_code[3]; /* Class Code */ 8526 u8 image_length[2]; /* Image Length. Multiple of 512B */ 8527 u8 code_revision[2]; /* Revision Level of Code/Data */ 8528 u8 code_type; /* Code Type. */ 8529 /* 8530 * PCI Expansion ROM Code Types 8531 * 0x00: Intel IA-32, PC-AT compatible. Legacy 8532 * 0x01: Open Firmware standard for PCI. FCODE 8533 * 0x02: Hewlett-Packard PA RISC. HP reserved 8534 * 0x03: EFI Image. EFI 8535 * 0x04-0xFF: Reserved. 8536 */ 8537 u8 indicator; /* Indicator. 
Identifies the last image in the ROM */ 8538 u8 reserved[2]; /* Reserved */ 8539 } pcir_data_t; /* PCI__DATA_STRUCTURE */ 8540 8541 /* BOOT constants */ 8542 enum { 8543 BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */ 8544 BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */ 8545 BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */ 8546 BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */ 8547 BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */ 8548 VENDOR_ID = 0x1425, /* Vendor ID */ 8549 PCIR_SIGNATURE = 0x52494350 /* PCIR signature */ 8550 }; 8551 8552 /* 8553 * modify_device_id - Modifies the device ID of the Boot BIOS image 8554 * @adatper: the device ID to write. 8555 * @boot_data: the boot image to modify. 8556 * 8557 * Write the supplied device ID to the boot BIOS image. 8558 */ 8559 static void modify_device_id(int device_id, u8 *boot_data) 8560 { 8561 legacy_pci_exp_rom_header_t *header; 8562 pcir_data_t *pcir_header; 8563 u32 cur_header = 0; 8564 8565 /* 8566 * Loop through all chained images and change the device ID's 8567 */ 8568 while (1) { 8569 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header]; 8570 pcir_header = (pcir_data_t *) &boot_data[cur_header + 8571 le16_to_cpu(*(u16*)header->pcir_offset)]; 8572 8573 /* 8574 * Only modify the Device ID if code type is Legacy or HP. 8575 * 0x00: Okay to modify 8576 * 0x01: FCODE. Do not be modify 8577 * 0x03: Okay to modify 8578 * 0x04-0xFF: Do not modify 8579 */ 8580 if (pcir_header->code_type == 0x00) { 8581 u8 csum = 0; 8582 int i; 8583 8584 /* 8585 * Modify Device ID to match current adatper 8586 */ 8587 *(u16*) pcir_header->device_id = device_id; 8588 8589 /* 8590 * Set checksum temporarily to 0. 8591 * We will recalculate it later. 
8592 */ 8593 header->cksum = 0x0; 8594 8595 /* 8596 * Calculate and update checksum 8597 */ 8598 for (i = 0; i < (header->size512 * 512); i++) 8599 csum += (u8)boot_data[cur_header + i]; 8600 8601 /* 8602 * Invert summed value to create the checksum 8603 * Writing new checksum value directly to the boot data 8604 */ 8605 boot_data[cur_header + 7] = -csum; 8606 8607 } else if (pcir_header->code_type == 0x03) { 8608 8609 /* 8610 * Modify Device ID to match current adatper 8611 */ 8612 *(u16*) pcir_header->device_id = device_id; 8613 8614 } 8615 8616 8617 /* 8618 * Check indicator element to identify if this is the last 8619 * image in the ROM. 8620 */ 8621 if (pcir_header->indicator & 0x80) 8622 break; 8623 8624 /* 8625 * Move header pointer up to the next image in the ROM. 8626 */ 8627 cur_header += header->size512 * 512; 8628 } 8629 } 8630 8631 /* 8632 * t4_load_boot - download boot flash 8633 * @adapter: the adapter 8634 * @boot_data: the boot image to write 8635 * @boot_addr: offset in flash to write boot_data 8636 * @size: image size 8637 * 8638 * Write the supplied boot image to the card's serial flash. 8639 * The boot image has the following sections: a 28-byte header and the 8640 * boot image. 
8641 */ 8642 int t4_load_boot(struct adapter *adap, u8 *boot_data, 8643 unsigned int boot_addr, unsigned int size) 8644 { 8645 pci_exp_rom_header_t *header; 8646 int pcir_offset ; 8647 pcir_data_t *pcir_header; 8648 int ret, addr; 8649 uint16_t device_id; 8650 unsigned int i; 8651 unsigned int boot_sector = (boot_addr * 1024 ); 8652 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 8653 8654 /* 8655 * Make sure the boot image does not encroach on the firmware region 8656 */ 8657 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) { 8658 CH_ERR(adap, "boot image encroaching on firmware region\n"); 8659 return -EFBIG; 8660 } 8661 8662 /* 8663 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot, 8664 * and Boot configuration data sections. These 3 boot sections span 8665 * sectors 0 to 7 in flash and live right before the FW image location. 8666 */ 8667 i = DIV_ROUND_UP(size ? size : FLASH_FW_START, 8668 sf_sec_size); 8669 ret = t4_flash_erase_sectors(adap, boot_sector >> 16, 8670 (boot_sector >> 16) + i - 1); 8671 8672 /* 8673 * If size == 0 then we're simply erasing the FLASH sectors associated 8674 * with the on-adapter option ROM file 8675 */ 8676 if (ret || (size == 0)) 8677 goto out; 8678 8679 /* Get boot header */ 8680 header = (pci_exp_rom_header_t *)boot_data; 8681 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset); 8682 /* PCIR Data Structure */ 8683 pcir_header = (pcir_data_t *) &boot_data[pcir_offset]; 8684 8685 /* 8686 * Perform some primitive sanity testing to avoid accidentally 8687 * writing garbage over the boot sectors. We ought to check for 8688 * more but it's not worth it for now ... 
8689 */ 8690 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) { 8691 CH_ERR(adap, "boot image too small/large\n"); 8692 return -EFBIG; 8693 } 8694 8695 #ifndef CHELSIO_T4_DIAGS 8696 /* 8697 * Check BOOT ROM header signature 8698 */ 8699 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) { 8700 CH_ERR(adap, "Boot image missing signature\n"); 8701 return -EINVAL; 8702 } 8703 8704 /* 8705 * Check PCI header signature 8706 */ 8707 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) { 8708 CH_ERR(adap, "PCI header missing signature\n"); 8709 return -EINVAL; 8710 } 8711 8712 /* 8713 * Check Vendor ID matches Chelsio ID 8714 */ 8715 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) { 8716 CH_ERR(adap, "Vendor ID missing signature\n"); 8717 return -EINVAL; 8718 } 8719 #endif 8720 8721 /* 8722 * Retrieve adapter's device ID 8723 */ 8724 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id); 8725 /* Want to deal with PF 0 so I strip off PF 4 indicator */ 8726 device_id = device_id & 0xf0ff; 8727 8728 /* 8729 * Check PCIE Device ID 8730 */ 8731 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) { 8732 /* 8733 * Change the device ID in the Boot BIOS image to match 8734 * the Device ID of the current adapter. 8735 */ 8736 modify_device_id(device_id, boot_data); 8737 } 8738 8739 /* 8740 * Skip over the first SF_PAGE_SIZE worth of data and write it after 8741 * we finish copying the rest of the boot image. This will ensure 8742 * that the BIOS boot header will only be written if the boot image 8743 * was written in full. 
8744 */ 8745 addr = boot_sector; 8746 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 8747 addr += SF_PAGE_SIZE; 8748 boot_data += SF_PAGE_SIZE; 8749 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0); 8750 if (ret) 8751 goto out; 8752 } 8753 8754 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, 8755 (const u8 *)header, 0); 8756 8757 out: 8758 if (ret) 8759 CH_ERR(adap, "boot image download failed, error %d\n", ret); 8760 return ret; 8761 } 8762 8763 /* 8764 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration 8765 * @adapter: the adapter 8766 * 8767 * Return the address within the flash where the OptionROM Configuration 8768 * is stored, or an error if the device FLASH is too small to contain 8769 * a OptionROM Configuration. 8770 */ 8771 static int t4_flash_bootcfg_addr(struct adapter *adapter) 8772 { 8773 /* 8774 * If the device FLASH isn't large enough to hold a Firmware 8775 * Configuration File, return an error. 8776 */ 8777 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE) 8778 return -ENOSPC; 8779 8780 return FLASH_BOOTCFG_START; 8781 } 8782 8783 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size) 8784 { 8785 int ret, i, n, cfg_addr; 8786 unsigned int addr; 8787 unsigned int flash_cfg_start_sec; 8788 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 8789 8790 cfg_addr = t4_flash_bootcfg_addr(adap); 8791 if (cfg_addr < 0) 8792 return cfg_addr; 8793 8794 addr = cfg_addr; 8795 flash_cfg_start_sec = addr / SF_SEC_SIZE; 8796 8797 if (size > FLASH_BOOTCFG_MAX_SIZE) { 8798 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", 8799 FLASH_BOOTCFG_MAX_SIZE); 8800 return -EFBIG; 8801 } 8802 8803 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */ 8804 sf_sec_size); 8805 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, 8806 flash_cfg_start_sec + i - 1); 8807 8808 /* 8809 * If size == 0 then we're simply erasing the 
FLASH sectors associated 8810 * with the on-adapter OptionROM Configuration File. 8811 */ 8812 if (ret || size == 0) 8813 goto out; 8814 8815 /* this will write to the flash up to SF_PAGE_SIZE at a time */ 8816 for (i = 0; i< size; i+= SF_PAGE_SIZE) { 8817 if ( (size - i) < SF_PAGE_SIZE) 8818 n = size - i; 8819 else 8820 n = SF_PAGE_SIZE; 8821 ret = t4_write_flash(adap, addr, n, cfg_data, 0); 8822 if (ret) 8823 goto out; 8824 8825 addr += SF_PAGE_SIZE; 8826 cfg_data += SF_PAGE_SIZE; 8827 } 8828 8829 out: 8830 if (ret) 8831 CH_ERR(adap, "boot config data %s failed %d\n", 8832 (size == 0 ? "clear" : "download"), ret); 8833 return ret; 8834 } 8835 8836 /** 8837 * t4_set_filter_mode - configure the optional components of filter tuples 8838 * @adap: the adapter 8839 * @mode_map: a bitmap selcting which optional filter components to enable 8840 * 8841 * Sets the filter mode by selecting the optional components to enable 8842 * in filter tuples. Returns 0 on success and a negative error if the 8843 * requested mode needs more bits than are available for optional 8844 * components. 8845 */ 8846 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map) 8847 { 8848 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 }; 8849 8850 int i, nbits = 0; 8851 8852 for (i = S_FCOE; i <= S_FRAGMENTATION; i++) 8853 if (mode_map & (1 << i)) 8854 nbits += width[i]; 8855 if (nbits > FILTER_OPT_LEN) 8856 return -EINVAL; 8857 if (t4_use_ldst(adap)) 8858 t4_fw_tp_pio_rw(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, 0); 8859 else 8860 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 8861 1, A_TP_VLAN_PRI_MAP); 8862 read_filter_mode_and_ingress_config(adap); 8863 8864 return 0; 8865 } 8866 8867 /** 8868 * t4_clr_port_stats - clear port statistics 8869 * @adap: the adapter 8870 * @idx: the port index 8871 * 8872 * Clear HW statistics for the given port. 
8873 */ 8874 void t4_clr_port_stats(struct adapter *adap, int idx) 8875 { 8876 unsigned int i; 8877 u32 bgmap = t4_get_mps_bg_map(adap, idx); 8878 u32 port_base_addr; 8879 8880 if (is_t4(adap)) 8881 port_base_addr = PORT_BASE(idx); 8882 else 8883 port_base_addr = T5_PORT_BASE(idx); 8884 8885 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; 8886 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) 8887 t4_write_reg(adap, port_base_addr + i, 0); 8888 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; 8889 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) 8890 t4_write_reg(adap, port_base_addr + i, 0); 8891 for (i = 0; i < 4; i++) 8892 if (bgmap & (1 << i)) { 8893 t4_write_reg(adap, 8894 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0); 8895 t4_write_reg(adap, 8896 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0); 8897 } 8898 } 8899 8900 /** 8901 * t4_i2c_rd - read I2C data from adapter 8902 * @adap: the adapter 8903 * @port: Port number if per-port device; <0 if not 8904 * @devid: per-port device ID or absolute device ID 8905 * @offset: byte offset into device I2C space 8906 * @len: byte length of I2C space data 8907 * @buf: buffer in which to return I2C data 8908 * 8909 * Reads the I2C data from the indicated device and location. 8910 */ 8911 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, 8912 int port, unsigned int devid, 8913 unsigned int offset, unsigned int len, 8914 u8 *buf) 8915 { 8916 u32 ldst_addrspace; 8917 struct fw_ldst_cmd ldst; 8918 int ret; 8919 8920 if (port >= 4 || 8921 devid >= 256 || 8922 offset >= 256 || 8923 len > sizeof ldst.u.i2c.data) 8924 return -EINVAL; 8925 8926 memset(&ldst, 0, sizeof ldst); 8927 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C); 8928 ldst.op_to_addrspace = 8929 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 8930 F_FW_CMD_REQUEST | 8931 F_FW_CMD_READ | 8932 ldst_addrspace); 8933 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); 8934 ldst.u.i2c.pid = (port < 0 ? 
0xff : port); 8935 ldst.u.i2c.did = devid; 8936 ldst.u.i2c.boffset = offset; 8937 ldst.u.i2c.blen = len; 8938 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); 8939 if (!ret) 8940 memcpy(buf, ldst.u.i2c.data, len); 8941 return ret; 8942 } 8943 8944 /** 8945 * t4_i2c_wr - write I2C data to adapter 8946 * @adap: the adapter 8947 * @port: Port number if per-port device; <0 if not 8948 * @devid: per-port device ID or absolute device ID 8949 * @offset: byte offset into device I2C space 8950 * @len: byte length of I2C space data 8951 * @buf: buffer containing new I2C data 8952 * 8953 * Write the I2C data to the indicated device and location. 8954 */ 8955 int t4_i2c_wr(struct adapter *adap, unsigned int mbox, 8956 int port, unsigned int devid, 8957 unsigned int offset, unsigned int len, 8958 u8 *buf) 8959 { 8960 u32 ldst_addrspace; 8961 struct fw_ldst_cmd ldst; 8962 8963 if (port >= 4 || 8964 devid >= 256 || 8965 offset >= 256 || 8966 len > sizeof ldst.u.i2c.data) 8967 return -EINVAL; 8968 8969 memset(&ldst, 0, sizeof ldst); 8970 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C); 8971 ldst.op_to_addrspace = 8972 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 8973 F_FW_CMD_REQUEST | 8974 F_FW_CMD_WRITE | 8975 ldst_addrspace); 8976 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); 8977 ldst.u.i2c.pid = (port < 0 ? 0xff : port); 8978 ldst.u.i2c.did = devid; 8979 ldst.u.i2c.boffset = offset; 8980 ldst.u.i2c.blen = len; 8981 memcpy(ldst.u.i2c.data, buf, len); 8982 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); 8983 } 8984 8985 /** 8986 * t4_sge_ctxt_rd - read an SGE context through FW 8987 * @adap: the adapter 8988 * @mbox: mailbox to use for the FW command 8989 * @cid: the context id 8990 * @ctype: the context type 8991 * @data: where to store the context data 8992 * 8993 * Issues a FW command through the given mailbox to read an SGE context. 
8994 */ 8995 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, 8996 enum ctxt_type ctype, u32 *data) 8997 { 8998 int ret; 8999 struct fw_ldst_cmd c; 9000 9001 if (ctype == CTXT_EGRESS) 9002 ret = FW_LDST_ADDRSPC_SGE_EGRC; 9003 else if (ctype == CTXT_INGRESS) 9004 ret = FW_LDST_ADDRSPC_SGE_INGC; 9005 else if (ctype == CTXT_FLM) 9006 ret = FW_LDST_ADDRSPC_SGE_FLMC; 9007 else 9008 ret = FW_LDST_ADDRSPC_SGE_CONMC; 9009 9010 memset(&c, 0, sizeof(c)); 9011 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 9012 F_FW_CMD_REQUEST | F_FW_CMD_READ | 9013 V_FW_LDST_CMD_ADDRSPACE(ret)); 9014 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 9015 c.u.idctxt.physid = cpu_to_be32(cid); 9016 9017 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 9018 if (ret == 0) { 9019 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0); 9020 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1); 9021 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2); 9022 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3); 9023 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4); 9024 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5); 9025 } 9026 return ret; 9027 } 9028 9029 /** 9030 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW 9031 * @adap: the adapter 9032 * @cid: the context id 9033 * @ctype: the context type 9034 * @data: where to store the context data 9035 * 9036 * Reads an SGE context directly, bypassing FW. This is only for 9037 * debugging when FW is unavailable. 
9038 */ 9039 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, 9040 u32 *data) 9041 { 9042 int i, ret; 9043 9044 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype)); 9045 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1); 9046 if (!ret) 9047 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4) 9048 *data++ = t4_read_reg(adap, i); 9049 return ret; 9050 } 9051 9052 int t4_sched_config(struct adapter *adapter, int type, int minmaxen, 9053 int sleep_ok) 9054 { 9055 struct fw_sched_cmd cmd; 9056 9057 memset(&cmd, 0, sizeof(cmd)); 9058 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 9059 F_FW_CMD_REQUEST | 9060 F_FW_CMD_WRITE); 9061 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 9062 9063 cmd.u.config.sc = FW_SCHED_SC_CONFIG; 9064 cmd.u.config.type = type; 9065 cmd.u.config.minmaxen = minmaxen; 9066 9067 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 9068 NULL, sleep_ok); 9069 } 9070 9071 int t4_sched_params(struct adapter *adapter, int type, int level, int mode, 9072 int rateunit, int ratemode, int channel, int cl, 9073 int minrate, int maxrate, int weight, int pktsize, 9074 int sleep_ok) 9075 { 9076 struct fw_sched_cmd cmd; 9077 9078 memset(&cmd, 0, sizeof(cmd)); 9079 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 9080 F_FW_CMD_REQUEST | 9081 F_FW_CMD_WRITE); 9082 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 9083 9084 cmd.u.params.sc = FW_SCHED_SC_PARAMS; 9085 cmd.u.params.type = type; 9086 cmd.u.params.level = level; 9087 cmd.u.params.mode = mode; 9088 cmd.u.params.ch = channel; 9089 cmd.u.params.cl = cl; 9090 cmd.u.params.unit = rateunit; 9091 cmd.u.params.rate = ratemode; 9092 cmd.u.params.min = cpu_to_be32(minrate); 9093 cmd.u.params.max = cpu_to_be32(maxrate); 9094 cmd.u.params.weight = cpu_to_be16(weight); 9095 cmd.u.params.pktsize = cpu_to_be16(pktsize); 9096 9097 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 9098 NULL, sleep_ok); 9099 
} 9100 9101 /* 9102 * t4_config_watchdog - configure (enable/disable) a watchdog timer 9103 * @adapter: the adapter 9104 * @mbox: mailbox to use for the FW command 9105 * @pf: the PF owning the queue 9106 * @vf: the VF owning the queue 9107 * @timeout: watchdog timeout in ms 9108 * @action: watchdog timer / action 9109 * 9110 * There are separate watchdog timers for each possible watchdog 9111 * action. Configure one of the watchdog timers by setting a non-zero 9112 * timeout. Disable a watchdog timer by using a timeout of zero. 9113 */ 9114 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox, 9115 unsigned int pf, unsigned int vf, 9116 unsigned int timeout, unsigned int action) 9117 { 9118 struct fw_watchdog_cmd wdog; 9119 unsigned int ticks; 9120 9121 /* 9122 * The watchdog command expects a timeout in units of 10ms so we need 9123 * to convert it here (via rounding) and force a minimum of one 10ms 9124 * "tick" if the timeout is non-zero but the convertion results in 0 9125 * ticks. 
9126 */ 9127 ticks = (timeout + 5)/10; 9128 if (timeout && !ticks) 9129 ticks = 1; 9130 9131 memset(&wdog, 0, sizeof wdog); 9132 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) | 9133 F_FW_CMD_REQUEST | 9134 F_FW_CMD_WRITE | 9135 V_FW_PARAMS_CMD_PFN(pf) | 9136 V_FW_PARAMS_CMD_VFN(vf)); 9137 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog)); 9138 wdog.timeout = cpu_to_be32(ticks); 9139 wdog.action = cpu_to_be32(action); 9140 9141 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL); 9142 } 9143 9144 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level) 9145 { 9146 struct fw_devlog_cmd devlog_cmd; 9147 int ret; 9148 9149 memset(&devlog_cmd, 0, sizeof(devlog_cmd)); 9150 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 9151 F_FW_CMD_REQUEST | F_FW_CMD_READ); 9152 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 9153 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, 9154 sizeof(devlog_cmd), &devlog_cmd); 9155 if (ret) 9156 return ret; 9157 9158 *level = devlog_cmd.level; 9159 return 0; 9160 } 9161 9162 int t4_set_devlog_level(struct adapter *adapter, unsigned int level) 9163 { 9164 struct fw_devlog_cmd devlog_cmd; 9165 9166 memset(&devlog_cmd, 0, sizeof(devlog_cmd)); 9167 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 9168 F_FW_CMD_REQUEST | 9169 F_FW_CMD_WRITE); 9170 devlog_cmd.level = level; 9171 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 9172 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, 9173 sizeof(devlog_cmd), &devlog_cmd); 9174 } 9175