1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * This file is part of the Chelsio T4/T5/T6 Ethernet driver. 14 * 15 * Copyright (C) 2003-2019 Chelsio Communications. All rights reserved. 16 * 17 * This program is distributed in the hope that it will be useful, but WITHOUT 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this 20 * release for licensing terms and conditions. 21 */ 22 23 /* 24 * Copyright 2020 RackTop Systems, Inc. 25 */ 26 27 #include "common.h" 28 #include "t4_regs.h" 29 #include "t4_regs_values.h" 30 #include "t4fw_interface.h" 31 32 /** 33 * t4_wait_op_done_val - wait until an operation is completed 34 * @adapter: the adapter performing the operation 35 * @reg: the register to check for completion 36 * @mask: a single-bit field within @reg that indicates completion 37 * @polarity: the value of the field when the operation is completed 38 * @attempts: number of check iterations 39 * @delay: delay in usecs between iterations 40 * @valp: where to store the value of the register at completion time 41 * 42 * Wait until an operation is completed by checking a bit in a register 43 * up to @attempts times. If @valp is not NULL the value of the register 44 * at the time it indicated completion is stored there. Returns 0 if the 45 * operation completes and -EAGAIN otherwise. 
46 */ 47 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 48 int polarity, int attempts, int delay, u32 *valp) 49 { 50 while (1) { 51 u32 val = t4_read_reg(adapter, reg); 52 53 if (!!(val & mask) == polarity) { 54 if (valp) 55 *valp = val; 56 return 0; 57 } 58 if (--attempts == 0) 59 return -EAGAIN; 60 if (delay) 61 udelay(delay); 62 } 63 } 64 65 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, 66 int polarity, int attempts, int delay) 67 { 68 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, 69 delay, NULL); 70 } 71 72 /** 73 * t4_set_reg_field - set a register field to a value 74 * @adapter: the adapter to program 75 * @addr: the register address 76 * @mask: specifies the portion of the register to modify 77 * @val: the new value for the register field 78 * 79 * Sets a register field specified by the supplied mask to the 80 * given value. 81 */ 82 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, 83 u32 val) 84 { 85 u32 v = t4_read_reg(adapter, addr) & ~mask; 86 87 t4_write_reg(adapter, addr, v | val); 88 (void) t4_read_reg(adapter, addr); /* flush */ 89 } 90 91 /** 92 * t4_read_indirect - read indirectly addressed registers 93 * @adap: the adapter 94 * @addr_reg: register holding the indirect address 95 * @data_reg: register holding the value of the indirect register 96 * @vals: where the read register values are stored 97 * @nregs: how many indirect registers to read 98 * @start_idx: index of first indirect register to read 99 * 100 * Reads registers that are accessed indirectly through an address/data 101 * register pair. 
102 */ 103 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 104 unsigned int data_reg, u32 *vals, 105 unsigned int nregs, unsigned int start_idx) 106 { 107 while (nregs--) { 108 t4_write_reg(adap, addr_reg, start_idx); 109 *vals++ = t4_read_reg(adap, data_reg); 110 start_idx++; 111 } 112 } 113 114 /** 115 * t4_write_indirect - write indirectly addressed registers 116 * @adap: the adapter 117 * @addr_reg: register holding the indirect addresses 118 * @data_reg: register holding the value for the indirect registers 119 * @vals: values to write 120 * @nregs: how many indirect registers to write 121 * @start_idx: address of first indirect register to write 122 * 123 * Writes a sequential block of registers that are accessed indirectly 124 * through an address/data register pair. 125 */ 126 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 127 unsigned int data_reg, const u32 *vals, 128 unsigned int nregs, unsigned int start_idx) 129 { 130 while (nregs--) { 131 t4_write_reg(adap, addr_reg, start_idx++); 132 t4_write_reg(adap, data_reg, *vals++); 133 } 134 } 135 136 /* 137 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor 138 * mechanism. This guarantees that we get the real value even if we're 139 * operating within a Virtual Machine and the Hypervisor is trapping our 140 * Configuration Space accesses. 141 * 142 * N.B. This routine should only be used as a last resort: the firmware uses 143 * the backdoor registers on a regular basis and we can end up 144 * conflicting with it's uses! 
145 */ 146 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) 147 { 148 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg); 149 150 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) 151 req |= F_ENABLE; 152 else 153 req |= F_T6_ENABLE; 154 155 if (is_t4(adap->params.chip)) 156 req |= F_LOCALCFG; 157 158 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req); 159 *val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA); 160 161 /* Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a 162 * Configuration Space read. (None of the other fields matter when 163 * F_ENABLE is 0 so a simple register write is easier than a 164 * read-modify-write via t4_set_reg_field().) 165 */ 166 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0); 167 } 168 169 /* 170 * t4_report_fw_error - report firmware error 171 * @adap: the adapter 172 * 173 * The adapter firmware can indicate error conditions to the host. 174 * If the firmware has indicated an error, print out the reason for 175 * the firmware error. 176 */ 177 static void t4_report_fw_error(struct adapter *adap) 178 { 179 static const char *const reason[] = { 180 "Crash", /* PCIE_FW_EVAL_CRASH */ 181 "During Device Preparation", /* PCIE_FW_EVAL_PREP */ 182 "During Device Configuration", /* PCIE_FW_EVAL_CONF */ 183 "During Device Initialization", /* PCIE_FW_EVAL_INIT */ 184 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ 185 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ 186 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ 187 "Reserved", /* reserved */ 188 }; 189 u32 pcie_fw; 190 191 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 192 if (pcie_fw & F_PCIE_FW_ERR) { 193 CH_ERR(adap, "Firmware reports adapter error: %s\n", 194 reason[G_PCIE_FW_EVAL(pcie_fw)]); 195 adap->flags &= ~FW_OK; 196 } 197 } 198 199 /* 200 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 
201 */ 202 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, 203 u32 mbox_addr) 204 { 205 for ( ; nflit; nflit--, mbox_addr += 8) 206 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); 207 } 208 209 /* 210 * Handle a FW assertion reported in a mailbox. 211 */ 212 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt) 213 { 214 CH_ALERT(adap, 215 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", 216 asrt->u.assert.filename_0_7, 217 be32_to_cpu(asrt->u.assert.line), 218 be32_to_cpu(asrt->u.assert.x), 219 be32_to_cpu(asrt->u.assert.y)); 220 } 221 222 #define X_CIM_PF_NOACCESS 0xeeeeeeee 223 224 /* 225 * If the Host OS Driver needs locking arround accesses to the mailbox, this 226 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ... 227 */ 228 /* makes single-statement usage a bit cleaner ... */ 229 #ifdef T4_OS_NEEDS_MBOX_LOCKING 230 #define T4_OS_MBOX_LOCKING(x) x 231 #else 232 #define T4_OS_MBOX_LOCKING(x) do {} while (0) 233 #endif 234 235 /* 236 * If the OS Driver wants busy waits to keep a watchdog happy, tap it during 237 * busy loops which don't sleep. 
238 */ 239 #ifdef T4_OS_NEEDS_TOUCH_NMI_WATCHDOG 240 #define T4_OS_TOUCH_NMI_WATCHDOG() t4_os_touch_nmi_watchdog() 241 #else 242 #define T4_OS_TOUCH_NMI_WATCHDOG() 243 #endif 244 245 #ifdef T4_OS_LOG_MBOX_CMDS 246 /** 247 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log 248 * @adapter: the adapter 249 * @cmd: the Firmware Mailbox Command or Reply 250 * @size: command length in bytes 251 * @access: the time (ms) needed to access the Firmware Mailbox 252 * @execute: the time (ms) the command spent being executed 253 */ 254 static void t4_record_mbox(struct adapter *adapter, 255 const __be64 *cmd, unsigned int size, 256 int access, int execute) 257 { 258 struct mbox_cmd_log *log = adapter->mbox_log; 259 struct mbox_cmd *entry; 260 int i; 261 262 entry = mbox_cmd_log_entry(log, log->cursor++); 263 if (log->cursor == log->size) 264 log->cursor = 0; 265 266 for (i = 0; i < size/8; i++) 267 entry->cmd[i] = be64_to_cpu(cmd[i]); 268 while (i < MBOX_LEN/8) 269 entry->cmd[i++] = 0; 270 entry->timestamp = t4_os_timestamp(); 271 entry->seqno = log->seqno++; 272 entry->access = access; 273 entry->execute = execute; 274 } 275 276 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \ 277 t4_record_mbox(__adapter, __cmd, __size, __access, __execute) 278 279 #else /* !T4_OS_LOG_MBOX_CMDS */ 280 281 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \ 282 /* nothing */ 283 284 #endif /* !T4_OS_LOG_MBOX_CMDS */ 285 286 /** 287 * t4_record_mbox_marker - record a marker in the mailbox log 288 * @adapter: the adapter 289 * @marker: byte array marker 290 * @size: marker size in bytes 291 * 292 * We inject a "fake mailbox command" into the Firmware Mailbox Log 293 * using a known command token and then the bytes of the specified 294 * marker. This lets debugging code inject markers into the log to 295 * help identify which commands are in response to higher level code. 
296 */ 297 void t4_record_mbox_marker(struct adapter *adapter, 298 const void *marker, unsigned int size) 299 { 300 #ifdef T4_OS_LOG_MBOX_CMDS 301 __be64 marker_cmd[MBOX_LEN/8]; 302 const unsigned int max_marker = sizeof marker_cmd - sizeof (__be64); 303 unsigned int marker_cmd_size; 304 305 if (size > max_marker) 306 size = max_marker; 307 308 marker_cmd[0] = cpu_to_be64(~0LLU); 309 memcpy(&marker_cmd[1], marker, size); 310 memset((unsigned char *)&marker_cmd[1] + size, 0, max_marker - size); 311 marker_cmd_size = sizeof (__be64) + roundup(size, sizeof (__be64)); 312 313 t4_record_mbox(adapter, marker_cmd, marker_cmd_size, 0, 0); 314 #endif /* T4_OS_LOG_MBOX_CMDS */ 315 } 316 317 /* 318 * Delay time in microseconds to wait for mailbox access/fw reply 319 * to mailbox command 320 */ 321 #define MIN_MBOX_CMD_DELAY 900 322 #define MBOX_CMD_DELAY 1000 323 324 /** 325 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox 326 * @adap: the adapter 327 * @mbox: index of the mailbox to use 328 * @cmd: the command to write 329 * @size: command length in bytes 330 * @rpl: where to optionally store the reply 331 * @sleep_ok: if true we may sleep while awaiting command completion 332 * @timeout: time to wait for command to finish before timing out 333 * (negative implies @sleep_ok=false) 334 * 335 * Sends the given command to FW through the selected mailbox and waits 336 * for the FW to execute the command. If @rpl is not %NULL it is used to 337 * store the FW's reply to the command. The command and its optional 338 * reply are of the same length. Some FW commands like RESET and 339 * INITIALIZE can take a considerable amount of time to execute. 340 * @sleep_ok determines whether we may sleep while awaiting the response. 341 * If sleeping is allowed we use progressive backoff otherwise we spin. 342 * Note that passing in a negative @timeout is an alternate mechanism 343 * for specifying @sleep_ok=false. 
 This is useful when a higher level
 *	interface allows for specification of @timeout but not @sleep_ok ...
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * NOTE(review): 'access' exists only under T4_OS_NEEDS_MBOX_LOCKING
	 * but is referenced by T4_RECORD_MBOX below; building with
	 * T4_OS_LOG_MBOX_CMDS and without T4_OS_NEEDS_MBOX_LOCKING would
	 * presumably fail to compile -- confirm the supported combinations.
	 */
	u16 access = 0;
#endif
	u32 v;
	u64 res;
	int i, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	T4_OS_MBOX_LOCKING(t4_os_list_t entry);
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	for (i = 0; ; i++) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4*timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_lock);
			t4_report_fw_error(adap);
			ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
			T4_RECORD_MBOX(adap, cmd, size, ret, 0);
			return ret;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}
	}
	access = i;
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
							 &adap->mbox_lock));
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		T4_RECORD_MBOX(adap, cmd, size, access, ret);
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	T4_RECORD_MBOX(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/*
	 * XXX It's not clear that we need this anymore now
	 * XXX that we have mailbox logging ...
	 */
	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand the mailbox over to the firmware. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	(void) t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  Each iteration delays ~1ms (MBOX_CMD_DELAY is
	 * in usecs), so @timeout is effectively in milliseconds.
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	     i < timeout;
	     i++) {
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
								 &adap->mbox_lock));

			T4_RECORD_MBOX(adap, cmd_rpl, size, access, i + 1);

			/*
			 * XXX It's not clear that we need this anymore now
			 * XXX that we have mailbox logging ...
			 */
			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
			CH_MSG(adap, INFO, HW,
			       "command completed in %d ms (%ssleeping)\n",
			       i + 1, sleep_ok ? "" : "non-");

			/* A FW_DEBUG_CMD reply here is a firmware assert. */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry, &adap->mbox_lock));

	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	T4_RECORD_MBOX(adap, cmd, size, access, ret);
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}

#ifdef CONFIG_CUDBG
/*
 * The maximum number of times to iterate for FW reply before
 * issuing a mailbox timeout
 */
#define FW_REPLY_WAIT_LOOP 6000000

/**
 * t4_wr_mbox_meat_timeout_panic - send a command to FW through the given
 * mailbox.  This function is a minimal version of t4_wr_mbox_meat_timeout()
 * and is only invoked during a kernel crash.  Since this function is
 * called through an atomic notifier chain, we cannot sleep awaiting a
 * response from FW, hence repeatedly loop until we get a reply.
 *
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 */

static int t4_wr_mbox_meat_timeout_panic(struct adapter *adap, int mbox,
					 const void *cmd, int size, void *rpl)
{
	u32 v;
	u64 res;
	int i, ret;
	u64 cnt;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * Check for a firmware error which we'll report as a
	 * device error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		t4_report_fw_error(adap);
		ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		return ret;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg); /* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  This is a pure busy-wait (no delay per
	 * iteration) since we're in crash context and cannot sleep; the
	 * iteration count is bounded by FW_REPLY_WAIT_LOOP instead.
	 */
	for (cnt = 0;
	     !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	     cnt < FW_REPLY_WAIT_LOOP;
	     cnt++) {
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

			/* A FW_DEBUG_CMD reply here is a firmware assert. */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
#endif

/*
 * Send a mailbox command with the default timeout; during a kernel crash
 * (CONFIG_CUDBG) the non-sleeping panic variant is used instead.
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
#ifdef CONFIG_CUDBG
	if (adap->flags & K_CRASH)
		return t4_wr_mbox_meat_timeout_panic(adap, mbox, cmd, size,
						     rpl);
	else
#endif
		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
					       sleep_ok, FW_CMD_MAX_TIMEOUT);

}

/*
 * Dump the EDC ECC error address and BIST status registers for the given
 * EDC controller (T5+ only; MEM_EDC0 or MEM_EDC1).  Always returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap->params.chip)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}

/**
 *	t4_memory_rw_addr - read/write adapter memory via PCIE memory window
 *	@adap: the adapter
 *	@win: PCI-E Memory Window to use
 *	@addr: address within adapter memory
 *	@len: amount of memory to transfer
 *	@hbuf: host memory buffer
 *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 *	Reads/writes an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address and host buffer must be aligned on 32-bit
 *	boundaries; the length may be arbitrary.
 *
 *	NOTES:
 *	 1. The memory is transferred as a raw byte sequence from/to the
 *	    firmware's memory.  If this memory contains data structures which
 *	    contain multi-byte integers, it's the caller's responsibility to
 *	    perform appropriate byte order conversions.
 *
 *	 2. It is the Caller's responsibility to ensure that no other code
 *	    uses the specified PCI-E Memory Window while this routine is
 *	    using it.  This is typically done via the use of OS-specific
 *	    locks, etc.
779 */ 780 int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr, 781 u32 len, void *hbuf, int dir) 782 { 783 u32 pos, offset, resid; 784 u32 win_pf, mem_reg, mem_aperture, mem_base; 785 u32 *buf; 786 787 /* Argument sanity checks ... 788 */ 789 if (addr & 0x3 || (uintptr_t)hbuf & 0x3) 790 return -EINVAL; 791 buf = (u32 *)hbuf; 792 793 /* It's convenient to be able to handle lengths which aren't a 794 * multiple of 32-bits because we often end up transferring files to 795 * the firmware. So we'll handle that by normalizing the length here 796 * and then handling any residual transfer at the end. 797 */ 798 resid = len & 0x3; 799 len -= resid; 800 801 /* Each PCI-E Memory Window is programmed with a window size -- or 802 * "aperture" -- which controls the granularity of its mapping onto 803 * adapter memory. We need to grab that aperture in order to know 804 * how to use the specified window. The window is also programmed 805 * with the base address of the Memory Window in BAR0's address 806 * space. For T4 this is an absolute PCI-E Bus Address. For T5 807 * the address is relative to BAR0. 808 */ 809 mem_reg = t4_read_reg(adap, 810 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 811 win)); 812 813 /* a dead adapter will return 0xffffffff for PIO reads */ 814 if (mem_reg == 0xffffffff) { 815 CH_WARN(adap, "Unable to read PCI-E Memory Window Base[%d]\n", 816 win); 817 return -ENXIO; 818 } 819 820 mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT); 821 mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT; 822 if (is_t4(adap->params.chip)) 823 mem_base -= adap->t4_bar0; 824 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf); 825 826 /* Calculate our initial PCI-E Memory Window Position and Offset into 827 * that Window. 828 */ 829 pos = addr & ~(mem_aperture-1); 830 offset = addr - pos; 831 832 /* Set up initial PCI-E Memory Window to cover the start of our 833 * transfer. 
(Read it back to ensure that changes propagate before we 834 * attempt to use the new value.) 835 */ 836 t4_write_reg(adap, 837 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win), 838 pos | win_pf); 839 t4_read_reg(adap, 840 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win)); 841 842 /* Transfer data to/from the adapter as long as there's an integral 843 * number of 32-bit transfers to complete. 844 * 845 * A note on Endianness issues: 846 * 847 * The "register" reads and writes below from/to the PCI-E Memory 848 * Window invoke the standard adapter Big-Endian to PCI-E Link 849 * Little-Endian "swizzel." As a result, if we have the following 850 * data in adapter memory: 851 * 852 * Memory: ... | b0 | b1 | b2 | b3 | ... 853 * Address: i+0 i+1 i+2 i+3 854 * 855 * Then a read of the adapter memory via the PCI-E Memory Window 856 * will yield: 857 * 858 * x = readl(i) 859 * 31 0 860 * [ b3 | b2 | b1 | b0 ] 861 * 862 * If this value is stored into local memory on a Little-Endian system 863 * it will show up correctly in local memory as: 864 * 865 * ( ..., b0, b1, b2, b3, ... ) 866 * 867 * But on a Big-Endian system, the store will show up in memory 868 * incorrectly swizzled as: 869 * 870 * ( ..., b3, b2, b1, b0, ... ) 871 * 872 * So we need to account for this in the reads and writes to the 873 * PCI-E Memory Window below by undoing the register read/write 874 * swizzels. 875 */ 876 while (len > 0) { 877 if (dir == T4_MEMORY_READ) 878 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap, 879 mem_base + offset)); 880 else 881 t4_write_reg(adap, mem_base + offset, 882 (__force u32)cpu_to_le32(*buf++)); 883 offset += sizeof(__be32); 884 len -= sizeof(__be32); 885 886 /* If we've reached the end of our current window aperture, 887 * move the PCI-E Memory Window on to the next. Note that 888 * doing this here after "len" may be 0 allows us to set up 889 * the PCI-E Memory Window for a possible final residual 890 * transfer below ... 
891 */ 892 if (offset == mem_aperture) { 893 pos += mem_aperture; 894 offset = 0; 895 t4_write_reg(adap, 896 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 897 win), pos | win_pf); 898 t4_read_reg(adap, 899 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 900 win)); 901 } 902 } 903 904 /* If the original transfer had a length which wasn't a multiple of 905 * 32-bits, now's where we need to finish off the transfer of the 906 * residual amount. The PCI-E Memory Window has already been moved 907 * above (if necessary) to cover this final transfer. 908 */ 909 if (resid) { 910 union { 911 u32 word; 912 char byte[4]; 913 } last; 914 unsigned char *bp; 915 int i; 916 917 if (dir == T4_MEMORY_READ) { 918 last.word = le32_to_cpu( 919 (__force __le32)t4_read_reg(adap, 920 mem_base + offset)); 921 for (bp = (unsigned char *)buf, i = resid; i < 4; i++) 922 bp[i] = last.byte[i]; 923 } else { 924 last.word = *buf; 925 for (i = resid; i < 4; i++) 926 last.byte[i] = 0; 927 t4_write_reg(adap, mem_base + offset, 928 (__force u32)cpu_to_le32(last.word)); 929 } 930 } 931 932 return 0; 933 } 934 935 /** 936 * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window 937 * @adap: the adapter 938 * @win: PCI-E Memory Window to use 939 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC 940 * @maddr: address within indicated memory type 941 * @len: amount of memory to transfer 942 * @hbuf: host memory buffer 943 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) 944 * 945 * Reads/writes adapter memory using t4_memory_rw_addr(). This routine 946 * provides an (memory type, address withing memory type) interface. 
947 */ 948 int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr, 949 u32 len, void *hbuf, int dir) 950 { 951 u32 mtype_offset; 952 u32 edc_size, mc_size; 953 954 /* Offset into the region of memory which is being accessed 955 * MEM_EDC0 = 0 956 * MEM_EDC1 = 1 957 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller 958 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5) 959 * MEM_HMA = 4 960 */ 961 edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR)); 962 if (mtype == MEM_HMA) { 963 mtype_offset = 2 * (edc_size * 1024 * 1024); 964 } else if (mtype != MEM_MC1) 965 mtype_offset = (mtype * (edc_size * 1024 * 1024)); 966 else { 967 mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap, 968 A_MA_EXT_MEMORY0_BAR)); 969 mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; 970 } 971 972 return t4_memory_rw_addr(adap, win, 973 mtype_offset + maddr, len, 974 hbuf, dir); 975 } 976 977 /* 978 * Return the specified PCI-E Configuration Space register from our Physical 979 * Function. We try first via a Firmware LDST Command (if fw_attach != 0) 980 * since we prefer to let the firmware own all of these registers, but if that 981 * fails we go for it directly ourselves. 982 */ 983 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach) 984 { 985 u32 val; 986 987 /* 988 * If fw_attach != 0, construct and send the Firmware LDST Command to 989 * retrieve the specified PCI-E Configuration Space register. 
/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if drv_fw_attach != 0)
 * since we prefer to let the firmware own all of these registers, but if that
 * fails we go for it directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{
	u32 val;

	/*
	 * If drv_fw_attach != 0, construct and send the Firmware LDST
	 * Command to retrieve the specified PCI-E Configuration Space
	 * register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		/* Build the command; all multi-byte fields are big-endian. */
		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST |
				F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
			"Configuration Space register %d, err = %d\n",
			reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	t4_hw_pci_read_cfg4(adap, reg, &val);
	return val;
}
1048 */ 1049 bar0 = t4_read_pcie_cfg4(adap, pci_base, drv_fw_attach); 1050 bar0 &= pci_mask; 1051 adap->t4_bar0 = bar0; 1052 1053 return bar0 + memwin_base; 1054 } else { 1055 /* For T5, only relative offset inside the PCIe BAR is passed */ 1056 return memwin_base; 1057 } 1058 } 1059 1060 /* Get the default utility window (win0) used by everyone */ 1061 int t4_get_util_window(struct adapter *adap, int drv_fw_attach) 1062 { 1063 return t4_get_window(adap, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE, drv_fw_attach); 1064 } 1065 1066 /* 1067 * Set up memory window for accessing adapter memory ranges. (Read 1068 * back MA register to ensure that changes propagate before we attempt 1069 * to use the new values.) 1070 */ 1071 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window) 1072 { 1073 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window), 1074 memwin_base | V_BIR(0) | 1075 V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT)); 1076 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window)); 1077 } 1078 1079 /** 1080 * t4_get_regs_len - return the size of the chips register set 1081 * @adapter: the adapter 1082 * 1083 * Returns the size of the chip's BAR0 register space. 
1084 */ 1085 unsigned int t4_get_regs_len(struct adapter *adapter) 1086 { 1087 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); 1088 1089 switch (chip_version) { 1090 case CHELSIO_T4: 1091 return T4_REGMAP_SIZE; 1092 1093 case CHELSIO_T5: 1094 case CHELSIO_T6: 1095 return T5_REGMAP_SIZE; 1096 } 1097 1098 CH_ERR(adapter, 1099 "Unsupported chip version %d\n", chip_version); 1100 return 0; 1101 } 1102 1103 /** 1104 * t4_get_regs - read chip registers into provided buffer 1105 * @adap: the adapter 1106 * @buf: register buffer 1107 * @buf_size: size (in bytes) of register buffer 1108 * 1109 * If the provided register buffer isn't large enough for the chip's 1110 * full register range, the register dump will be truncated to the 1111 * register buffer's size. 1112 */ 1113 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 1114 { 1115 static const unsigned int t4_reg_ranges[] = { 1116 0x1008, 0x1108, 1117 0x1180, 0x1184, 1118 0x1190, 0x1194, 1119 0x11a0, 0x11a4, 1120 0x11b0, 0x11b4, 1121 0x11fc, 0x123c, 1122 0x1300, 0x173c, 1123 0x1800, 0x18fc, 1124 0x3000, 0x30d8, 1125 0x30e0, 0x30e4, 1126 0x30ec, 0x5910, 1127 0x5920, 0x5924, 1128 0x5960, 0x5960, 1129 0x5968, 0x5968, 1130 0x5970, 0x5970, 1131 0x5978, 0x5978, 1132 0x5980, 0x5980, 1133 0x5988, 0x5988, 1134 0x5990, 0x5990, 1135 0x5998, 0x5998, 1136 0x59a0, 0x59d4, 1137 0x5a00, 0x5ae0, 1138 0x5ae8, 0x5ae8, 1139 0x5af0, 0x5af0, 1140 0x5af8, 0x5af8, 1141 0x6000, 0x6098, 1142 0x6100, 0x6150, 1143 0x6200, 0x6208, 1144 0x6240, 0x6248, 1145 0x6280, 0x62b0, 1146 0x62c0, 0x6338, 1147 0x6370, 0x638c, 1148 0x6400, 0x643c, 1149 0x6500, 0x6524, 1150 0x6a00, 0x6a04, 1151 0x6a14, 0x6a38, 1152 0x6a60, 0x6a70, 1153 0x6a78, 0x6a78, 1154 0x6b00, 0x6b0c, 1155 0x6b1c, 0x6b84, 1156 0x6bf0, 0x6bf8, 1157 0x6c00, 0x6c0c, 1158 0x6c1c, 0x6c84, 1159 0x6cf0, 0x6cf8, 1160 0x6d00, 0x6d0c, 1161 0x6d1c, 0x6d84, 1162 0x6df0, 0x6df8, 1163 0x6e00, 0x6e0c, 1164 0x6e1c, 0x6e84, 1165 0x6ef0, 0x6ef8, 1166 0x6f00, 0x6f0c, 1167 
0x6f1c, 0x6f84, 1168 0x6ff0, 0x6ff8, 1169 0x7000, 0x700c, 1170 0x701c, 0x7084, 1171 0x70f0, 0x70f8, 1172 0x7100, 0x710c, 1173 0x711c, 0x7184, 1174 0x71f0, 0x71f8, 1175 0x7200, 0x720c, 1176 0x721c, 0x7284, 1177 0x72f0, 0x72f8, 1178 0x7300, 0x730c, 1179 0x731c, 0x7384, 1180 0x73f0, 0x73f8, 1181 0x7400, 0x7450, 1182 0x7500, 0x7530, 1183 0x7600, 0x760c, 1184 0x7614, 0x761c, 1185 0x7680, 0x76cc, 1186 0x7700, 0x7798, 1187 0x77c0, 0x77fc, 1188 0x7900, 0x79fc, 1189 0x7b00, 0x7b58, 1190 0x7b60, 0x7b84, 1191 0x7b8c, 0x7c38, 1192 0x7d00, 0x7d38, 1193 0x7d40, 0x7d80, 1194 0x7d8c, 0x7ddc, 1195 0x7de4, 0x7e04, 1196 0x7e10, 0x7e1c, 1197 0x7e24, 0x7e38, 1198 0x7e40, 0x7e44, 1199 0x7e4c, 0x7e78, 1200 0x7e80, 0x7ea4, 1201 0x7eac, 0x7edc, 1202 0x7ee8, 0x7efc, 1203 0x8dc0, 0x8e04, 1204 0x8e10, 0x8e1c, 1205 0x8e30, 0x8e78, 1206 0x8ea0, 0x8eb8, 1207 0x8ec0, 0x8f6c, 1208 0x8fc0, 0x9008, 1209 0x9010, 0x9058, 1210 0x9060, 0x9060, 1211 0x9068, 0x9074, 1212 0x90fc, 0x90fc, 1213 0x9400, 0x9408, 1214 0x9410, 0x9458, 1215 0x9600, 0x9600, 1216 0x9608, 0x9638, 1217 0x9640, 0x96bc, 1218 0x9800, 0x9808, 1219 0x9820, 0x983c, 1220 0x9850, 0x9864, 1221 0x9c00, 0x9c6c, 1222 0x9c80, 0x9cec, 1223 0x9d00, 0x9d6c, 1224 0x9d80, 0x9dec, 1225 0x9e00, 0x9e6c, 1226 0x9e80, 0x9eec, 1227 0x9f00, 0x9f6c, 1228 0x9f80, 0x9fec, 1229 0xd004, 0xd004, 1230 0xd010, 0xd03c, 1231 0xdfc0, 0xdfe0, 1232 0xe000, 0xea7c, 1233 0xf000, 0x11110, 1234 0x11118, 0x11190, 1235 0x19040, 0x1906c, 1236 0x19078, 0x19080, 1237 0x1908c, 0x190e4, 1238 0x190f0, 0x190f8, 1239 0x19100, 0x19110, 1240 0x19120, 0x19124, 1241 0x19150, 0x19194, 1242 0x1919c, 0x191b0, 1243 0x191d0, 0x191e8, 1244 0x19238, 0x1924c, 1245 0x193f8, 0x1943c, 1246 0x1944c, 0x19474, 1247 0x19490, 0x194e0, 1248 0x194f0, 0x194f8, 1249 0x19800, 0x19c08, 1250 0x19c10, 0x19c90, 1251 0x19ca0, 0x19ce4, 1252 0x19cf0, 0x19d40, 1253 0x19d50, 0x19d94, 1254 0x19da0, 0x19de8, 1255 0x19df0, 0x19e40, 1256 0x19e50, 0x19e90, 1257 0x19ea0, 0x19f4c, 1258 0x1a000, 0x1a004, 1259 0x1a010, 
0x1a06c, 1260 0x1a0b0, 0x1a0e4, 1261 0x1a0ec, 0x1a0f4, 1262 0x1a100, 0x1a108, 1263 0x1a114, 0x1a120, 1264 0x1a128, 0x1a130, 1265 0x1a138, 0x1a138, 1266 0x1a190, 0x1a1c4, 1267 0x1a1fc, 0x1a1fc, 1268 0x1e040, 0x1e04c, 1269 0x1e284, 0x1e28c, 1270 0x1e2c0, 0x1e2c0, 1271 0x1e2e0, 0x1e2e0, 1272 0x1e300, 0x1e384, 1273 0x1e3c0, 0x1e3c8, 1274 0x1e440, 0x1e44c, 1275 0x1e684, 0x1e68c, 1276 0x1e6c0, 0x1e6c0, 1277 0x1e6e0, 0x1e6e0, 1278 0x1e700, 0x1e784, 1279 0x1e7c0, 0x1e7c8, 1280 0x1e840, 0x1e84c, 1281 0x1ea84, 0x1ea8c, 1282 0x1eac0, 0x1eac0, 1283 0x1eae0, 0x1eae0, 1284 0x1eb00, 0x1eb84, 1285 0x1ebc0, 0x1ebc8, 1286 0x1ec40, 0x1ec4c, 1287 0x1ee84, 0x1ee8c, 1288 0x1eec0, 0x1eec0, 1289 0x1eee0, 0x1eee0, 1290 0x1ef00, 0x1ef84, 1291 0x1efc0, 0x1efc8, 1292 0x1f040, 0x1f04c, 1293 0x1f284, 0x1f28c, 1294 0x1f2c0, 0x1f2c0, 1295 0x1f2e0, 0x1f2e0, 1296 0x1f300, 0x1f384, 1297 0x1f3c0, 0x1f3c8, 1298 0x1f440, 0x1f44c, 1299 0x1f684, 0x1f68c, 1300 0x1f6c0, 0x1f6c0, 1301 0x1f6e0, 0x1f6e0, 1302 0x1f700, 0x1f784, 1303 0x1f7c0, 0x1f7c8, 1304 0x1f840, 0x1f84c, 1305 0x1fa84, 0x1fa8c, 1306 0x1fac0, 0x1fac0, 1307 0x1fae0, 0x1fae0, 1308 0x1fb00, 0x1fb84, 1309 0x1fbc0, 0x1fbc8, 1310 0x1fc40, 0x1fc4c, 1311 0x1fe84, 0x1fe8c, 1312 0x1fec0, 0x1fec0, 1313 0x1fee0, 0x1fee0, 1314 0x1ff00, 0x1ff84, 1315 0x1ffc0, 0x1ffc8, 1316 0x20000, 0x2002c, 1317 0x20100, 0x2013c, 1318 0x20190, 0x201a0, 1319 0x201a8, 0x201b8, 1320 0x201c4, 0x201c8, 1321 0x20200, 0x20318, 1322 0x20400, 0x204b4, 1323 0x204c0, 0x20528, 1324 0x20540, 0x20614, 1325 0x21000, 0x21040, 1326 0x2104c, 0x21060, 1327 0x210c0, 0x210ec, 1328 0x21200, 0x21268, 1329 0x21270, 0x21284, 1330 0x212fc, 0x21388, 1331 0x21400, 0x21404, 1332 0x21500, 0x21500, 1333 0x21510, 0x21518, 1334 0x2152c, 0x21530, 1335 0x2153c, 0x2153c, 1336 0x21550, 0x21554, 1337 0x21600, 0x21600, 1338 0x21608, 0x2161c, 1339 0x21624, 0x21628, 1340 0x21630, 0x21634, 1341 0x2163c, 0x2163c, 1342 0x21700, 0x2171c, 1343 0x21780, 0x2178c, 1344 0x21800, 0x21818, 1345 0x21820, 0x21828, 1346 
0x21830, 0x21848, 1347 0x21850, 0x21854, 1348 0x21860, 0x21868, 1349 0x21870, 0x21870, 1350 0x21878, 0x21898, 1351 0x218a0, 0x218a8, 1352 0x218b0, 0x218c8, 1353 0x218d0, 0x218d4, 1354 0x218e0, 0x218e8, 1355 0x218f0, 0x218f0, 1356 0x218f8, 0x21a18, 1357 0x21a20, 0x21a28, 1358 0x21a30, 0x21a48, 1359 0x21a50, 0x21a54, 1360 0x21a60, 0x21a68, 1361 0x21a70, 0x21a70, 1362 0x21a78, 0x21a98, 1363 0x21aa0, 0x21aa8, 1364 0x21ab0, 0x21ac8, 1365 0x21ad0, 0x21ad4, 1366 0x21ae0, 0x21ae8, 1367 0x21af0, 0x21af0, 1368 0x21af8, 0x21c18, 1369 0x21c20, 0x21c20, 1370 0x21c28, 0x21c30, 1371 0x21c38, 0x21c38, 1372 0x21c80, 0x21c98, 1373 0x21ca0, 0x21ca8, 1374 0x21cb0, 0x21cc8, 1375 0x21cd0, 0x21cd4, 1376 0x21ce0, 0x21ce8, 1377 0x21cf0, 0x21cf0, 1378 0x21cf8, 0x21d7c, 1379 0x21e00, 0x21e04, 1380 0x22000, 0x2202c, 1381 0x22100, 0x2213c, 1382 0x22190, 0x221a0, 1383 0x221a8, 0x221b8, 1384 0x221c4, 0x221c8, 1385 0x22200, 0x22318, 1386 0x22400, 0x224b4, 1387 0x224c0, 0x22528, 1388 0x22540, 0x22614, 1389 0x23000, 0x23040, 1390 0x2304c, 0x23060, 1391 0x230c0, 0x230ec, 1392 0x23200, 0x23268, 1393 0x23270, 0x23284, 1394 0x232fc, 0x23388, 1395 0x23400, 0x23404, 1396 0x23500, 0x23500, 1397 0x23510, 0x23518, 1398 0x2352c, 0x23530, 1399 0x2353c, 0x2353c, 1400 0x23550, 0x23554, 1401 0x23600, 0x23600, 1402 0x23608, 0x2361c, 1403 0x23624, 0x23628, 1404 0x23630, 0x23634, 1405 0x2363c, 0x2363c, 1406 0x23700, 0x2371c, 1407 0x23780, 0x2378c, 1408 0x23800, 0x23818, 1409 0x23820, 0x23828, 1410 0x23830, 0x23848, 1411 0x23850, 0x23854, 1412 0x23860, 0x23868, 1413 0x23870, 0x23870, 1414 0x23878, 0x23898, 1415 0x238a0, 0x238a8, 1416 0x238b0, 0x238c8, 1417 0x238d0, 0x238d4, 1418 0x238e0, 0x238e8, 1419 0x238f0, 0x238f0, 1420 0x238f8, 0x23a18, 1421 0x23a20, 0x23a28, 1422 0x23a30, 0x23a48, 1423 0x23a50, 0x23a54, 1424 0x23a60, 0x23a68, 1425 0x23a70, 0x23a70, 1426 0x23a78, 0x23a98, 1427 0x23aa0, 0x23aa8, 1428 0x23ab0, 0x23ac8, 1429 0x23ad0, 0x23ad4, 1430 0x23ae0, 0x23ae8, 1431 0x23af0, 0x23af0, 1432 0x23af8, 0x23c18, 
1433 0x23c20, 0x23c20, 1434 0x23c28, 0x23c30, 1435 0x23c38, 0x23c38, 1436 0x23c80, 0x23c98, 1437 0x23ca0, 0x23ca8, 1438 0x23cb0, 0x23cc8, 1439 0x23cd0, 0x23cd4, 1440 0x23ce0, 0x23ce8, 1441 0x23cf0, 0x23cf0, 1442 0x23cf8, 0x23d7c, 1443 0x23e00, 0x23e04, 1444 0x24000, 0x2402c, 1445 0x24100, 0x2413c, 1446 0x24190, 0x241a0, 1447 0x241a8, 0x241b8, 1448 0x241c4, 0x241c8, 1449 0x24200, 0x24318, 1450 0x24400, 0x244b4, 1451 0x244c0, 0x24528, 1452 0x24540, 0x24614, 1453 0x25000, 0x25040, 1454 0x2504c, 0x25060, 1455 0x250c0, 0x250ec, 1456 0x25200, 0x25268, 1457 0x25270, 0x25284, 1458 0x252fc, 0x25388, 1459 0x25400, 0x25404, 1460 0x25500, 0x25500, 1461 0x25510, 0x25518, 1462 0x2552c, 0x25530, 1463 0x2553c, 0x2553c, 1464 0x25550, 0x25554, 1465 0x25600, 0x25600, 1466 0x25608, 0x2561c, 1467 0x25624, 0x25628, 1468 0x25630, 0x25634, 1469 0x2563c, 0x2563c, 1470 0x25700, 0x2571c, 1471 0x25780, 0x2578c, 1472 0x25800, 0x25818, 1473 0x25820, 0x25828, 1474 0x25830, 0x25848, 1475 0x25850, 0x25854, 1476 0x25860, 0x25868, 1477 0x25870, 0x25870, 1478 0x25878, 0x25898, 1479 0x258a0, 0x258a8, 1480 0x258b0, 0x258c8, 1481 0x258d0, 0x258d4, 1482 0x258e0, 0x258e8, 1483 0x258f0, 0x258f0, 1484 0x258f8, 0x25a18, 1485 0x25a20, 0x25a28, 1486 0x25a30, 0x25a48, 1487 0x25a50, 0x25a54, 1488 0x25a60, 0x25a68, 1489 0x25a70, 0x25a70, 1490 0x25a78, 0x25a98, 1491 0x25aa0, 0x25aa8, 1492 0x25ab0, 0x25ac8, 1493 0x25ad0, 0x25ad4, 1494 0x25ae0, 0x25ae8, 1495 0x25af0, 0x25af0, 1496 0x25af8, 0x25c18, 1497 0x25c20, 0x25c20, 1498 0x25c28, 0x25c30, 1499 0x25c38, 0x25c38, 1500 0x25c80, 0x25c98, 1501 0x25ca0, 0x25ca8, 1502 0x25cb0, 0x25cc8, 1503 0x25cd0, 0x25cd4, 1504 0x25ce0, 0x25ce8, 1505 0x25cf0, 0x25cf0, 1506 0x25cf8, 0x25d7c, 1507 0x25e00, 0x25e04, 1508 0x26000, 0x2602c, 1509 0x26100, 0x2613c, 1510 0x26190, 0x261a0, 1511 0x261a8, 0x261b8, 1512 0x261c4, 0x261c8, 1513 0x26200, 0x26318, 1514 0x26400, 0x264b4, 1515 0x264c0, 0x26528, 1516 0x26540, 0x26614, 1517 0x27000, 0x27040, 1518 0x2704c, 0x27060, 1519 0x270c0, 
0x270ec, 1520 0x27200, 0x27268, 1521 0x27270, 0x27284, 1522 0x272fc, 0x27388, 1523 0x27400, 0x27404, 1524 0x27500, 0x27500, 1525 0x27510, 0x27518, 1526 0x2752c, 0x27530, 1527 0x2753c, 0x2753c, 1528 0x27550, 0x27554, 1529 0x27600, 0x27600, 1530 0x27608, 0x2761c, 1531 0x27624, 0x27628, 1532 0x27630, 0x27634, 1533 0x2763c, 0x2763c, 1534 0x27700, 0x2771c, 1535 0x27780, 0x2778c, 1536 0x27800, 0x27818, 1537 0x27820, 0x27828, 1538 0x27830, 0x27848, 1539 0x27850, 0x27854, 1540 0x27860, 0x27868, 1541 0x27870, 0x27870, 1542 0x27878, 0x27898, 1543 0x278a0, 0x278a8, 1544 0x278b0, 0x278c8, 1545 0x278d0, 0x278d4, 1546 0x278e0, 0x278e8, 1547 0x278f0, 0x278f0, 1548 0x278f8, 0x27a18, 1549 0x27a20, 0x27a28, 1550 0x27a30, 0x27a48, 1551 0x27a50, 0x27a54, 1552 0x27a60, 0x27a68, 1553 0x27a70, 0x27a70, 1554 0x27a78, 0x27a98, 1555 0x27aa0, 0x27aa8, 1556 0x27ab0, 0x27ac8, 1557 0x27ad0, 0x27ad4, 1558 0x27ae0, 0x27ae8, 1559 0x27af0, 0x27af0, 1560 0x27af8, 0x27c18, 1561 0x27c20, 0x27c20, 1562 0x27c28, 0x27c30, 1563 0x27c38, 0x27c38, 1564 0x27c80, 0x27c98, 1565 0x27ca0, 0x27ca8, 1566 0x27cb0, 0x27cc8, 1567 0x27cd0, 0x27cd4, 1568 0x27ce0, 0x27ce8, 1569 0x27cf0, 0x27cf0, 1570 0x27cf8, 0x27d7c, 1571 0x27e00, 0x27e04, 1572 }; 1573 1574 static const unsigned int t5_reg_ranges[] = { 1575 0x1008, 0x10c0, 1576 0x10cc, 0x10f8, 1577 0x1100, 0x1100, 1578 0x110c, 0x1148, 1579 0x1180, 0x1184, 1580 0x1190, 0x1194, 1581 0x11a0, 0x11a4, 1582 0x11b0, 0x11b4, 1583 0x11fc, 0x123c, 1584 0x1280, 0x173c, 1585 0x1800, 0x18fc, 1586 0x3000, 0x3028, 1587 0x3060, 0x30b0, 1588 0x30b8, 0x30d8, 1589 0x30e0, 0x30fc, 1590 0x3140, 0x357c, 1591 0x35a8, 0x35cc, 1592 0x35ec, 0x35ec, 1593 0x3600, 0x5624, 1594 0x56cc, 0x56ec, 1595 0x56f4, 0x5720, 1596 0x5728, 0x575c, 1597 0x580c, 0x5814, 1598 0x5890, 0x589c, 1599 0x58a4, 0x58ac, 1600 0x58b8, 0x58bc, 1601 0x5940, 0x59c8, 1602 0x59d0, 0x59dc, 1603 0x59fc, 0x5a18, 1604 0x5a60, 0x5a70, 1605 0x5a80, 0x5a9c, 1606 0x5b94, 0x5bfc, 1607 0x6000, 0x6020, 1608 0x6028, 0x6040, 1609 0x6058, 
0x609c, 1610 0x60a8, 0x614c, 1611 0x7700, 0x7798, 1612 0x77c0, 0x78fc, 1613 0x7b00, 0x7b58, 1614 0x7b60, 0x7b84, 1615 0x7b8c, 0x7c54, 1616 0x7d00, 0x7d38, 1617 0x7d40, 0x7d80, 1618 0x7d8c, 0x7ddc, 1619 0x7de4, 0x7e04, 1620 0x7e10, 0x7e1c, 1621 0x7e24, 0x7e38, 1622 0x7e40, 0x7e44, 1623 0x7e4c, 0x7e78, 1624 0x7e80, 0x7edc, 1625 0x7ee8, 0x7efc, 1626 0x8dc0, 0x8de0, 1627 0x8df8, 0x8e04, 1628 0x8e10, 0x8e84, 1629 0x8ea0, 0x8f84, 1630 0x8fc0, 0x9058, 1631 0x9060, 0x9060, 1632 0x9068, 0x90f8, 1633 0x9400, 0x9408, 1634 0x9410, 0x9470, 1635 0x9600, 0x9600, 1636 0x9608, 0x9638, 1637 0x9640, 0x96f4, 1638 0x9800, 0x9808, 1639 0x9820, 0x983c, 1640 0x9850, 0x9864, 1641 0x9c00, 0x9c6c, 1642 0x9c80, 0x9cec, 1643 0x9d00, 0x9d6c, 1644 0x9d80, 0x9dec, 1645 0x9e00, 0x9e6c, 1646 0x9e80, 0x9eec, 1647 0x9f00, 0x9f6c, 1648 0x9f80, 0xa020, 1649 0xd004, 0xd004, 1650 0xd010, 0xd03c, 1651 0xdfc0, 0xdfe0, 1652 0xe000, 0x1106c, 1653 0x11074, 0x11088, 1654 0x1109c, 0x1117c, 1655 0x11190, 0x11204, 1656 0x19040, 0x1906c, 1657 0x19078, 0x19080, 1658 0x1908c, 0x190e8, 1659 0x190f0, 0x190f8, 1660 0x19100, 0x19110, 1661 0x19120, 0x19124, 1662 0x19150, 0x19194, 1663 0x1919c, 0x191b0, 1664 0x191d0, 0x191e8, 1665 0x19238, 0x19290, 1666 0x193f8, 0x19428, 1667 0x19430, 0x19444, 1668 0x1944c, 0x1946c, 1669 0x19474, 0x19474, 1670 0x19490, 0x194cc, 1671 0x194f0, 0x194f8, 1672 0x19c00, 0x19c08, 1673 0x19c10, 0x19c60, 1674 0x19c94, 0x19ce4, 1675 0x19cf0, 0x19d40, 1676 0x19d50, 0x19d94, 1677 0x19da0, 0x19de8, 1678 0x19df0, 0x19e10, 1679 0x19e50, 0x19e90, 1680 0x19ea0, 0x19f24, 1681 0x19f34, 0x19f34, 1682 0x19f40, 0x19f50, 1683 0x19f90, 0x19fb4, 1684 0x19fc4, 0x19fe4, 1685 0x1a000, 0x1a004, 1686 0x1a010, 0x1a06c, 1687 0x1a0b0, 0x1a0e4, 1688 0x1a0ec, 0x1a0f8, 1689 0x1a100, 0x1a108, 1690 0x1a114, 0x1a120, 1691 0x1a128, 0x1a130, 1692 0x1a138, 0x1a138, 1693 0x1a190, 0x1a1c4, 1694 0x1a1fc, 0x1a1fc, 1695 0x1e008, 0x1e00c, 1696 0x1e040, 0x1e044, 1697 0x1e04c, 0x1e04c, 1698 0x1e284, 0x1e290, 1699 0x1e2c0, 0x1e2c0, 1700 
0x1e2e0, 0x1e2e0, 1701 0x1e300, 0x1e384, 1702 0x1e3c0, 0x1e3c8, 1703 0x1e408, 0x1e40c, 1704 0x1e440, 0x1e444, 1705 0x1e44c, 0x1e44c, 1706 0x1e684, 0x1e690, 1707 0x1e6c0, 0x1e6c0, 1708 0x1e6e0, 0x1e6e0, 1709 0x1e700, 0x1e784, 1710 0x1e7c0, 0x1e7c8, 1711 0x1e808, 0x1e80c, 1712 0x1e840, 0x1e844, 1713 0x1e84c, 0x1e84c, 1714 0x1ea84, 0x1ea90, 1715 0x1eac0, 0x1eac0, 1716 0x1eae0, 0x1eae0, 1717 0x1eb00, 0x1eb84, 1718 0x1ebc0, 0x1ebc8, 1719 0x1ec08, 0x1ec0c, 1720 0x1ec40, 0x1ec44, 1721 0x1ec4c, 0x1ec4c, 1722 0x1ee84, 0x1ee90, 1723 0x1eec0, 0x1eec0, 1724 0x1eee0, 0x1eee0, 1725 0x1ef00, 0x1ef84, 1726 0x1efc0, 0x1efc8, 1727 0x1f008, 0x1f00c, 1728 0x1f040, 0x1f044, 1729 0x1f04c, 0x1f04c, 1730 0x1f284, 0x1f290, 1731 0x1f2c0, 0x1f2c0, 1732 0x1f2e0, 0x1f2e0, 1733 0x1f300, 0x1f384, 1734 0x1f3c0, 0x1f3c8, 1735 0x1f408, 0x1f40c, 1736 0x1f440, 0x1f444, 1737 0x1f44c, 0x1f44c, 1738 0x1f684, 0x1f690, 1739 0x1f6c0, 0x1f6c0, 1740 0x1f6e0, 0x1f6e0, 1741 0x1f700, 0x1f784, 1742 0x1f7c0, 0x1f7c8, 1743 0x1f808, 0x1f80c, 1744 0x1f840, 0x1f844, 1745 0x1f84c, 0x1f84c, 1746 0x1fa84, 0x1fa90, 1747 0x1fac0, 0x1fac0, 1748 0x1fae0, 0x1fae0, 1749 0x1fb00, 0x1fb84, 1750 0x1fbc0, 0x1fbc8, 1751 0x1fc08, 0x1fc0c, 1752 0x1fc40, 0x1fc44, 1753 0x1fc4c, 0x1fc4c, 1754 0x1fe84, 0x1fe90, 1755 0x1fec0, 0x1fec0, 1756 0x1fee0, 0x1fee0, 1757 0x1ff00, 0x1ff84, 1758 0x1ffc0, 0x1ffc8, 1759 0x30000, 0x30030, 1760 0x30100, 0x30144, 1761 0x30190, 0x301a0, 1762 0x301a8, 0x301b8, 1763 0x301c4, 0x301c8, 1764 0x301d0, 0x301d0, 1765 0x30200, 0x30318, 1766 0x30400, 0x304b4, 1767 0x304c0, 0x3052c, 1768 0x30540, 0x3061c, 1769 0x30800, 0x30828, 1770 0x30834, 0x30834, 1771 0x308c0, 0x30908, 1772 0x30910, 0x309ac, 1773 0x30a00, 0x30a14, 1774 0x30a1c, 0x30a2c, 1775 0x30a44, 0x30a50, 1776 0x30a74, 0x30a74, 1777 0x30a7c, 0x30afc, 1778 0x30b08, 0x30c24, 1779 0x30d00, 0x30d00, 1780 0x30d08, 0x30d14, 1781 0x30d1c, 0x30d20, 1782 0x30d3c, 0x30d3c, 1783 0x30d48, 0x30d50, 1784 0x31200, 0x3120c, 1785 0x31220, 0x31220, 1786 0x31240, 0x31240, 
1787 0x31600, 0x3160c, 1788 0x31a00, 0x31a1c, 1789 0x31e00, 0x31e20, 1790 0x31e38, 0x31e3c, 1791 0x31e80, 0x31e80, 1792 0x31e88, 0x31ea8, 1793 0x31eb0, 0x31eb4, 1794 0x31ec8, 0x31ed4, 1795 0x31fb8, 0x32004, 1796 0x32200, 0x32200, 1797 0x32208, 0x32240, 1798 0x32248, 0x32280, 1799 0x32288, 0x322c0, 1800 0x322c8, 0x322fc, 1801 0x32600, 0x32630, 1802 0x32a00, 0x32abc, 1803 0x32b00, 0x32b10, 1804 0x32b20, 0x32b30, 1805 0x32b40, 0x32b50, 1806 0x32b60, 0x32b70, 1807 0x33000, 0x33028, 1808 0x33030, 0x33048, 1809 0x33060, 0x33068, 1810 0x33070, 0x3309c, 1811 0x330f0, 0x33128, 1812 0x33130, 0x33148, 1813 0x33160, 0x33168, 1814 0x33170, 0x3319c, 1815 0x331f0, 0x33238, 1816 0x33240, 0x33240, 1817 0x33248, 0x33250, 1818 0x3325c, 0x33264, 1819 0x33270, 0x332b8, 1820 0x332c0, 0x332e4, 1821 0x332f8, 0x33338, 1822 0x33340, 0x33340, 1823 0x33348, 0x33350, 1824 0x3335c, 0x33364, 1825 0x33370, 0x333b8, 1826 0x333c0, 0x333e4, 1827 0x333f8, 0x33428, 1828 0x33430, 0x33448, 1829 0x33460, 0x33468, 1830 0x33470, 0x3349c, 1831 0x334f0, 0x33528, 1832 0x33530, 0x33548, 1833 0x33560, 0x33568, 1834 0x33570, 0x3359c, 1835 0x335f0, 0x33638, 1836 0x33640, 0x33640, 1837 0x33648, 0x33650, 1838 0x3365c, 0x33664, 1839 0x33670, 0x336b8, 1840 0x336c0, 0x336e4, 1841 0x336f8, 0x33738, 1842 0x33740, 0x33740, 1843 0x33748, 0x33750, 1844 0x3375c, 0x33764, 1845 0x33770, 0x337b8, 1846 0x337c0, 0x337e4, 1847 0x337f8, 0x337fc, 1848 0x33814, 0x33814, 1849 0x3382c, 0x3382c, 1850 0x33880, 0x3388c, 1851 0x338e8, 0x338ec, 1852 0x33900, 0x33928, 1853 0x33930, 0x33948, 1854 0x33960, 0x33968, 1855 0x33970, 0x3399c, 1856 0x339f0, 0x33a38, 1857 0x33a40, 0x33a40, 1858 0x33a48, 0x33a50, 1859 0x33a5c, 0x33a64, 1860 0x33a70, 0x33ab8, 1861 0x33ac0, 0x33ae4, 1862 0x33af8, 0x33b10, 1863 0x33b28, 0x33b28, 1864 0x33b3c, 0x33b50, 1865 0x33bf0, 0x33c10, 1866 0x33c28, 0x33c28, 1867 0x33c3c, 0x33c50, 1868 0x33cf0, 0x33cfc, 1869 0x34000, 0x34030, 1870 0x34100, 0x34144, 1871 0x34190, 0x341a0, 1872 0x341a8, 0x341b8, 1873 0x341c4, 
0x341c8, 1874 0x341d0, 0x341d0, 1875 0x34200, 0x34318, 1876 0x34400, 0x344b4, 1877 0x344c0, 0x3452c, 1878 0x34540, 0x3461c, 1879 0x34800, 0x34828, 1880 0x34834, 0x34834, 1881 0x348c0, 0x34908, 1882 0x34910, 0x349ac, 1883 0x34a00, 0x34a14, 1884 0x34a1c, 0x34a2c, 1885 0x34a44, 0x34a50, 1886 0x34a74, 0x34a74, 1887 0x34a7c, 0x34afc, 1888 0x34b08, 0x34c24, 1889 0x34d00, 0x34d00, 1890 0x34d08, 0x34d14, 1891 0x34d1c, 0x34d20, 1892 0x34d3c, 0x34d3c, 1893 0x34d48, 0x34d50, 1894 0x35200, 0x3520c, 1895 0x35220, 0x35220, 1896 0x35240, 0x35240, 1897 0x35600, 0x3560c, 1898 0x35a00, 0x35a1c, 1899 0x35e00, 0x35e20, 1900 0x35e38, 0x35e3c, 1901 0x35e80, 0x35e80, 1902 0x35e88, 0x35ea8, 1903 0x35eb0, 0x35eb4, 1904 0x35ec8, 0x35ed4, 1905 0x35fb8, 0x36004, 1906 0x36200, 0x36200, 1907 0x36208, 0x36240, 1908 0x36248, 0x36280, 1909 0x36288, 0x362c0, 1910 0x362c8, 0x362fc, 1911 0x36600, 0x36630, 1912 0x36a00, 0x36abc, 1913 0x36b00, 0x36b10, 1914 0x36b20, 0x36b30, 1915 0x36b40, 0x36b50, 1916 0x36b60, 0x36b70, 1917 0x37000, 0x37028, 1918 0x37030, 0x37048, 1919 0x37060, 0x37068, 1920 0x37070, 0x3709c, 1921 0x370f0, 0x37128, 1922 0x37130, 0x37148, 1923 0x37160, 0x37168, 1924 0x37170, 0x3719c, 1925 0x371f0, 0x37238, 1926 0x37240, 0x37240, 1927 0x37248, 0x37250, 1928 0x3725c, 0x37264, 1929 0x37270, 0x372b8, 1930 0x372c0, 0x372e4, 1931 0x372f8, 0x37338, 1932 0x37340, 0x37340, 1933 0x37348, 0x37350, 1934 0x3735c, 0x37364, 1935 0x37370, 0x373b8, 1936 0x373c0, 0x373e4, 1937 0x373f8, 0x37428, 1938 0x37430, 0x37448, 1939 0x37460, 0x37468, 1940 0x37470, 0x3749c, 1941 0x374f0, 0x37528, 1942 0x37530, 0x37548, 1943 0x37560, 0x37568, 1944 0x37570, 0x3759c, 1945 0x375f0, 0x37638, 1946 0x37640, 0x37640, 1947 0x37648, 0x37650, 1948 0x3765c, 0x37664, 1949 0x37670, 0x376b8, 1950 0x376c0, 0x376e4, 1951 0x376f8, 0x37738, 1952 0x37740, 0x37740, 1953 0x37748, 0x37750, 1954 0x3775c, 0x37764, 1955 0x37770, 0x377b8, 1956 0x377c0, 0x377e4, 1957 0x377f8, 0x377fc, 1958 0x37814, 0x37814, 1959 0x3782c, 0x3782c, 1960 
0x37880, 0x3788c, 1961 0x378e8, 0x378ec, 1962 0x37900, 0x37928, 1963 0x37930, 0x37948, 1964 0x37960, 0x37968, 1965 0x37970, 0x3799c, 1966 0x379f0, 0x37a38, 1967 0x37a40, 0x37a40, 1968 0x37a48, 0x37a50, 1969 0x37a5c, 0x37a64, 1970 0x37a70, 0x37ab8, 1971 0x37ac0, 0x37ae4, 1972 0x37af8, 0x37b10, 1973 0x37b28, 0x37b28, 1974 0x37b3c, 0x37b50, 1975 0x37bf0, 0x37c10, 1976 0x37c28, 0x37c28, 1977 0x37c3c, 0x37c50, 1978 0x37cf0, 0x37cfc, 1979 0x38000, 0x38030, 1980 0x38100, 0x38144, 1981 0x38190, 0x381a0, 1982 0x381a8, 0x381b8, 1983 0x381c4, 0x381c8, 1984 0x381d0, 0x381d0, 1985 0x38200, 0x38318, 1986 0x38400, 0x384b4, 1987 0x384c0, 0x3852c, 1988 0x38540, 0x3861c, 1989 0x38800, 0x38828, 1990 0x38834, 0x38834, 1991 0x388c0, 0x38908, 1992 0x38910, 0x389ac, 1993 0x38a00, 0x38a14, 1994 0x38a1c, 0x38a2c, 1995 0x38a44, 0x38a50, 1996 0x38a74, 0x38a74, 1997 0x38a7c, 0x38afc, 1998 0x38b08, 0x38c24, 1999 0x38d00, 0x38d00, 2000 0x38d08, 0x38d14, 2001 0x38d1c, 0x38d20, 2002 0x38d3c, 0x38d3c, 2003 0x38d48, 0x38d50, 2004 0x39200, 0x3920c, 2005 0x39220, 0x39220, 2006 0x39240, 0x39240, 2007 0x39600, 0x3960c, 2008 0x39a00, 0x39a1c, 2009 0x39e00, 0x39e20, 2010 0x39e38, 0x39e3c, 2011 0x39e80, 0x39e80, 2012 0x39e88, 0x39ea8, 2013 0x39eb0, 0x39eb4, 2014 0x39ec8, 0x39ed4, 2015 0x39fb8, 0x3a004, 2016 0x3a200, 0x3a200, 2017 0x3a208, 0x3a240, 2018 0x3a248, 0x3a280, 2019 0x3a288, 0x3a2c0, 2020 0x3a2c8, 0x3a2fc, 2021 0x3a600, 0x3a630, 2022 0x3aa00, 0x3aabc, 2023 0x3ab00, 0x3ab10, 2024 0x3ab20, 0x3ab30, 2025 0x3ab40, 0x3ab50, 2026 0x3ab60, 0x3ab70, 2027 0x3b000, 0x3b028, 2028 0x3b030, 0x3b048, 2029 0x3b060, 0x3b068, 2030 0x3b070, 0x3b09c, 2031 0x3b0f0, 0x3b128, 2032 0x3b130, 0x3b148, 2033 0x3b160, 0x3b168, 2034 0x3b170, 0x3b19c, 2035 0x3b1f0, 0x3b238, 2036 0x3b240, 0x3b240, 2037 0x3b248, 0x3b250, 2038 0x3b25c, 0x3b264, 2039 0x3b270, 0x3b2b8, 2040 0x3b2c0, 0x3b2e4, 2041 0x3b2f8, 0x3b338, 2042 0x3b340, 0x3b340, 2043 0x3b348, 0x3b350, 2044 0x3b35c, 0x3b364, 2045 0x3b370, 0x3b3b8, 2046 0x3b3c0, 0x3b3e4, 
2047 0x3b3f8, 0x3b428, 2048 0x3b430, 0x3b448, 2049 0x3b460, 0x3b468, 2050 0x3b470, 0x3b49c, 2051 0x3b4f0, 0x3b528, 2052 0x3b530, 0x3b548, 2053 0x3b560, 0x3b568, 2054 0x3b570, 0x3b59c, 2055 0x3b5f0, 0x3b638, 2056 0x3b640, 0x3b640, 2057 0x3b648, 0x3b650, 2058 0x3b65c, 0x3b664, 2059 0x3b670, 0x3b6b8, 2060 0x3b6c0, 0x3b6e4, 2061 0x3b6f8, 0x3b738, 2062 0x3b740, 0x3b740, 2063 0x3b748, 0x3b750, 2064 0x3b75c, 0x3b764, 2065 0x3b770, 0x3b7b8, 2066 0x3b7c0, 0x3b7e4, 2067 0x3b7f8, 0x3b7fc, 2068 0x3b814, 0x3b814, 2069 0x3b82c, 0x3b82c, 2070 0x3b880, 0x3b88c, 2071 0x3b8e8, 0x3b8ec, 2072 0x3b900, 0x3b928, 2073 0x3b930, 0x3b948, 2074 0x3b960, 0x3b968, 2075 0x3b970, 0x3b99c, 2076 0x3b9f0, 0x3ba38, 2077 0x3ba40, 0x3ba40, 2078 0x3ba48, 0x3ba50, 2079 0x3ba5c, 0x3ba64, 2080 0x3ba70, 0x3bab8, 2081 0x3bac0, 0x3bae4, 2082 0x3baf8, 0x3bb10, 2083 0x3bb28, 0x3bb28, 2084 0x3bb3c, 0x3bb50, 2085 0x3bbf0, 0x3bc10, 2086 0x3bc28, 0x3bc28, 2087 0x3bc3c, 0x3bc50, 2088 0x3bcf0, 0x3bcfc, 2089 0x3c000, 0x3c030, 2090 0x3c100, 0x3c144, 2091 0x3c190, 0x3c1a0, 2092 0x3c1a8, 0x3c1b8, 2093 0x3c1c4, 0x3c1c8, 2094 0x3c1d0, 0x3c1d0, 2095 0x3c200, 0x3c318, 2096 0x3c400, 0x3c4b4, 2097 0x3c4c0, 0x3c52c, 2098 0x3c540, 0x3c61c, 2099 0x3c800, 0x3c828, 2100 0x3c834, 0x3c834, 2101 0x3c8c0, 0x3c908, 2102 0x3c910, 0x3c9ac, 2103 0x3ca00, 0x3ca14, 2104 0x3ca1c, 0x3ca2c, 2105 0x3ca44, 0x3ca50, 2106 0x3ca74, 0x3ca74, 2107 0x3ca7c, 0x3cafc, 2108 0x3cb08, 0x3cc24, 2109 0x3cd00, 0x3cd00, 2110 0x3cd08, 0x3cd14, 2111 0x3cd1c, 0x3cd20, 2112 0x3cd3c, 0x3cd3c, 2113 0x3cd48, 0x3cd50, 2114 0x3d200, 0x3d20c, 2115 0x3d220, 0x3d220, 2116 0x3d240, 0x3d240, 2117 0x3d600, 0x3d60c, 2118 0x3da00, 0x3da1c, 2119 0x3de00, 0x3de20, 2120 0x3de38, 0x3de3c, 2121 0x3de80, 0x3de80, 2122 0x3de88, 0x3dea8, 2123 0x3deb0, 0x3deb4, 2124 0x3dec8, 0x3ded4, 2125 0x3dfb8, 0x3e004, 2126 0x3e200, 0x3e200, 2127 0x3e208, 0x3e240, 2128 0x3e248, 0x3e280, 2129 0x3e288, 0x3e2c0, 2130 0x3e2c8, 0x3e2fc, 2131 0x3e600, 0x3e630, 2132 0x3ea00, 0x3eabc, 2133 0x3eb00, 
0x3eb10, 2134 0x3eb20, 0x3eb30, 2135 0x3eb40, 0x3eb50, 2136 0x3eb60, 0x3eb70, 2137 0x3f000, 0x3f028, 2138 0x3f030, 0x3f048, 2139 0x3f060, 0x3f068, 2140 0x3f070, 0x3f09c, 2141 0x3f0f0, 0x3f128, 2142 0x3f130, 0x3f148, 2143 0x3f160, 0x3f168, 2144 0x3f170, 0x3f19c, 2145 0x3f1f0, 0x3f238, 2146 0x3f240, 0x3f240, 2147 0x3f248, 0x3f250, 2148 0x3f25c, 0x3f264, 2149 0x3f270, 0x3f2b8, 2150 0x3f2c0, 0x3f2e4, 2151 0x3f2f8, 0x3f338, 2152 0x3f340, 0x3f340, 2153 0x3f348, 0x3f350, 2154 0x3f35c, 0x3f364, 2155 0x3f370, 0x3f3b8, 2156 0x3f3c0, 0x3f3e4, 2157 0x3f3f8, 0x3f428, 2158 0x3f430, 0x3f448, 2159 0x3f460, 0x3f468, 2160 0x3f470, 0x3f49c, 2161 0x3f4f0, 0x3f528, 2162 0x3f530, 0x3f548, 2163 0x3f560, 0x3f568, 2164 0x3f570, 0x3f59c, 2165 0x3f5f0, 0x3f638, 2166 0x3f640, 0x3f640, 2167 0x3f648, 0x3f650, 2168 0x3f65c, 0x3f664, 2169 0x3f670, 0x3f6b8, 2170 0x3f6c0, 0x3f6e4, 2171 0x3f6f8, 0x3f738, 2172 0x3f740, 0x3f740, 2173 0x3f748, 0x3f750, 2174 0x3f75c, 0x3f764, 2175 0x3f770, 0x3f7b8, 2176 0x3f7c0, 0x3f7e4, 2177 0x3f7f8, 0x3f7fc, 2178 0x3f814, 0x3f814, 2179 0x3f82c, 0x3f82c, 2180 0x3f880, 0x3f88c, 2181 0x3f8e8, 0x3f8ec, 2182 0x3f900, 0x3f928, 2183 0x3f930, 0x3f948, 2184 0x3f960, 0x3f968, 2185 0x3f970, 0x3f99c, 2186 0x3f9f0, 0x3fa38, 2187 0x3fa40, 0x3fa40, 2188 0x3fa48, 0x3fa50, 2189 0x3fa5c, 0x3fa64, 2190 0x3fa70, 0x3fab8, 2191 0x3fac0, 0x3fae4, 2192 0x3faf8, 0x3fb10, 2193 0x3fb28, 0x3fb28, 2194 0x3fb3c, 0x3fb50, 2195 0x3fbf0, 0x3fc10, 2196 0x3fc28, 0x3fc28, 2197 0x3fc3c, 0x3fc50, 2198 0x3fcf0, 0x3fcfc, 2199 0x40000, 0x4000c, 2200 0x40040, 0x40050, 2201 0x40060, 0x40068, 2202 0x4007c, 0x4008c, 2203 0x40094, 0x400b0, 2204 0x400c0, 0x40144, 2205 0x40180, 0x4018c, 2206 0x40200, 0x40254, 2207 0x40260, 0x40264, 2208 0x40270, 0x40288, 2209 0x40290, 0x40298, 2210 0x402ac, 0x402c8, 2211 0x402d0, 0x402e0, 2212 0x402f0, 0x402f0, 2213 0x40300, 0x4033c, 2214 0x403f8, 0x403fc, 2215 0x41304, 0x413c4, 2216 0x41400, 0x4140c, 2217 0x41414, 0x4141c, 2218 0x41480, 0x414d0, 2219 0x44000, 0x44054, 2220 
0x4405c, 0x44078, 2221 0x440c0, 0x44174, 2222 0x44180, 0x441ac, 2223 0x441b4, 0x441b8, 2224 0x441c0, 0x44254, 2225 0x4425c, 0x44278, 2226 0x442c0, 0x44374, 2227 0x44380, 0x443ac, 2228 0x443b4, 0x443b8, 2229 0x443c0, 0x44454, 2230 0x4445c, 0x44478, 2231 0x444c0, 0x44574, 2232 0x44580, 0x445ac, 2233 0x445b4, 0x445b8, 2234 0x445c0, 0x44654, 2235 0x4465c, 0x44678, 2236 0x446c0, 0x44774, 2237 0x44780, 0x447ac, 2238 0x447b4, 0x447b8, 2239 0x447c0, 0x44854, 2240 0x4485c, 0x44878, 2241 0x448c0, 0x44974, 2242 0x44980, 0x449ac, 2243 0x449b4, 0x449b8, 2244 0x449c0, 0x449fc, 2245 0x45000, 0x45004, 2246 0x45010, 0x45030, 2247 0x45040, 0x45060, 2248 0x45068, 0x45068, 2249 0x45080, 0x45084, 2250 0x450a0, 0x450b0, 2251 0x45200, 0x45204, 2252 0x45210, 0x45230, 2253 0x45240, 0x45260, 2254 0x45268, 0x45268, 2255 0x45280, 0x45284, 2256 0x452a0, 0x452b0, 2257 0x460c0, 0x460e4, 2258 0x47000, 0x4703c, 2259 0x47044, 0x4708c, 2260 0x47200, 0x47250, 2261 0x47400, 0x47408, 2262 0x47414, 0x47420, 2263 0x47600, 0x47618, 2264 0x47800, 0x47814, 2265 0x48000, 0x4800c, 2266 0x48040, 0x48050, 2267 0x48060, 0x48068, 2268 0x4807c, 0x4808c, 2269 0x48094, 0x480b0, 2270 0x480c0, 0x48144, 2271 0x48180, 0x4818c, 2272 0x48200, 0x48254, 2273 0x48260, 0x48264, 2274 0x48270, 0x48288, 2275 0x48290, 0x48298, 2276 0x482ac, 0x482c8, 2277 0x482d0, 0x482e0, 2278 0x482f0, 0x482f0, 2279 0x48300, 0x4833c, 2280 0x483f8, 0x483fc, 2281 0x49304, 0x493c4, 2282 0x49400, 0x4940c, 2283 0x49414, 0x4941c, 2284 0x49480, 0x494d0, 2285 0x4c000, 0x4c054, 2286 0x4c05c, 0x4c078, 2287 0x4c0c0, 0x4c174, 2288 0x4c180, 0x4c1ac, 2289 0x4c1b4, 0x4c1b8, 2290 0x4c1c0, 0x4c254, 2291 0x4c25c, 0x4c278, 2292 0x4c2c0, 0x4c374, 2293 0x4c380, 0x4c3ac, 2294 0x4c3b4, 0x4c3b8, 2295 0x4c3c0, 0x4c454, 2296 0x4c45c, 0x4c478, 2297 0x4c4c0, 0x4c574, 2298 0x4c580, 0x4c5ac, 2299 0x4c5b4, 0x4c5b8, 2300 0x4c5c0, 0x4c654, 2301 0x4c65c, 0x4c678, 2302 0x4c6c0, 0x4c774, 2303 0x4c780, 0x4c7ac, 2304 0x4c7b4, 0x4c7b8, 2305 0x4c7c0, 0x4c854, 2306 0x4c85c, 0x4c878, 
2307 0x4c8c0, 0x4c974, 2308 0x4c980, 0x4c9ac, 2309 0x4c9b4, 0x4c9b8, 2310 0x4c9c0, 0x4c9fc, 2311 0x4d000, 0x4d004, 2312 0x4d010, 0x4d030, 2313 0x4d040, 0x4d060, 2314 0x4d068, 0x4d068, 2315 0x4d080, 0x4d084, 2316 0x4d0a0, 0x4d0b0, 2317 0x4d200, 0x4d204, 2318 0x4d210, 0x4d230, 2319 0x4d240, 0x4d260, 2320 0x4d268, 0x4d268, 2321 0x4d280, 0x4d284, 2322 0x4d2a0, 0x4d2b0, 2323 0x4e0c0, 0x4e0e4, 2324 0x4f000, 0x4f03c, 2325 0x4f044, 0x4f08c, 2326 0x4f200, 0x4f250, 2327 0x4f400, 0x4f408, 2328 0x4f414, 0x4f420, 2329 0x4f600, 0x4f618, 2330 0x4f800, 0x4f814, 2331 0x50000, 0x50084, 2332 0x50090, 0x500cc, 2333 0x50400, 0x50400, 2334 0x50800, 0x50884, 2335 0x50890, 0x508cc, 2336 0x50c00, 0x50c00, 2337 0x51000, 0x5101c, 2338 0x51300, 0x51308, 2339 }; 2340 2341 static const unsigned int t6_reg_ranges[] = { 2342 0x1008, 0x101c, 2343 0x1024, 0x10a8, 2344 0x10b4, 0x10f8, 2345 0x1100, 0x1114, 2346 0x111c, 0x112c, 2347 0x1138, 0x113c, 2348 0x1144, 0x114c, 2349 0x1180, 0x1184, 2350 0x1190, 0x1194, 2351 0x11a0, 0x11a4, 2352 0x11b0, 0x11c4, 2353 0x11fc, 0x1274, 2354 0x1280, 0x133c, 2355 0x1800, 0x18fc, 2356 0x3000, 0x302c, 2357 0x3060, 0x30b0, 2358 0x30b8, 0x30d8, 2359 0x30e0, 0x30fc, 2360 0x3140, 0x357c, 2361 0x35a8, 0x35cc, 2362 0x35ec, 0x35ec, 2363 0x3600, 0x5624, 2364 0x56cc, 0x56ec, 2365 0x56f4, 0x5720, 2366 0x5728, 0x575c, 2367 0x580c, 0x5814, 2368 0x5890, 0x589c, 2369 0x58a4, 0x58ac, 2370 0x58b8, 0x58bc, 2371 0x5940, 0x595c, 2372 0x5980, 0x598c, 2373 0x59b0, 0x59c8, 2374 0x59d0, 0x59dc, 2375 0x59fc, 0x5a18, 2376 0x5a60, 0x5a6c, 2377 0x5a80, 0x5a8c, 2378 0x5a94, 0x5a9c, 2379 0x5b94, 0x5bfc, 2380 0x5c10, 0x5e48, 2381 0x5e50, 0x5e94, 2382 0x5ea0, 0x5eb0, 2383 0x5ec0, 0x5ec0, 2384 0x5ec8, 0x5ed0, 2385 0x5ee0, 0x5ee0, 2386 0x5ef0, 0x5ef0, 2387 0x5f00, 0x5f00, 2388 0x6000, 0x6020, 2389 0x6028, 0x6040, 2390 0x6058, 0x609c, 2391 0x60a8, 0x619c, 2392 0x7700, 0x7798, 2393 0x77c0, 0x7880, 2394 0x78cc, 0x78fc, 2395 0x7b00, 0x7b58, 2396 0x7b60, 0x7b84, 2397 0x7b8c, 0x7c54, 2398 0x7d00, 0x7d38, 
2399 0x7d40, 0x7d84, 2400 0x7d8c, 0x7ddc, 2401 0x7de4, 0x7e04, 2402 0x7e10, 0x7e1c, 2403 0x7e24, 0x7e38, 2404 0x7e40, 0x7e44, 2405 0x7e4c, 0x7e78, 2406 0x7e80, 0x7edc, 2407 0x7ee8, 0x7efc, 2408 0x8dc0, 0x8de0, 2409 0x8df8, 0x8e04, 2410 0x8e10, 0x8e84, 2411 0x8ea0, 0x8f88, 2412 0x8fb8, 0x9058, 2413 0x9060, 0x9060, 2414 0x9068, 0x90f8, 2415 0x9100, 0x9124, 2416 0x9400, 0x9470, 2417 0x9600, 0x9600, 2418 0x9608, 0x9638, 2419 0x9640, 0x9704, 2420 0x9710, 0x971c, 2421 0x9800, 0x9808, 2422 0x9820, 0x983c, 2423 0x9850, 0x9864, 2424 0x9c00, 0x9c6c, 2425 0x9c80, 0x9cec, 2426 0x9d00, 0x9d6c, 2427 0x9d80, 0x9dec, 2428 0x9e00, 0x9e6c, 2429 0x9e80, 0x9eec, 2430 0x9f00, 0x9f6c, 2431 0x9f80, 0xa020, 2432 0xd004, 0xd03c, 2433 0xd100, 0xd118, 2434 0xd200, 0xd214, 2435 0xd220, 0xd234, 2436 0xd240, 0xd254, 2437 0xd260, 0xd274, 2438 0xd280, 0xd294, 2439 0xd2a0, 0xd2b4, 2440 0xd2c0, 0xd2d4, 2441 0xd2e0, 0xd2f4, 2442 0xd300, 0xd31c, 2443 0xdfc0, 0xdfe0, 2444 0xe000, 0xf008, 2445 0xf010, 0xf018, 2446 0xf020, 0xf028, 2447 0x11000, 0x11014, 2448 0x11048, 0x1106c, 2449 0x11074, 0x11088, 2450 0x11098, 0x11120, 2451 0x1112c, 0x1117c, 2452 0x11190, 0x112e0, 2453 0x11300, 0x1130c, 2454 0x12000, 0x1206c, 2455 0x19040, 0x1906c, 2456 0x19078, 0x19080, 2457 0x1908c, 0x190e8, 2458 0x190f0, 0x190f8, 2459 0x19100, 0x19110, 2460 0x19120, 0x19124, 2461 0x19150, 0x19194, 2462 0x1919c, 0x191b0, 2463 0x191d0, 0x191e8, 2464 0x19238, 0x19290, 2465 0x192a4, 0x192b0, 2466 0x19348, 0x1934c, 2467 0x193f8, 0x19418, 2468 0x19420, 0x19428, 2469 0x19430, 0x19444, 2470 0x1944c, 0x1946c, 2471 0x19474, 0x19474, 2472 0x19490, 0x194cc, 2473 0x194f0, 0x194f8, 2474 0x19c00, 0x19c48, 2475 0x19c50, 0x19c80, 2476 0x19c94, 0x19c98, 2477 0x19ca0, 0x19cbc, 2478 0x19ce4, 0x19ce4, 2479 0x19cf0, 0x19cf8, 2480 0x19d00, 0x19d28, 2481 0x19d50, 0x19d78, 2482 0x19d94, 0x19d98, 2483 0x19da0, 0x19de0, 2484 0x19df0, 0x19e10, 2485 0x19e50, 0x19e6c, 2486 0x19ea0, 0x19ebc, 2487 0x19ec4, 0x19ef4, 2488 0x19f04, 0x19f2c, 2489 0x19f34, 0x19f34, 
2490 0x19f40, 0x19f50, 2491 0x19f90, 0x19fac, 2492 0x19fc4, 0x19fc8, 2493 0x19fd0, 0x19fe4, 2494 0x1a000, 0x1a004, 2495 0x1a010, 0x1a06c, 2496 0x1a0b0, 0x1a0e4, 2497 0x1a0ec, 0x1a0f8, 2498 0x1a100, 0x1a108, 2499 0x1a114, 0x1a120, 2500 0x1a128, 0x1a130, 2501 0x1a138, 0x1a138, 2502 0x1a190, 0x1a1c4, 2503 0x1a1fc, 0x1a1fc, 2504 0x1e008, 0x1e00c, 2505 0x1e040, 0x1e044, 2506 0x1e04c, 0x1e04c, 2507 0x1e284, 0x1e290, 2508 0x1e2c0, 0x1e2c0, 2509 0x1e2e0, 0x1e2e0, 2510 0x1e300, 0x1e384, 2511 0x1e3c0, 0x1e3c8, 2512 0x1e408, 0x1e40c, 2513 0x1e440, 0x1e444, 2514 0x1e44c, 0x1e44c, 2515 0x1e684, 0x1e690, 2516 0x1e6c0, 0x1e6c0, 2517 0x1e6e0, 0x1e6e0, 2518 0x1e700, 0x1e784, 2519 0x1e7c0, 0x1e7c8, 2520 0x1e808, 0x1e80c, 2521 0x1e840, 0x1e844, 2522 0x1e84c, 0x1e84c, 2523 0x1ea84, 0x1ea90, 2524 0x1eac0, 0x1eac0, 2525 0x1eae0, 0x1eae0, 2526 0x1eb00, 0x1eb84, 2527 0x1ebc0, 0x1ebc8, 2528 0x1ec08, 0x1ec0c, 2529 0x1ec40, 0x1ec44, 2530 0x1ec4c, 0x1ec4c, 2531 0x1ee84, 0x1ee90, 2532 0x1eec0, 0x1eec0, 2533 0x1eee0, 0x1eee0, 2534 0x1ef00, 0x1ef84, 2535 0x1efc0, 0x1efc8, 2536 0x1f008, 0x1f00c, 2537 0x1f040, 0x1f044, 2538 0x1f04c, 0x1f04c, 2539 0x1f284, 0x1f290, 2540 0x1f2c0, 0x1f2c0, 2541 0x1f2e0, 0x1f2e0, 2542 0x1f300, 0x1f384, 2543 0x1f3c0, 0x1f3c8, 2544 0x1f408, 0x1f40c, 2545 0x1f440, 0x1f444, 2546 0x1f44c, 0x1f44c, 2547 0x1f684, 0x1f690, 2548 0x1f6c0, 0x1f6c0, 2549 0x1f6e0, 0x1f6e0, 2550 0x1f700, 0x1f784, 2551 0x1f7c0, 0x1f7c8, 2552 0x1f808, 0x1f80c, 2553 0x1f840, 0x1f844, 2554 0x1f84c, 0x1f84c, 2555 0x1fa84, 0x1fa90, 2556 0x1fac0, 0x1fac0, 2557 0x1fae0, 0x1fae0, 2558 0x1fb00, 0x1fb84, 2559 0x1fbc0, 0x1fbc8, 2560 0x1fc08, 0x1fc0c, 2561 0x1fc40, 0x1fc44, 2562 0x1fc4c, 0x1fc4c, 2563 0x1fe84, 0x1fe90, 2564 0x1fec0, 0x1fec0, 2565 0x1fee0, 0x1fee0, 2566 0x1ff00, 0x1ff84, 2567 0x1ffc0, 0x1ffc8, 2568 0x30000, 0x30030, 2569 0x30100, 0x30168, 2570 0x30190, 0x301a0, 2571 0x301a8, 0x301b8, 2572 0x301c4, 0x301c8, 2573 0x301d0, 0x301d0, 2574 0x30200, 0x30320, 2575 0x30400, 0x304b4, 2576 0x304c0, 
0x3052c, 2577 0x30540, 0x3061c, 2578 0x30800, 0x308a0, 2579 0x308c0, 0x30908, 2580 0x30910, 0x309b8, 2581 0x30a00, 0x30a04, 2582 0x30a0c, 0x30a14, 2583 0x30a1c, 0x30a2c, 2584 0x30a44, 0x30a50, 2585 0x30a74, 0x30a74, 2586 0x30a7c, 0x30afc, 2587 0x30b08, 0x30c24, 2588 0x30d00, 0x30d14, 2589 0x30d1c, 0x30d3c, 2590 0x30d44, 0x30d4c, 2591 0x30d54, 0x30d74, 2592 0x30d7c, 0x30d7c, 2593 0x30de0, 0x30de0, 2594 0x30e00, 0x30ed4, 2595 0x30f00, 0x30fa4, 2596 0x30fc0, 0x30fc4, 2597 0x31000, 0x31004, 2598 0x31080, 0x310fc, 2599 0x31208, 0x31220, 2600 0x3123c, 0x31254, 2601 0x31300, 0x31300, 2602 0x31308, 0x3131c, 2603 0x31338, 0x3133c, 2604 0x31380, 0x31380, 2605 0x31388, 0x313a8, 2606 0x313b4, 0x313b4, 2607 0x31400, 0x31420, 2608 0x31438, 0x3143c, 2609 0x31480, 0x31480, 2610 0x314a8, 0x314a8, 2611 0x314b0, 0x314b4, 2612 0x314c8, 0x314d4, 2613 0x31a40, 0x31a4c, 2614 0x31af0, 0x31b20, 2615 0x31b38, 0x31b3c, 2616 0x31b80, 0x31b80, 2617 0x31ba8, 0x31ba8, 2618 0x31bb0, 0x31bb4, 2619 0x31bc8, 0x31bd4, 2620 0x32140, 0x3218c, 2621 0x321f0, 0x321f4, 2622 0x32200, 0x32200, 2623 0x32218, 0x32218, 2624 0x32400, 0x32400, 2625 0x32408, 0x3241c, 2626 0x32618, 0x32620, 2627 0x32664, 0x32664, 2628 0x326a8, 0x326a8, 2629 0x326ec, 0x326ec, 2630 0x32a00, 0x32abc, 2631 0x32b00, 0x32b18, 2632 0x32b20, 0x32b38, 2633 0x32b40, 0x32b58, 2634 0x32b60, 0x32b78, 2635 0x32c00, 0x32c00, 2636 0x32c08, 0x32c3c, 2637 0x33000, 0x3302c, 2638 0x33034, 0x33050, 2639 0x33058, 0x33058, 2640 0x33060, 0x3308c, 2641 0x3309c, 0x330ac, 2642 0x330c0, 0x330c0, 2643 0x330c8, 0x330d0, 2644 0x330d8, 0x330e0, 2645 0x330ec, 0x3312c, 2646 0x33134, 0x33150, 2647 0x33158, 0x33158, 2648 0x33160, 0x3318c, 2649 0x3319c, 0x331ac, 2650 0x331c0, 0x331c0, 2651 0x331c8, 0x331d0, 2652 0x331d8, 0x331e0, 2653 0x331ec, 0x33290, 2654 0x33298, 0x332c4, 2655 0x332e4, 0x33390, 2656 0x33398, 0x333c4, 2657 0x333e4, 0x3342c, 2658 0x33434, 0x33450, 2659 0x33458, 0x33458, 2660 0x33460, 0x3348c, 2661 0x3349c, 0x334ac, 2662 0x334c0, 0x334c0, 2663 
0x334c8, 0x334d0, 2664 0x334d8, 0x334e0, 2665 0x334ec, 0x3352c, 2666 0x33534, 0x33550, 2667 0x33558, 0x33558, 2668 0x33560, 0x3358c, 2669 0x3359c, 0x335ac, 2670 0x335c0, 0x335c0, 2671 0x335c8, 0x335d0, 2672 0x335d8, 0x335e0, 2673 0x335ec, 0x33690, 2674 0x33698, 0x336c4, 2675 0x336e4, 0x33790, 2676 0x33798, 0x337c4, 2677 0x337e4, 0x337fc, 2678 0x33814, 0x33814, 2679 0x33854, 0x33868, 2680 0x33880, 0x3388c, 2681 0x338c0, 0x338d0, 2682 0x338e8, 0x338ec, 2683 0x33900, 0x3392c, 2684 0x33934, 0x33950, 2685 0x33958, 0x33958, 2686 0x33960, 0x3398c, 2687 0x3399c, 0x339ac, 2688 0x339c0, 0x339c0, 2689 0x339c8, 0x339d0, 2690 0x339d8, 0x339e0, 2691 0x339ec, 0x33a90, 2692 0x33a98, 0x33ac4, 2693 0x33ae4, 0x33b10, 2694 0x33b24, 0x33b28, 2695 0x33b38, 0x33b50, 2696 0x33bf0, 0x33c10, 2697 0x33c24, 0x33c28, 2698 0x33c38, 0x33c50, 2699 0x33cf0, 0x33cfc, 2700 0x34000, 0x34030, 2701 0x34100, 0x34168, 2702 0x34190, 0x341a0, 2703 0x341a8, 0x341b8, 2704 0x341c4, 0x341c8, 2705 0x341d0, 0x341d0, 2706 0x34200, 0x34320, 2707 0x34400, 0x344b4, 2708 0x344c0, 0x3452c, 2709 0x34540, 0x3461c, 2710 0x34800, 0x348a0, 2711 0x348c0, 0x34908, 2712 0x34910, 0x349b8, 2713 0x34a00, 0x34a04, 2714 0x34a0c, 0x34a14, 2715 0x34a1c, 0x34a2c, 2716 0x34a44, 0x34a50, 2717 0x34a74, 0x34a74, 2718 0x34a7c, 0x34afc, 2719 0x34b08, 0x34c24, 2720 0x34d00, 0x34d14, 2721 0x34d1c, 0x34d3c, 2722 0x34d44, 0x34d4c, 2723 0x34d54, 0x34d74, 2724 0x34d7c, 0x34d7c, 2725 0x34de0, 0x34de0, 2726 0x34e00, 0x34ed4, 2727 0x34f00, 0x34fa4, 2728 0x34fc0, 0x34fc4, 2729 0x35000, 0x35004, 2730 0x35080, 0x350fc, 2731 0x35208, 0x35220, 2732 0x3523c, 0x35254, 2733 0x35300, 0x35300, 2734 0x35308, 0x3531c, 2735 0x35338, 0x3533c, 2736 0x35380, 0x35380, 2737 0x35388, 0x353a8, 2738 0x353b4, 0x353b4, 2739 0x35400, 0x35420, 2740 0x35438, 0x3543c, 2741 0x35480, 0x35480, 2742 0x354a8, 0x354a8, 2743 0x354b0, 0x354b4, 2744 0x354c8, 0x354d4, 2745 0x35a40, 0x35a4c, 2746 0x35af0, 0x35b20, 2747 0x35b38, 0x35b3c, 2748 0x35b80, 0x35b80, 2749 0x35ba8, 0x35ba8, 
2750 0x35bb0, 0x35bb4, 2751 0x35bc8, 0x35bd4, 2752 0x36140, 0x3618c, 2753 0x361f0, 0x361f4, 2754 0x36200, 0x36200, 2755 0x36218, 0x36218, 2756 0x36400, 0x36400, 2757 0x36408, 0x3641c, 2758 0x36618, 0x36620, 2759 0x36664, 0x36664, 2760 0x366a8, 0x366a8, 2761 0x366ec, 0x366ec, 2762 0x36a00, 0x36abc, 2763 0x36b00, 0x36b18, 2764 0x36b20, 0x36b38, 2765 0x36b40, 0x36b58, 2766 0x36b60, 0x36b78, 2767 0x36c00, 0x36c00, 2768 0x36c08, 0x36c3c, 2769 0x37000, 0x3702c, 2770 0x37034, 0x37050, 2771 0x37058, 0x37058, 2772 0x37060, 0x3708c, 2773 0x3709c, 0x370ac, 2774 0x370c0, 0x370c0, 2775 0x370c8, 0x370d0, 2776 0x370d8, 0x370e0, 2777 0x370ec, 0x3712c, 2778 0x37134, 0x37150, 2779 0x37158, 0x37158, 2780 0x37160, 0x3718c, 2781 0x3719c, 0x371ac, 2782 0x371c0, 0x371c0, 2783 0x371c8, 0x371d0, 2784 0x371d8, 0x371e0, 2785 0x371ec, 0x37290, 2786 0x37298, 0x372c4, 2787 0x372e4, 0x37390, 2788 0x37398, 0x373c4, 2789 0x373e4, 0x3742c, 2790 0x37434, 0x37450, 2791 0x37458, 0x37458, 2792 0x37460, 0x3748c, 2793 0x3749c, 0x374ac, 2794 0x374c0, 0x374c0, 2795 0x374c8, 0x374d0, 2796 0x374d8, 0x374e0, 2797 0x374ec, 0x3752c, 2798 0x37534, 0x37550, 2799 0x37558, 0x37558, 2800 0x37560, 0x3758c, 2801 0x3759c, 0x375ac, 2802 0x375c0, 0x375c0, 2803 0x375c8, 0x375d0, 2804 0x375d8, 0x375e0, 2805 0x375ec, 0x37690, 2806 0x37698, 0x376c4, 2807 0x376e4, 0x37790, 2808 0x37798, 0x377c4, 2809 0x377e4, 0x377fc, 2810 0x37814, 0x37814, 2811 0x37854, 0x37868, 2812 0x37880, 0x3788c, 2813 0x378c0, 0x378d0, 2814 0x378e8, 0x378ec, 2815 0x37900, 0x3792c, 2816 0x37934, 0x37950, 2817 0x37958, 0x37958, 2818 0x37960, 0x3798c, 2819 0x3799c, 0x379ac, 2820 0x379c0, 0x379c0, 2821 0x379c8, 0x379d0, 2822 0x379d8, 0x379e0, 2823 0x379ec, 0x37a90, 2824 0x37a98, 0x37ac4, 2825 0x37ae4, 0x37b10, 2826 0x37b24, 0x37b28, 2827 0x37b38, 0x37b50, 2828 0x37bf0, 0x37c10, 2829 0x37c24, 0x37c28, 2830 0x37c38, 0x37c50, 2831 0x37cf0, 0x37cfc, 2832 0x40040, 0x40040, 2833 0x40080, 0x40084, 2834 0x40100, 0x40100, 2835 0x40140, 0x401bc, 2836 0x40200, 
0x40214, 2837 0x40228, 0x40228, 2838 0x40240, 0x40258, 2839 0x40280, 0x40280, 2840 0x40304, 0x40304, 2841 0x40330, 0x4033c, 2842 0x41304, 0x413c8, 2843 0x413d0, 0x413dc, 2844 0x413f0, 0x413f0, 2845 0x41400, 0x4140c, 2846 0x41414, 0x4141c, 2847 0x41480, 0x414d0, 2848 0x44000, 0x4407c, 2849 0x440c0, 0x441ac, 2850 0x441b4, 0x4427c, 2851 0x442c0, 0x443ac, 2852 0x443b4, 0x4447c, 2853 0x444c0, 0x445ac, 2854 0x445b4, 0x4467c, 2855 0x446c0, 0x447ac, 2856 0x447b4, 0x4487c, 2857 0x448c0, 0x449ac, 2858 0x449b4, 0x44a7c, 2859 0x44ac0, 0x44bac, 2860 0x44bb4, 0x44c7c, 2861 0x44cc0, 0x44dac, 2862 0x44db4, 0x44e7c, 2863 0x44ec0, 0x44fac, 2864 0x44fb4, 0x4507c, 2865 0x450c0, 0x451ac, 2866 0x451b4, 0x451fc, 2867 0x45800, 0x45804, 2868 0x45810, 0x45830, 2869 0x45840, 0x45860, 2870 0x45868, 0x45868, 2871 0x45880, 0x45884, 2872 0x458a0, 0x458b0, 2873 0x45a00, 0x45a04, 2874 0x45a10, 0x45a30, 2875 0x45a40, 0x45a60, 2876 0x45a68, 0x45a68, 2877 0x45a80, 0x45a84, 2878 0x45aa0, 0x45ab0, 2879 0x460c0, 0x460e4, 2880 0x47000, 0x4703c, 2881 0x47044, 0x4708c, 2882 0x47200, 0x47250, 2883 0x47400, 0x47408, 2884 0x47414, 0x47420, 2885 0x47600, 0x47618, 2886 0x47800, 0x47814, 2887 0x47820, 0x4782c, 2888 0x50000, 0x50084, 2889 0x50090, 0x500cc, 2890 0x50300, 0x50384, 2891 0x50400, 0x50400, 2892 0x50800, 0x50884, 2893 0x50890, 0x508cc, 2894 0x50b00, 0x50b84, 2895 0x50c00, 0x50c00, 2896 0x51000, 0x51020, 2897 0x51028, 0x510b0, 2898 0x51300, 0x51324, 2899 }; 2900 2901 u32 *buf_end = (u32 *)((char *)buf + buf_size); 2902 const unsigned int *reg_ranges; 2903 int reg_ranges_size, range; 2904 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); 2905 2906 /* Select the right set of register ranges to dump depending on the 2907 * adapter chip type. 
2908 */ 2909 switch (chip_version) { 2910 case CHELSIO_T4: 2911 reg_ranges = t4_reg_ranges; 2912 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges); 2913 break; 2914 2915 case CHELSIO_T5: 2916 reg_ranges = t5_reg_ranges; 2917 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); 2918 break; 2919 2920 case CHELSIO_T6: 2921 reg_ranges = t6_reg_ranges; 2922 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); 2923 break; 2924 2925 default: 2926 CH_ERR(adap, 2927 "Unsupported chip version %d\n", chip_version); 2928 return; 2929 } 2930 2931 /* Clear the register buffer and insert the appropriate register 2932 * values selected by the above register ranges. 2933 */ 2934 memset(buf, 0, buf_size); 2935 for (range = 0; range < reg_ranges_size; range += 2) { 2936 unsigned int reg = reg_ranges[range]; 2937 unsigned int last_reg = reg_ranges[range + 1]; 2938 u32 *bufp = (u32 *)((char *)buf + reg); 2939 2940 /* Iterate across the register range filling in the register 2941 * buffer but don't write past the end of the register buffer. 2942 */ 2943 while (reg <= last_reg && bufp < buf_end) { 2944 *bufp++ = t4_read_reg(adap, reg); 2945 reg += sizeof(u32); 2946 } 2947 } 2948 } 2949 2950 /* 2951 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 2952 */ 2953 #define EEPROM_DELAY 10 // 10us per poll spin 2954 #define EEPROM_MAX_POLL 5000 // x 5000 == 50ms 2955 2956 #define EEPROM_STAT_ADDR 0x7bfc 2957 #define VPD_SIZE 0x800 2958 #define VPD_BASE 0x400 2959 #define VPD_BASE_OLD 0 2960 #define VPD_LEN 1024 2961 #define VPD_INFO_FLD_HDR_SIZE 3 2962 #define CHELSIO_VPD_UNIQUE_ID 0x82 2963 2964 /* 2965 * Small utility function to wait till any outstanding VPD Access is complete. 2966 * We have a per-adapter state variable "VPD Busy" to indicate when we have a 2967 * VPD Access in flight. This allows us to handle the problem of having a 2968 * previous VPD Access time out and prevent an attempt to inject a new VPD 2969 * Request before any in-flight VPD reguest has completed. 
2970 */ 2971 static int t4_seeprom_wait(struct adapter *adapter) 2972 { 2973 unsigned int base = adapter->params.pci.vpd_cap_addr; 2974 int max_poll; 2975 2976 /* 2977 * If no VPD Access is in flight, we can just return success right 2978 * away. 2979 */ 2980 if (!adapter->vpd_busy) 2981 return 0; 2982 2983 /* 2984 * Poll the VPD Capability Address/Flag register waiting for it 2985 * to indicate that the operation is complete. 2986 */ 2987 max_poll = EEPROM_MAX_POLL; 2988 do { 2989 u16 val; 2990 2991 udelay(EEPROM_DELAY); 2992 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val); 2993 2994 /* 2995 * If the operation is complete, mark the VPD as no longer 2996 * busy and return success. 2997 */ 2998 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) { 2999 adapter->vpd_busy = 0; 3000 return 0; 3001 } 3002 } while (--max_poll); 3003 3004 /* 3005 * Failure! Note that we leave the VPD Busy status set in order to 3006 * avoid pushing a new VPD Access request into the VPD Capability till 3007 * the current operation eventually succeeds. It's a bug to issue a 3008 * new request when an existing request is in flight and will result 3009 * in corrupt hardware state. 3010 */ 3011 return -ETIMEDOUT; 3012 } 3013 3014 /** 3015 * t4_seeprom_read - read a serial EEPROM location 3016 * @adapter: adapter to read 3017 * @addr: EEPROM virtual address 3018 * @data: where to store the read data 3019 * 3020 * Read a 32-bit word from a location in serial EEPROM using the card's PCI 3021 * VPD capability. Note that this function must be called with a virtual 3022 * address. 3023 */ 3024 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data) 3025 { 3026 unsigned int base = adapter->params.pci.vpd_cap_addr; 3027 int ret; 3028 3029 /* 3030 * VPD Accesses must alway be 4-byte aligned! 3031 */ 3032 if (addr >= EEPROMVSIZE || (addr & 3)) 3033 return -EINVAL; 3034 3035 /* 3036 * Wait for any previous operation which may still be in flight to 3037 * complete. 
3038 */ 3039 ret = t4_seeprom_wait(adapter); 3040 if (ret) { 3041 CH_ERR(adapter, "VPD still busy from previous operation\n"); 3042 return ret; 3043 } 3044 3045 /* 3046 * Issue our new VPD Read request, mark the VPD as being busy and wait 3047 * for our request to complete. If it doesn't complete, note the 3048 * error and return it to our caller. Note that we do not reset the 3049 * VPD Busy status! 3050 */ 3051 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr); 3052 adapter->vpd_busy = 1; 3053 adapter->vpd_flag = PCI_VPD_ADDR_F; 3054 ret = t4_seeprom_wait(adapter); 3055 if (ret) { 3056 CH_ERR(adapter, "VPD read of address %#x failed\n", addr); 3057 return ret; 3058 } 3059 3060 /* 3061 * Grab the returned data, swizzle it into our endianess and 3062 * return success. 3063 */ 3064 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data); 3065 *data = le32_to_cpu(*data); 3066 return 0; 3067 } 3068 3069 /** 3070 * t4_seeprom_write - write a serial EEPROM location 3071 * @adapter: adapter to write 3072 * @addr: virtual EEPROM address 3073 * @data: value to write 3074 * 3075 * Write a 32-bit word to a location in serial EEPROM using the card's PCI 3076 * VPD capability. Note that this function must be called with a virtual 3077 * address. 3078 */ 3079 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data) 3080 { 3081 unsigned int base = adapter->params.pci.vpd_cap_addr; 3082 int ret; 3083 u32 stats_reg; 3084 int max_poll; 3085 3086 /* 3087 * VPD Accesses must alway be 4-byte aligned! 3088 */ 3089 if (addr >= EEPROMVSIZE || (addr & 3)) 3090 return -EINVAL; 3091 3092 /* 3093 * Wait for any previous operation which may still be in flight to 3094 * complete. 3095 */ 3096 ret = t4_seeprom_wait(adapter); 3097 if (ret) { 3098 CH_ERR(adapter, "VPD still busy from previous operation\n"); 3099 return ret; 3100 } 3101 3102 /* 3103 * Issue our new VPD Read request, mark the VPD as being busy and wait 3104 * for our request to complete. 
If it doesn't complete, note the 3105 * error and return it to our caller. Note that we do not reset the 3106 * VPD Busy status! 3107 */ 3108 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 3109 cpu_to_le32(data)); 3110 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, 3111 (u16)addr | PCI_VPD_ADDR_F); 3112 adapter->vpd_busy = 1; 3113 adapter->vpd_flag = 0; 3114 ret = t4_seeprom_wait(adapter); 3115 if (ret) { 3116 CH_ERR(adapter, "VPD write of address %#x failed\n", addr); 3117 return ret; 3118 } 3119 3120 /* 3121 * Reset PCI_VPD_DATA register after a transaction and wait for our 3122 * request to complete. If it doesn't complete, return error. 3123 */ 3124 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0); 3125 max_poll = EEPROM_MAX_POLL; 3126 do { 3127 udelay(EEPROM_DELAY); 3128 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg); 3129 } while ((stats_reg & 0x1) && --max_poll); 3130 if (!max_poll) 3131 return -ETIMEDOUT; 3132 3133 /* Return success! */ 3134 return 0; 3135 } 3136 3137 /** 3138 * t4_eeprom_ptov - translate a physical EEPROM address to virtual 3139 * @phys_addr: the physical EEPROM address 3140 * @fn: the PCI function number 3141 * @sz: size of function-specific area 3142 * 3143 * Translate a physical EEPROM address to virtual. The first 1K is 3144 * accessed through virtual addresses starting at 31K, the rest is 3145 * accessed through virtual addresses starting at 0. 3146 * 3147 * The mapping is as follows: 3148 * [0..1K) -> [31K..32K) 3149 * [1K..1K+A) -> [ES-A..ES) 3150 * [1K+A..ES) -> [0..ES-A-1K) 3151 * 3152 * where A = @fn * @sz, and ES = EEPROM size. 
3153 */ 3154 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) 3155 { 3156 fn *= sz; 3157 if (phys_addr < 1024) 3158 return phys_addr + (31 << 10); 3159 if (phys_addr < 1024 + fn) 3160 return EEPROMSIZE - fn + phys_addr - 1024; 3161 if (phys_addr < EEPROMSIZE) 3162 return phys_addr - 1024 - fn; 3163 return -EINVAL; 3164 } 3165 3166 /** 3167 * t4_seeprom_wp - enable/disable EEPROM write protection 3168 * @adapter: the adapter 3169 * @enable: whether to enable or disable write protection 3170 * 3171 * Enables or disables write protection on the serial EEPROM. 3172 */ 3173 int t4_seeprom_wp(struct adapter *adapter, int enable) 3174 { 3175 return t4_os_pci_write_seeprom(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 3176 } 3177 3178 /** 3179 * get_vpd_keyword_val - Locates an information field keyword in the VPD 3180 * @v: Pointer to buffered vpd data structure 3181 * @kw: The keyword to search for 3182 * 3183 * Returns the value of the information field keyword or 3184 * -ENOENT otherwise. 
 */
int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset , len;
	const u8 *buf = (const u8 *)v;
	const u8 *vpdr_len = &v->vpdr_len[0];
	/* VPD-R data begins immediately after the VPD header. */
	offset = sizeof(struct t4_vpd_hdr);
	/* VPD-R length is stored little-endian in the header. */
	len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);

	/* Reject a VPD-R section that claims to extend past our buffer. */
	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
		return -ENOENT;
	}

	/*
	 * Walk the VPD-R information fields.  Each field has a 3-byte
	 * header: two keyword characters followed by a value-length byte.
	 */
	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if(memcmp(buf + i , kw , 2) == 0){
			/* Found: return the offset of the value itself. */
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		/* Skip this field's header plus its value bytes. */
		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -ENOENT;
}

/*
 * str_strip
 * Removes trailing whitespaces from string "s"
 * Based on strstrip() implementation in string.c
 */
static void str_strip(char *s)
{
	size_t size;
	char *end;

	size = strlen(s);
	if (!size)
		return;

	/* Walk back over trailing whitespace and re-terminate the string. */
	end = s + size - 1;
	while (end >= s && isspace(*end))
		end--;
	*(end + 1) = '\0';
}

/**
 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.  Returns 0 on success or a
 * negative error on read failure, missing keyword, or checksum mismatch.
 */
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret = 0, addr;
	int ec, sn, pn, na;	/* offsets of the EC/SN/PN/NA keyword values */
	u8 *vpd, csum;
	const struct t4_vpd_hdr *v;

	vpd = (u8 *)t4_os_alloc(sizeof(u8) * VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	/* We have two VPD data structures stored in the adapter VPD area.
	 * By default, Linux calculates the size of the VPD area by traversing
	 * the first VPD area at offset 0x0, so we need to tell the OS what
	 * our real VPD size is.
	 */
	ret = t4_os_pci_set_vpd_size(adapter, VPD_SIZE);
	if (ret < 0)
		goto out;

	/* Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_os_pci_read_seeprom(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Read the entire VPD area, 4 bytes at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_os_pci_read_seeprom(adapter, addr+i, (u32 *)(vpd+i));
		if (ret)
			goto out;
	}
	v = (const struct t4_vpd_hdr *)vpd;

/* Locate a VPD keyword's value offset, or bail out with -EINVAL. */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(v , name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
} while (0)

	/*
	 * The RV keyword's value is positioned so that the byte sum of all
	 * VPD bytes from offset 0 up through the RV byte itself is zero.
	 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/*
	 * Copy each field into the result structure and strip trailing
	 * whitespace.  The field length byte lives at offset 2 of the
	 * 3-byte field header that precedes each value.
	 */
	memcpy(p->id, v->id_data, ID_LEN);
	str_strip((char *)p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	str_strip((char *)p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	str_strip((char *)p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	str_strip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	str_strip((char *)p->na);

out:
	kmem_free(vpd, sizeof(u8) * VPD_LEN);
	return ret < 0 ? ret : 0;
}

/**
 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM and retrieves the Core
 * Clock.  This can only be called after a connection to the firmware
 * is established.
 */
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int ret;

	/*
	 * Grab the raw VPD parameters.
	 */
	ret = t4_get_raw_vpd_params(adapter, p);
	if (ret)
		return ret;

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &cclk_param, &cclk_val);

	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}

/**
 * t4_get_pfres - retrieve PF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a physical
 * function.  The results are stored in @adapter->params.pfres.
 */
int t4_get_pfres(struct adapter *adapter)
{
	struct pf_resources *pfres = &adapter->params.pfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get PF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_PFVF_CMD_PFN(adapter->pf) |
				    V_FW_PFVF_CMD_VFN(0));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
	if (v != FW_SUCCESS)
		return v;

	/*
	 * Extract PF resource limits from the reply's packed fields and
	 * return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
	pfres->niq = G_FW_PFVF_CMD_NIQ(word);

	word = be32_to_cpu(rpl.type_to_neq);
	pfres->neq = G_FW_PFVF_CMD_NEQ(word);
	pfres->pmask = G_FW_PFVF_CMD_PMASK(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	pfres->tc = G_FW_PFVF_CMD_TC(word);
	pfres->nvi = G_FW_PFVF_CMD_NVI(word);
	pfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	pfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
	pfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
	pfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,		/* program page */
	SF_WR_DISABLE = 4,		/* disable writes */
	SF_RD_STATUS = 5,		/* read status register */
	SF_WR_ENABLE = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_RD_ID = 0x9f,		/* read ID */
	SF_ERASE_SECTOR = 0xd8,		/* erase sector */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial
flash. The location of 3438 * the read needs to be specified prior to calling this by issuing the 3439 * appropriate commands to the serial flash. 3440 */ 3441 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, 3442 int lock, u32 *valp) 3443 { 3444 int ret; 3445 3446 if (!byte_cnt || byte_cnt > 4) 3447 return -EINVAL; 3448 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) 3449 return -EBUSY; 3450 t4_write_reg(adapter, A_SF_OP, 3451 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); 3452 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); 3453 if (!ret) 3454 *valp = t4_read_reg(adapter, A_SF_DATA); 3455 return ret; 3456 } 3457 3458 /** 3459 * sf1_write - write data to the serial flash 3460 * @adapter: the adapter 3461 * @byte_cnt: number of bytes to write 3462 * @cont: whether another operation will be chained 3463 * @lock: whether to lock SF for PL access only 3464 * @val: value to write 3465 * 3466 * Writes up to 4 bytes of data to the serial flash. The location of 3467 * the write needs to be specified prior to calling this by issuing the 3468 * appropriate commands to the serial flash. 3469 */ 3470 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, 3471 int lock, u32 val) 3472 { 3473 if (!byte_cnt || byte_cnt > 4) 3474 return -EINVAL; 3475 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) 3476 return -EBUSY; 3477 t4_write_reg(adapter, A_SF_DATA, val); 3478 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) | 3479 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); 3480 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); 3481 } 3482 3483 /** 3484 * flash_wait_op - wait for a flash operation to complete 3485 * @adapter: the adapter 3486 * @attempts: max number of polls of the status register 3487 * @delay: delay between polls in ms 3488 * 3489 * Wait for a flash operation to complete by polling the status register. 
3490 */ 3491 static int flash_wait_op(struct adapter *adapter, int attempts, int ch_delay) 3492 { 3493 int ret; 3494 u32 status; 3495 3496 while (1) { 3497 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || 3498 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) 3499 return ret; 3500 if (!(status & 1)) 3501 return 0; 3502 if (--attempts == 0) 3503 return -EAGAIN; 3504 if (ch_delay) { 3505 #ifdef CONFIG_CUDBG 3506 if (adapter->flags & K_CRASH) 3507 mdelay(ch_delay); 3508 else 3509 #endif 3510 msleep(ch_delay); 3511 } 3512 } 3513 } 3514 3515 /** 3516 * t4_read_flash - read words from serial flash 3517 * @adapter: the adapter 3518 * @addr: the start address for the read 3519 * @nwords: how many 32-bit words to read 3520 * @data: where to store the read data 3521 * @byte_oriented: whether to store data as bytes or as words 3522 * 3523 * Read the specified number of 32-bit words from the serial flash. 3524 * If @byte_oriented is set the read data is stored as a byte array 3525 * (i.e., big-endian), otherwise as 32-bit words in the platform's 3526 * natural endianness. 
3527 */ 3528 int t4_read_flash(struct adapter *adapter, unsigned int addr, 3529 unsigned int nwords, u32 *data, int byte_oriented) 3530 { 3531 int ret; 3532 3533 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) 3534 return -EINVAL; 3535 3536 addr = swab32(addr) | SF_RD_DATA_FAST; 3537 3538 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || 3539 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) 3540 return ret; 3541 3542 for ( ; nwords; nwords--, data++) { 3543 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); 3544 if (nwords == 1) 3545 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3546 if (ret) 3547 return ret; 3548 if (byte_oriented) 3549 *data = (__force __u32)(cpu_to_be32(*data)); 3550 } 3551 return 0; 3552 } 3553 3554 /** 3555 * t4_write_flash - write up to a page of data to the serial flash 3556 * @adapter: the adapter 3557 * @addr: the start address to write 3558 * @n: length of data to write in bytes 3559 * @data: the data to write 3560 * @byte_oriented: whether to store data as bytes or as words 3561 * 3562 * Writes up to a page of data (256 bytes) to the serial flash starting 3563 * at the given address. All the data must be written to the same page. 3564 * If @byte_oriented is set the write data is stored as byte stream 3565 * (i.e. matches what on disk), otherwise in big-endian. 
3566 */ 3567 int t4_write_flash(struct adapter *adapter, unsigned int addr, 3568 unsigned int n, const u8 *data, int byte_oriented) 3569 { 3570 int ret; 3571 u32 buf[64]; 3572 unsigned int i, c, left, val, offset = addr & 0xff; 3573 3574 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) 3575 return -EINVAL; 3576 3577 val = swab32(addr) | SF_PROG_PAGE; 3578 3579 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 3580 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) 3581 goto unlock; 3582 3583 for (left = n; left; left -= c) { 3584 c = min(left, 4U); 3585 for (val = 0, i = 0; i < c; ++i) 3586 val = (val << 8) + *data++; 3587 3588 if (!byte_oriented) 3589 val = cpu_to_be32(val); 3590 3591 ret = sf1_write(adapter, c, c != left, 1, val); 3592 if (ret) 3593 goto unlock; 3594 } 3595 ret = flash_wait_op(adapter, 8, 1); 3596 if (ret) 3597 goto unlock; 3598 3599 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3600 3601 /* Read the page to verify the write succeeded */ 3602 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 3603 byte_oriented); 3604 if (ret) 3605 return ret; 3606 3607 if (memcmp(data - n, (u8 *)buf + offset, n)) { 3608 CH_ERR(adapter, 3609 "failed to correctly write the flash page at %#x\n", 3610 addr); 3611 return -EIO; 3612 } 3613 return 0; 3614 3615 unlock: 3616 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3617 return ret; 3618 } 3619 3620 /** 3621 * t4_get_fw_version - read the firmware version 3622 * @adapter: the adapter 3623 * @vers: where to place the version 3624 * 3625 * Reads the FW version from flash. 3626 */ 3627 int t4_get_fw_version(struct adapter *adapter, u32 *vers) 3628 { 3629 return t4_read_flash(adapter, FLASH_FW_START + 3630 offsetof(struct fw_hdr, fw_ver), 1, 3631 vers, 0); 3632 } 3633 3634 /** 3635 * t4_get_bs_version - read the firmware bootstrap version 3636 * @adapter: the adapter 3637 * @vers: where to place the version 3638 * 3639 * Reads the FW Bootstrap version from flash. 
3640 */ 3641 int t4_get_bs_version(struct adapter *adapter, u32 *vers) 3642 { 3643 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START + 3644 offsetof(struct fw_hdr, fw_ver), 1, 3645 vers, 0); 3646 } 3647 3648 /** 3649 * t4_get_tp_version - read the TP microcode version 3650 * @adapter: the adapter 3651 * @vers: where to place the version 3652 * 3653 * Reads the TP microcode version from flash. 3654 */ 3655 int t4_get_tp_version(struct adapter *adapter, u32 *vers) 3656 { 3657 return t4_read_flash(adapter, FLASH_FW_START + 3658 offsetof(struct fw_hdr, tp_microcode_ver), 3659 1, vers, 0); 3660 } 3661 3662 /** 3663 * t4_get_exprom_version - return the Expansion ROM version (if any) 3664 * @adapter: the adapter 3665 * @vers: where to place the version 3666 * 3667 * Reads the Expansion ROM header from FLASH and returns the version 3668 * number (if present) through the @vers return value pointer. We return 3669 * this in the Firmware Version Format since it's convenient. Return 3670 * 0 on success, -ENOENT if no Expansion ROM is present. 
3671 */ 3672 int t4_get_exprom_version(struct adapter *adapter, u32 *vers) 3673 { 3674 struct exprom_header { 3675 unsigned char hdr_arr[16]; /* must start with 0x55aa */ 3676 unsigned char hdr_ver[4]; /* Expansion ROM version */ 3677 } *hdr; 3678 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header), 3679 sizeof(u32))]; 3680 int ret; 3681 3682 ret = t4_read_flash(adapter, FLASH_EXP_ROM_START, 3683 ARRAY_SIZE(exprom_header_buf), exprom_header_buf, 3684 0); 3685 if (ret) 3686 return ret; 3687 3688 hdr = (struct exprom_header *)exprom_header_buf; 3689 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa) 3690 return -ENOENT; 3691 3692 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) | 3693 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) | 3694 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) | 3695 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3])); 3696 return 0; 3697 } 3698 3699 /** 3700 * t4_get_scfg_version - return the Serial Configuration version 3701 * @adapter: the adapter 3702 * @vers: where to place the version 3703 * 3704 * Reads the Serial Configuration Version via the Firmware interface 3705 * (thus this can only be called once we're ready to issue Firmware 3706 * commands). The format of the Serial Configuration version is 3707 * adapter specific. Returns 0 on success, an error on failure. 3708 * 3709 * Note that early versions of the Firmware didn't include the ability 3710 * to retrieve the Serial Configuration version, so we zero-out the 3711 * return-value parameter in that case to avoid leaving it with 3712 * garbage in it. 3713 * 3714 * Also note that the Firmware will return its cached copy of the Serial 3715 * Initialization Revision ID, not the actual Revision ID as written in 3716 * the Serial EEPROM. This is only an issue if a new VPD has been written 3717 * and the Firmware/Chip haven't yet gone through a RESET sequence. 
So 3718 * it's best to defer calling this routine till after a FW_RESET_CMD has 3719 * been issued if the Host Driver will be performing a full adapter 3720 * initialization. 3721 */ 3722 int t4_get_scfg_version(struct adapter *adapter, u32 *vers) 3723 { 3724 u32 scfgrev_param; 3725 int ret; 3726 3727 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3728 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV)); 3729 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 3730 1, &scfgrev_param, vers); 3731 if (ret) 3732 *vers = 0; 3733 return ret; 3734 } 3735 3736 /** 3737 * t4_get_vpd_version - return the VPD version 3738 * @adapter: the adapter 3739 * @vers: where to place the version 3740 * 3741 * Reads the VPD via the Firmware interface (thus this can only be called 3742 * once we're ready to issue Firmware commands). The format of the 3743 * VPD version is adapter specific. Returns 0 on success, an error on 3744 * failure. 3745 * 3746 * Note that early versions of the Firmware didn't include the ability 3747 * to retrieve the VPD version, so we zero-out the return-value parameter 3748 * in that case to avoid leaving it with garbage in it. 3749 * 3750 * Also note that the Firmware will return its cached copy of the VPD 3751 * Revision ID, not the actual Revision ID as written in the Serial 3752 * EEPROM. This is only an issue if a new VPD has been written and the 3753 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best 3754 * to defer calling this routine till after a FW_RESET_CMD has been issued 3755 * if the Host Driver will be performing a full adapter initialization. 
3756 */ 3757 int t4_get_vpd_version(struct adapter *adapter, u32 *vers) 3758 { 3759 u32 vpdrev_param; 3760 int ret; 3761 3762 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3763 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV)); 3764 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 3765 1, &vpdrev_param, vers); 3766 if (ret) 3767 *vers = 0; 3768 return ret; 3769 } 3770 3771 /** 3772 * t4_get_version_info - extract various chip/firmware version information 3773 * @adapter: the adapter 3774 * 3775 * Reads various chip/firmware version numbers and stores them into the 3776 * adapter Adapter Parameters structure. If any of the efforts fails 3777 * the first failure will be returned, but all of the version numbers 3778 * will be read. 3779 */ 3780 int t4_get_version_info(struct adapter *adapter) 3781 { 3782 int ret = 0; 3783 3784 #define FIRST_RET(__getvinfo) \ 3785 do { \ 3786 int __ret = __getvinfo; \ 3787 if (__ret && !ret) \ 3788 ret = __ret; \ 3789 } while (0) 3790 3791 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers)); 3792 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers)); 3793 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers)); 3794 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers)); 3795 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers)); 3796 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers)); 3797 3798 #undef FIRST_RET 3799 3800 return ret; 3801 } 3802 3803 /** 3804 * t4_dump_version_info - dump all of the adapter configuration IDs 3805 * @adapter: the adapter 3806 * 3807 * Dumps all of the various bits of adapter configuration version/revision 3808 * IDs information. This is typically called at some point after 3809 * t4_get_version_info() has been called. 3810 */ 3811 void t4_dump_version_info(struct adapter *adapter) 3812 { 3813 /* 3814 * Device information. 
3815 */ 3816 CH_INFO(adapter, "Chelsio %s rev %d\n", 3817 adapter->params.vpd.id, 3818 CHELSIO_CHIP_RELEASE(adapter->params.chip)); 3819 CH_INFO(adapter, "S/N: %s, P/N: %s\n", 3820 adapter->params.vpd.sn, 3821 adapter->params.vpd.pn); 3822 3823 /* 3824 * Firmware Version. 3825 */ 3826 if (!adapter->params.fw_vers) 3827 CH_WARN(adapter, "No firmware loaded\n"); 3828 else 3829 CH_INFO(adapter, "Firmware version: %u.%u.%u.%u\n", 3830 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers), 3831 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers), 3832 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers), 3833 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers)); 3834 3835 /* 3836 * Bootstrap Firmware Version. (Some adapters don't have Bootstrap 3837 * Firmware, so dev_info() is more appropriate here.) 3838 */ 3839 if (!adapter->params.bs_vers) 3840 CH_INFO(adapter, "No bootstrap loaded\n"); 3841 else 3842 CH_INFO(adapter, "Bootstrap version: %u.%u.%u.%u\n", 3843 G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers), 3844 G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers), 3845 G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers), 3846 G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers)); 3847 3848 /* 3849 * TP Microcode Version. 3850 */ 3851 if (!adapter->params.tp_vers) 3852 CH_WARN(adapter, "No TP Microcode loaded\n"); 3853 else 3854 CH_INFO(adapter, "TP Microcode version: %u.%u.%u.%u\n", 3855 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers), 3856 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers), 3857 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers), 3858 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers)); 3859 3860 /* 3861 * Expansion ROM version. 
3862 */ 3863 if (!adapter->params.er_vers) 3864 CH_INFO(adapter, "No Expansion ROM loaded\n"); 3865 else 3866 CH_INFO(adapter, "Expansion ROM version: %u.%u.%u.%u\n", 3867 G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers), 3868 G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers), 3869 G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers), 3870 G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers)); 3871 3872 3873 /* 3874 * Serial Configuration version. 3875 */ 3876 CH_INFO(adapter, "Serial Configuration version: %x\n", 3877 adapter->params.scfg_vers); 3878 3879 /* 3880 * VPD version. 3881 */ 3882 CH_INFO(adapter, "VPD version: %x\n", 3883 adapter->params.vpd_vers); 3884 } 3885 3886 /** 3887 * t4_check_fw_version - check if the FW is supported with this driver 3888 * @adap: the adapter 3889 * 3890 * Checks if an adapter's FW is compatible with the driver. Returns 0 3891 * if there's exact match, a negative error if the version could not be 3892 * read or there's a major version mismatch 3893 */ 3894 int t4_check_fw_version(struct adapter *adap) 3895 { 3896 int ret, major, minor, micro; 3897 int exp_major, exp_minor, exp_micro; 3898 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); 3899 3900 ret = t4_get_fw_version(adap, &adap->params.fw_vers); 3901 if (ret) 3902 return ret; 3903 3904 major = G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers); 3905 minor = G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers); 3906 micro = G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers); 3907 3908 switch (chip_version) { 3909 case CHELSIO_T4: 3910 exp_major = T4FW_MIN_VERSION_MAJOR; 3911 exp_minor = T4FW_MIN_VERSION_MINOR; 3912 exp_micro = T4FW_MIN_VERSION_MICRO; 3913 break; 3914 case CHELSIO_T5: 3915 exp_major = T5FW_MIN_VERSION_MAJOR; 3916 exp_minor = T5FW_MIN_VERSION_MINOR; 3917 exp_micro = T5FW_MIN_VERSION_MICRO; 3918 break; 3919 case CHELSIO_T6: 3920 exp_major = T6FW_MIN_VERSION_MAJOR; 3921 exp_minor = T6FW_MIN_VERSION_MINOR; 3922 exp_micro = T6FW_MIN_VERSION_MICRO; 3923 break; 3924 default: 3925 
CH_ERR(adap, "Unsupported chip type, %x\n", 3926 adap->params.chip); 3927 return -EINVAL; 3928 } 3929 3930 if (major < exp_major || (major == exp_major && minor < exp_minor) || 3931 (major == exp_major && minor == exp_minor && micro < exp_micro)) { 3932 CH_ERR(adap, "Card has firmware version %u.%u.%u, minimum " 3933 "supported firmware is %u.%u.%u.\n", major, minor, 3934 micro, exp_major, exp_minor, exp_micro); 3935 return -EFAULT; 3936 } 3937 return 0; 3938 } 3939 3940 /* Is the given firmware API compatible with the one the driver was compiled 3941 * with? 3942 */ 3943 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 3944 { 3945 3946 /* short circuit if it's the exact same firmware version */ 3947 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 3948 return 1; 3949 3950 /* 3951 * XXX: Is this too conservative? Perhaps I should limit this to the 3952 * features that are supported in the driver. 3953 */ 3954 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 3955 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 3956 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 3957 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 3958 return 1; 3959 #undef SAME_INTF 3960 3961 return 0; 3962 } 3963 3964 /* The firmware in the filesystem is usable, but should it be installed? 3965 * This routine explains itself in detail if it indicates the filesystem 3966 * firmware should be installed. 
3967 */ 3968 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, 3969 int k, int c, int t4_fw_install) 3970 { 3971 const char *reason; 3972 3973 if (!card_fw_usable) { 3974 reason = "incompatible or unusable"; 3975 goto install; 3976 } 3977 3978 if (k > c) { 3979 reason = "older than the version bundled with this driver"; 3980 goto install; 3981 } 3982 3983 if (t4_fw_install == 2 && k != c) { 3984 reason = "different than the version bundled with this driver"; 3985 goto install; 3986 } 3987 3988 return 0; 3989 3990 install: 3991 if (t4_fw_install == 0) { 3992 CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, " 3993 "but the driver is prohibited from installing a " 3994 "different firmware on the card.\n", 3995 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3996 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 3997 reason); 3998 3999 return (0); 4000 } 4001 4002 CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, " 4003 "installing firmware %u.%u.%u.%u on card.\n", 4004 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 4005 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 4006 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 4007 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 4008 4009 return 1; 4010 } 4011 4012 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, 4013 const u8 *fw_data, unsigned int fw_size, 4014 struct fw_hdr *card_fw, const int t4_fw_install, 4015 enum dev_state state, int *reset) 4016 { 4017 int ret, card_fw_usable, fs_fw_usable; 4018 const struct fw_hdr *fs_fw; 4019 const struct fw_hdr *drv_fw; 4020 4021 drv_fw = &fw_info->fw_hdr; 4022 4023 /* Read the header of the firmware on the card */ 4024 ret = -t4_read_flash(adap, FLASH_FW_START, 4025 sizeof(*card_fw) / sizeof(uint32_t), 4026 (uint32_t *)card_fw, 1); 4027 if (ret == 0) { 4028 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); 4029 } else { 4030 CH_ERR(adap, 4031 "Unable to read card's firmware header: %d\n", ret); 
4032 card_fw_usable = 0; 4033 } 4034 4035 if (fw_data != NULL) { 4036 fs_fw = (const void *)fw_data; 4037 fs_fw_usable = fw_compatible(drv_fw, fs_fw); 4038 } else { 4039 fs_fw = NULL; 4040 fs_fw_usable = 0; 4041 } 4042 4043 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 4044 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { 4045 /* Common case: the firmware on the card is an exact match and 4046 * the filesystem one is an exact match too, or the filesystem 4047 * one is absent/incompatible. Note that t4_fw_install = 2 4048 * is ignored here -- use cxgbtool loadfw if you want to 4049 * reinstall the same firmware as the one on the card. 4050 */ 4051 } else if (fs_fw_usable && state == DEV_STATE_UNINIT && 4052 should_install_fs_fw(adap, card_fw_usable, 4053 be32_to_cpu(fs_fw->fw_ver), 4054 be32_to_cpu(card_fw->fw_ver), 4055 t4_fw_install)) { 4056 4057 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, 4058 fw_size, 0); 4059 if (ret != 0) { 4060 CH_ERR(adap, 4061 "failed to install firmware: %d\n", ret); 4062 goto bye; 4063 } 4064 4065 /* Installed successfully, update cached information */ 4066 memcpy(card_fw, fs_fw, sizeof(*card_fw)); 4067 (void)t4_init_devlog_params(adap, 1); 4068 card_fw_usable = 1; 4069 *reset = 0; /* already reset as part of load_fw */ 4070 } 4071 4072 if (!card_fw_usable) { 4073 uint32_t d, c, k; 4074 4075 d = be32_to_cpu(drv_fw->fw_ver); 4076 c = be32_to_cpu(card_fw->fw_ver); 4077 k = fs_fw ? 
be32_to_cpu(fs_fw->fw_ver) : 0; 4078 4079 CH_ERR(adap, "Cannot find a usable firmware: " 4080 "fw_install %d, chip state %d, " 4081 "driver compiled with %d.%d.%d.%d, " 4082 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", 4083 t4_fw_install, state, 4084 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 4085 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 4086 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 4087 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 4088 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 4089 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 4090 ret = EINVAL; 4091 goto bye; 4092 } 4093 4094 /* We're using whatever's on the card and it's known to be good. */ 4095 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver); 4096 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); 4097 4098 bye: 4099 return ret; 4100 4101 } 4102 4103 /** 4104 * t4_flash_erase_sectors - erase a range of flash sectors 4105 * @adapter: the adapter 4106 * @start: the first sector to erase 4107 * @end: the last sector to erase 4108 * 4109 * Erases the sectors in the given inclusive range. 
4110 */ 4111 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) 4112 { 4113 int ret = 0; 4114 4115 if (end >= adapter->params.sf_nsec) 4116 return -EINVAL; 4117 4118 while (start <= end) { 4119 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 4120 (ret = sf1_write(adapter, 4, 0, 1, 4121 SF_ERASE_SECTOR | (start << 8))) != 0 || 4122 (ret = flash_wait_op(adapter, 14, 500)) != 0) { 4123 CH_ERR(adapter, 4124 "erase of flash sector %d failed, error %d\n", 4125 start, ret); 4126 break; 4127 } 4128 start++; 4129 } 4130 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 4131 return ret; 4132 } 4133 4134 /** 4135 * t4_flash_cfg_addr - return the address of the flash configuration file 4136 * @adapter: the adapter 4137 * 4138 * Return the address within the flash where the Firmware Configuration 4139 * File is stored, or an error if the device FLASH is too small to contain 4140 * a Firmware Configuration File. 4141 */ 4142 int t4_flash_cfg_addr(struct adapter *adapter) 4143 { 4144 /* 4145 * If the device FLASH isn't large enough to hold a Firmware 4146 * Configuration File, return an error. 4147 */ 4148 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE) 4149 return -ENOSPC; 4150 4151 return FLASH_CFG_START; 4152 } 4153 4154 /* Return TRUE if the specified firmware matches the adapter. I.e. T4 4155 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead 4156 * and emit an error message for mismatched firmware to save our caller the 4157 * effort ... 4158 */ 4159 static int t4_fw_matches_chip(const struct adapter *adap, 4160 const struct fw_hdr *hdr) 4161 { 4162 /* 4163 * The expression below will return FALSE for any unsupported adapter 4164 * which will keep us "honest" in the future ... 
4165 */ 4166 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) || 4167 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) || 4168 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6)) 4169 return 1; 4170 4171 CH_ERR(adap, 4172 "FW image (%d) is not suitable for this adapter (%d)\n", 4173 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip)); 4174 return 0; 4175 } 4176 4177 /** 4178 * t4_load_fw - download firmware 4179 * @adap: the adapter 4180 * @fw_data: the firmware image to write 4181 * @size: image size 4182 * @bootstrap: indicates if the binary is a bootstrap fw 4183 * 4184 * Write the supplied firmware image to the card's serial flash. 4185 */ 4186 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size, 4187 unsigned int bootstrap) 4188 { 4189 u32 csum; 4190 int ret, addr; 4191 unsigned int i; 4192 u8 first_page[SF_PAGE_SIZE]; 4193 const __be32 *p = (const __be32 *)fw_data; 4194 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; 4195 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 4196 unsigned int fw_start_sec; 4197 unsigned int fw_start; 4198 unsigned int fw_size; 4199 4200 if (bootstrap) { 4201 fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC; 4202 fw_start = FLASH_FWBOOTSTRAP_START; 4203 fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE; 4204 } else { 4205 fw_start_sec = FLASH_FW_START_SEC; 4206 fw_start = FLASH_FW_START; 4207 fw_size = FLASH_FW_MAX_SIZE; 4208 } 4209 4210 if (!size) { 4211 CH_ERR(adap, "FW image has no data\n"); 4212 return -EINVAL; 4213 } 4214 if (size & 511) { 4215 CH_ERR(adap, 4216 "FW image size not multiple of 512 bytes\n"); 4217 return -EINVAL; 4218 } 4219 if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) { 4220 CH_ERR(adap, 4221 "FW image size differs from size in FW header\n"); 4222 return -EINVAL; 4223 } 4224 if (size > fw_size) { 4225 CH_ERR(adap, "FW image too large, max is %u bytes\n", 4226 fw_size); 4227 return -EFBIG; 4228 } 4229 if (!t4_fw_matches_chip(adap, 
hdr)) 4230 return -EINVAL; 4231 4232 for (csum = 0, i = 0; i < size / sizeof(csum); i++) 4233 csum += be32_to_cpu(p[i]); 4234 4235 if (csum != 0xffffffff) { 4236 CH_ERR(adap, 4237 "corrupted firmware image, checksum %#x\n", csum); 4238 return -EINVAL; 4239 } 4240 4241 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ 4242 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); 4243 if (ret) 4244 goto out; 4245 4246 /* 4247 * We write the correct version at the end so the driver can see a bad 4248 * version if the FW write fails. Start by writing a copy of the 4249 * first page with a bad version. 4250 */ 4251 memcpy(first_page, fw_data, SF_PAGE_SIZE); 4252 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff); 4253 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1); 4254 if (ret) 4255 goto out; 4256 4257 addr = fw_start; 4258 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 4259 addr += SF_PAGE_SIZE; 4260 fw_data += SF_PAGE_SIZE; 4261 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1); 4262 if (ret) 4263 goto out; 4264 } 4265 4266 ret = t4_write_flash(adap, 4267 fw_start + offsetof(struct fw_hdr, fw_ver), 4268 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1); 4269 out: 4270 if (ret) 4271 CH_ERR(adap, "firmware download failed, error %d\n", 4272 ret); 4273 else { 4274 if (bootstrap) 4275 ret = t4_get_bs_version(adap, &adap->params.bs_vers); 4276 else 4277 ret = t4_get_fw_version(adap, &adap->params.fw_vers); 4278 } 4279 return ret; 4280 } 4281 4282 /** 4283 * t4_phy_fw_ver - return current PHY firmware version 4284 * @adap: the adapter 4285 * @phy_fw_ver: return value buffer for PHY firmware version 4286 * 4287 * Returns the current version of external PHY firmware on the 4288 * adapter. 
4289 */ 4290 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) 4291 { 4292 u32 param, val; 4293 int ret; 4294 4295 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 4296 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) | 4297 V_FW_PARAMS_PARAM_Y(adap->params.portvec) | 4298 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_VERSION)); 4299 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, 4300 ¶m, &val); 4301 if (ret < 0) 4302 return ret; 4303 *phy_fw_ver = val; 4304 return 0; 4305 } 4306 4307 /** 4308 * t4_load_phy_fw - download port PHY firmware 4309 * @adap: the adapter 4310 * @win: the PCI-E Memory Window index to use for t4_memory_rw() 4311 * @lock: the lock to use to guard the memory copy 4312 * @phy_fw_version: function to check PHY firmware versions 4313 * @phy_fw_data: the PHY firmware image to write 4314 * @phy_fw_size: image size 4315 * 4316 * Transfer the specified PHY firmware to the adapter. If a non-NULL 4317 * @phy_fw_version is supplied, then it will be used to determine if 4318 * it's necessary to perform the transfer by comparing the version 4319 * of any existing adapter PHY firmware with that of the passed in 4320 * PHY firmware image. If @lock is non-NULL then it will be used 4321 * around the call to t4_memory_rw() which transfers the PHY firmware 4322 * to the adapter. 4323 * 4324 * A negative error number will be returned if an error occurs. If 4325 * version number support is available and there's no need to upgrade 4326 * the firmware, 0 will be returned. If firmware is successfully 4327 * transferred to the adapter, 1 will be retured. 4328 * 4329 * NOTE: some adapters only have local RAM to store the PHY firmware. As 4330 * a result, a RESET of the adapter would cause that RAM to lose its 4331 * contents. Thus, loading PHY firmware on such adapters must happen after any 4332 * FW_RESET_CMDs ... 
4333 */ 4334 int t4_load_phy_fw(struct adapter *adap, 4335 int win, t4_os_lock_t *lock, 4336 int (*phy_fw_version)(const u8 *, size_t), 4337 const u8 *phy_fw_data, size_t phy_fw_size) 4338 { 4339 unsigned long mtype = 0, maddr = 0; 4340 u32 param, val; 4341 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0; 4342 int ret; 4343 4344 /* 4345 * If we have version number support, then check to see if the adapter 4346 * already has up-to-date PHY firmware loaded. 4347 */ 4348 if (phy_fw_version) { 4349 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); 4350 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); 4351 if (ret < 0) 4352 return ret;; 4353 4354 if (cur_phy_fw_ver >= new_phy_fw_vers) { 4355 CH_WARN(adap, "PHY Firmware already up-to-date, " 4356 "version %#x\n", cur_phy_fw_ver); 4357 return 0; 4358 } 4359 } 4360 4361 /* 4362 * Ask the firmware where it wants us to copy the PHY firmware image. 4363 * The size of the file requires a special version of the READ coommand 4364 * which will pass the file size via the values field in PARAMS_CMD and 4365 * retreive the return value from firmware and place it in the same 4366 * buffer values 4367 */ 4368 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 4369 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) | 4370 V_FW_PARAMS_PARAM_Y(adap->params.portvec) | 4371 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); 4372 val = phy_fw_size; 4373 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1, 4374 ¶m, &val, 1, true); 4375 if (ret < 0) 4376 return ret; 4377 mtype = val >> 8; 4378 maddr = (val & 0xff) << 16; 4379 4380 /* 4381 * Copy the supplied PHY Firmware image to the adapter memory location 4382 * allocated by the adapter firmware. 
4383 */ 4384 if (lock) 4385 t4_os_lock(lock); 4386 ret = t4_memory_rw(adap, win, mtype, maddr, 4387 phy_fw_size, (__be32*)phy_fw_data, 4388 T4_MEMORY_WRITE); 4389 if (lock) 4390 t4_os_unlock(lock); 4391 if (ret) 4392 return ret; 4393 4394 /* 4395 * Tell the firmware that the PHY firmware image has been written to 4396 * RAM and it can now start copying it over to the PHYs. The chip 4397 * firmware will RESET the affected PHYs as part of this operation 4398 * leaving them running the new PHY firmware image. 4399 */ 4400 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 4401 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) | 4402 V_FW_PARAMS_PARAM_Y(adap->params.portvec) | 4403 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); 4404 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, 4405 ¶m, &val, 30000); 4406 4407 /* 4408 * If we have version number support, then check to see that the new 4409 * firmware got loaded properly. 4410 */ 4411 if (phy_fw_version) { 4412 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); 4413 if (ret < 0) 4414 return ret; 4415 4416 if (cur_phy_fw_ver != new_phy_fw_vers) { 4417 CH_WARN(adap, "PHY Firmware did not update: " 4418 "version on adapter %#x, " 4419 "version flashed %#x\n", 4420 cur_phy_fw_ver, new_phy_fw_vers); 4421 return -ENXIO; 4422 } 4423 } 4424 4425 return 1; 4426 } 4427 4428 /** 4429 * t4_fwcache - firmware cache operation 4430 * @adap: the adapter 4431 * @op : the operation (flush or flush and invalidate) 4432 */ 4433 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) 4434 { 4435 struct fw_params_cmd c; 4436 4437 memset(&c, 0, sizeof(c)); 4438 c.op_to_vfn = 4439 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | 4440 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 4441 V_FW_PARAMS_CMD_PFN(adap->pf) | 4442 V_FW_PARAMS_CMD_VFN(0)); 4443 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 4444 c.param[0].mnem = 4445 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 4446 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE)); 4447 
c.param[0].val = (__force __be32)op; 4448 4449 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); 4450 } 4451 4452 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, 4453 unsigned int *pif_req_wrptr, 4454 unsigned int *pif_rsp_wrptr) 4455 { 4456 int i, j; 4457 u32 cfg, val, req, rsp; 4458 4459 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG); 4460 if (cfg & F_LADBGEN) 4461 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); 4462 4463 val = t4_read_reg(adap, A_CIM_DEBUGSTS); 4464 req = G_POLADBGWRPTR(val); 4465 rsp = G_PILADBGWRPTR(val); 4466 if (pif_req_wrptr) 4467 *pif_req_wrptr = req; 4468 if (pif_rsp_wrptr) 4469 *pif_rsp_wrptr = rsp; 4470 4471 for (i = 0; i < CIM_PIFLA_SIZE; i++) { 4472 for (j = 0; j < 6; j++) { 4473 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) | 4474 V_PILADBGRDPTR(rsp)); 4475 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA); 4476 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA); 4477 req++; 4478 rsp++; 4479 } 4480 req = (req + 2) & M_POLADBGRDPTR; 4481 rsp = (rsp + 2) & M_PILADBGRDPTR; 4482 } 4483 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); 4484 } 4485 4486 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp) 4487 { 4488 u32 cfg; 4489 int i, j, idx; 4490 4491 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG); 4492 if (cfg & F_LADBGEN) 4493 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); 4494 4495 for (i = 0; i < CIM_MALA_SIZE; i++) { 4496 for (j = 0; j < 5; j++) { 4497 idx = 8 * i + j; 4498 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) | 4499 V_PILADBGRDPTR(idx)); 4500 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA); 4501 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA); 4502 } 4503 } 4504 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); 4505 } 4506 4507 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) 4508 { 4509 unsigned int i, j; 4510 4511 for (i = 0; i < 8; i++) { 4512 u32 *p = la_buf + i; 4513 4514 t4_write_reg(adap, A_ULP_RX_LA_CTL, i); 4515 j = t4_read_reg(adap, 
A_ULP_RX_LA_WRPTR); 4516 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j); 4517 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8) 4518 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA); 4519 } 4520 } 4521 4522 /* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port 4523 * Capabilities which we control with separate controls -- see, for instance, 4524 * Pause Frames and Forward Error Correction. In order to determine what the 4525 * full set of Advertised Port Capabilities are, the base Advertised Port 4526 * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised 4527 * Port Capabilities associated with those other controls. See 4528 * t4_link_acaps() for how this is done. 4529 */ 4530 #define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \ 4531 FW_PORT_CAP32_ANEG) 4532 4533 /** 4534 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits 4535 * @caps16: a 16-bit Port Capabilities value 4536 * 4537 * Returns the equivalent 32-bit Port Capabilities value. 
4538 */ 4539 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) 4540 { 4541 fw_port_cap32_t caps32 = 0; 4542 4543 #define CAP16_TO_CAP32(__cap) \ 4544 do { \ 4545 if (caps16 & FW_PORT_CAP_##__cap) \ 4546 caps32 |= FW_PORT_CAP32_##__cap; \ 4547 } while (0) 4548 4549 CAP16_TO_CAP32(SPEED_100M); 4550 CAP16_TO_CAP32(SPEED_1G); 4551 CAP16_TO_CAP32(SPEED_25G); 4552 CAP16_TO_CAP32(SPEED_10G); 4553 CAP16_TO_CAP32(SPEED_40G); 4554 CAP16_TO_CAP32(SPEED_100G); 4555 CAP16_TO_CAP32(FC_RX); 4556 CAP16_TO_CAP32(FC_TX); 4557 CAP16_TO_CAP32(ANEG); 4558 CAP16_TO_CAP32(FORCE_PAUSE); 4559 CAP16_TO_CAP32(MDIAUTO); 4560 CAP16_TO_CAP32(MDISTRAIGHT); 4561 CAP16_TO_CAP32(FEC_RS); 4562 CAP16_TO_CAP32(FEC_BASER_RS); 4563 CAP16_TO_CAP32(802_3_PAUSE); 4564 CAP16_TO_CAP32(802_3_ASM_DIR); 4565 4566 #undef CAP16_TO_CAP32 4567 4568 return caps32; 4569 } 4570 4571 /** 4572 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits 4573 * @caps32: a 32-bit Port Capabilities value 4574 * 4575 * Returns the equivalent 16-bit Port Capabilities value. Note that 4576 * not all 32-bit Port Capabilities can be represented in the 16-bit 4577 * Port Capabilities and some fields/values may not make it. 
4578 */ 4579 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) 4580 { 4581 fw_port_cap16_t caps16 = 0; 4582 4583 #define CAP32_TO_CAP16(__cap) \ 4584 do { \ 4585 if (caps32 & FW_PORT_CAP32_##__cap) \ 4586 caps16 |= FW_PORT_CAP_##__cap; \ 4587 } while (0) 4588 4589 CAP32_TO_CAP16(SPEED_100M); 4590 CAP32_TO_CAP16(SPEED_1G); 4591 CAP32_TO_CAP16(SPEED_10G); 4592 CAP32_TO_CAP16(SPEED_25G); 4593 CAP32_TO_CAP16(SPEED_40G); 4594 CAP32_TO_CAP16(SPEED_100G); 4595 CAP32_TO_CAP16(FC_RX); 4596 CAP32_TO_CAP16(FC_TX); 4597 CAP32_TO_CAP16(802_3_PAUSE); 4598 CAP32_TO_CAP16(802_3_ASM_DIR); 4599 CAP32_TO_CAP16(ANEG); 4600 CAP32_TO_CAP16(FORCE_PAUSE); 4601 CAP32_TO_CAP16(MDIAUTO); 4602 CAP32_TO_CAP16(MDISTRAIGHT); 4603 CAP32_TO_CAP16(FEC_RS); 4604 CAP32_TO_CAP16(FEC_BASER_RS); 4605 4606 #undef CAP32_TO_CAP16 4607 4608 return caps16; 4609 } 4610 4611 /* Translate Firmware Port Capabilities Pause specification to Common Code */ 4612 static inline cc_pause_t fwcap_to_cc_pause(fw_port_cap32_t fw_pause) 4613 { 4614 cc_pause_t cc_pause = 0; 4615 4616 if (fw_pause & FW_PORT_CAP32_FC_RX) 4617 cc_pause |= PAUSE_RX; 4618 if (fw_pause & FW_PORT_CAP32_FC_TX) 4619 cc_pause |= PAUSE_TX; 4620 4621 return cc_pause; 4622 } 4623 4624 /* Translate Common Code Pause specification into Firmware Port Capabilities */ 4625 static inline fw_port_cap32_t cc_to_fwcap_pause(cc_pause_t cc_pause) 4626 { 4627 fw_port_cap32_t fw_pause = 0; 4628 4629 /* Translate orthogonal RX/TX Pause Controls for L1 Configure 4630 * commands, etc. 
 */
	if (cc_pause & PAUSE_RX)
		fw_pause |= FW_PORT_CAP32_FC_RX;
	if (cc_pause & PAUSE_TX)
		fw_pause |= FW_PORT_CAP32_FC_TX;
	/* Without PAUSE_AUTONEG the Pause settings are forced on the link */
	if (!(cc_pause & PAUSE_AUTONEG))
		fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;

	return fw_pause;
}

/* Translate Firmware Forward Error Correction specification to Common Code.
 * An empty Firmware FEC specification maps to FEC_NONE.
 */
static inline cc_fec_t fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
{
	cc_fec_t cc_fec = 0;

	if (fw_fec & FW_PORT_CAP32_FEC_RS)
		cc_fec |= FEC_RS;
	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
		cc_fec |= FEC_BASER_RS;

	if (cc_fec == 0)
		cc_fec = FEC_NONE;

	return (cc_fec);
}

/* Translate Common Code Forward Error Correction specification to Firmware.
 *
 * Stores the Firmware FEC capabilities through @fw_fecp and returns B_TRUE
 * when a usable translation exists; returns B_FALSE otherwise (for example
 * when only BASE-R is requested on a 100G-capable port).  BASE-R is never
 * selected for 100G-capable ports.
 */
static inline boolean_t
cc_to_fwcap_fec(fw_port_cap32_t *fw_fecp, cc_fec_t cc_fec,
    struct link_config *lc)
{
	fw_port_cap32_t fw_fec = 0;

	if ((cc_fec & FEC_AUTO) != 0) {
		/* BASE-R is excluded on 100G-capable ports */
		if ((lc->pcaps & FW_PORT_CAP32_SPEED_100G) == 0)
			fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;

		/* NOTE(review): NO_FEC is only added when the port
		 * advertises FORCE_FEC -- presumably older firmware
		 * without FORCE_FEC doesn't understand it; confirm.
		 */
		if ((lc->pcaps & FW_PORT_CAP32_FORCE_FEC) != 0)
			fw_fec |= FW_PORT_CAP32_FEC_NO_FEC;

		fw_fec |= FW_PORT_CAP32_FEC_RS;

		*fw_fecp = fw_fec;
		return (B_TRUE);
	}

	if ((cc_fec & FEC_RS) != 0)
		fw_fec |= FW_PORT_CAP32_FEC_RS;

	if ((cc_fec & FEC_BASER_RS) != 0 &&
	    (lc->pcaps & FW_PORT_CAP32_SPEED_100G) == 0)
		fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;

	if ((cc_fec & FEC_NONE) != 0) {
		if ((lc->pcaps & FW_PORT_CAP32_FORCE_FEC) != 0) {
			fw_fec |= FW_PORT_CAP32_FORCE_FEC;
			fw_fec |= FW_PORT_CAP32_FEC_NO_FEC;
		}

		*fw_fecp = fw_fec;
		return (B_TRUE);
	}

	/* Nothing requested survived the filters above */
	if (fw_fec == 0)
		return (B_FALSE);

	if ((lc->pcaps & FW_PORT_CAP32_FORCE_FEC) != 0)
		fw_fec |= FW_PORT_CAP32_FORCE_FEC;

	*fw_fecp = fw_fec;
	return (B_TRUE);
}

/**
 * t4_link_acaps - compute Link Advertised Port
Capabilities
 * @adapter: the adapter
 * @port: the Port ID
 * @lc: the Port's Link Configuration
 *
 * Synthesize the Advertised Port Capabilities we'll be using based on
 * the base Advertised Port Capabilities (which have been filtered by
 * ADVERT_MASK) plus the individual controls for things like Pause
 * Frames, Forward Error Correction, MDI, etc.
 */
fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
			      struct link_config *lc)
{
	unsigned int fw_mdi =
		(V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
	fw_port_cap32_t fw_fc, fw_fec, acaps;
	cc_fec_t cc_fec;

	/* Convert driver coding of Pause Frame Flow Control settings into the
	 * Firmware's API.
	 */
	fw_fc = cc_to_fwcap_pause(lc->requested_fc);

	/* Convert Common Code Forward Error Control settings into the
	 * Firmware's API.  If the current Requested FEC has "Automatic"
	 * (IEEE 802.3) specified, then we use whatever the Firmware
	 * sent us as part of its IEEE 802.3-based interpretation of
	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
	 * use whatever is in the current Requested FEC settings.
	 */
	if (fec_supported(lc->pcaps)) {
		if (lc->requested_fec & FEC_AUTO)
			cc_fec = fwcap_to_cc_fec(lc->def_acaps);
		else
			cc_fec = lc->requested_fec;

		/* A failed translation means nothing valid can be
		 * advertised.
		 */
		if (!cc_to_fwcap_fec(&fw_fec, cc_fec, lc))
			return (0);
	} else {
		fw_fec = 0;
		cc_fec = FEC_NONE;
	}

	/* Figure out what our Requested Port Capabilities are going to be.
	 * Note parallel structure in t4_handle_get_port_info() and
	 * init_link_config().
	 */
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
		/* Port has no autonegotiation at all: advertise the base
		 * capabilities and latch the forced Pause/FEC settings.
		 */
		acaps = lc->acaps | fw_fc | fw_fec;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* Autonegotiation administratively disabled: advertise only
		 * the requested speed and latch the forced settings.
		 */
		acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else
		acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;

	/* Some Requested Port Capabilities are trivially wrong if they exceed
	 * the Physical Port Capabilities.  We can check that here and provide
	 * moderately useful feedback in the system log.
	 *
	 * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
	 * we need to exclude this from this check in order to maintain
	 * compatibility ...
	 */
	if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
		CH_ERR(adapter,
		       "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
		       acaps, lc->pcaps);
		return 0;
	}

	return acaps;
}

/**
 * t4_link_l1cfg_core - apply link configuration to MAC/PHY
 * @adapter: the adapter
 * @mbox: the Firmware Mailbox to use
 * @port: the Port ID
 * @lc: the Port's Link Configuration
 * @sleep_ok: if true we may sleep while awaiting command completion
 * @timeout: time to wait for command to finish before timing out
 *	(negative implies @sleep_ok=false)
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
		       unsigned int port, struct link_config *lc,
		       bool sleep_ok, int timeout)
{
	unsigned int fw_caps = adapter->params.fw_caps_support;
	fw_port_cap32_t rcap;
	struct fw_port_cmd cmd;
	int ret;

	/* Filter out nonsense: autonegotiation can't be enabled on a port
	 * that doesn't support it.
	 */
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
	    lc->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	/* Compute our Requested Port Capabilities and send that on to the
	 * Firmware.
	 */
	rcap = t4_link_acaps(adapter, port, lc);
	if (!rcap)
		return -EINVAL;
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				       F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				       V_FW_PORT_CMD_PORTID(port));
	cmd.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
						 ? FW_PORT_ACTION_L1_CFG
						 : FW_PORT_ACTION_L1_CFG32) |
			    FW_LEN16(cmd));
	/* Use whichever Port Capabilities format the firmware supports */
	if (fw_caps == FW_CAPS16)
		cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	else
		cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
				      sleep_ok, timeout);

	/* Unfortunately, even if the Requested Port Capabilities "fit" within
	 * the Physical Port Capabilities, some combinations of features may
	 * still not be legal.  For example, 40Gb/s and Reed-Solomon Forward
	 * Error Correction.  So if the Firmware rejects the L1 Configure
	 * request, flag that here.
	 */
	if (ret) {
		CH_ERR(adapter,
		       "Requested Port Capabilities %#x rejected, error %d\n",
		       rcap, -ret);
		return ret;
	}
	return 0;
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	unsigned int fw_caps = adap->params.fw_caps_support;
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
						 ? FW_PORT_ACTION_L1_CFG
						 : FW_PORT_ACTION_L1_CFG32) |
			    FW_LEN16(c));
	/* Advertise only ANEG, in whichever caps format the FW supports */
	if (fw_caps == FW_CAPS16)
		c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
	else
		c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/* Signature of the optional platform-specific handler invoked from the
 * interrupt dispatch tables below.
 */
typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		} else if (acts->msg)
			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
					  status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	static struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ F_READRSPERR, "Outbound read error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	/* T4 exposes three separate PCIe cause registers; T5 and later
	 * report everything through A_PCIE_INT_CAUSE.
	 */
	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
					    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
					    sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
					    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
					    pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    t5_pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u32 v = 0, perr;
	u32 err;

	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
		  F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
		  "SGE PCIe error for a DBP thread", -1, 0 },
		{ 0 }
	};

	static struct intr_info t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/*
	 * For now, treat below interrupts as fatal so that we disable SGE and
	 * get better debug */
	static struct intr_info t6_sge_intr_info[] = {
		{ F_FATAL_WRE_LEN,
		  "SGE Actual WRE packet is less than advertized length",
		  -1, 1 },
		{ 0 }
	};

	/* Parity errors reported via CAUSE1/CAUSE2 (and CAUSE5 on T5+) are
	 * always treated as fatal below.
	 */
	perr = t4_read_reg(adapter, A_SGE_INT_CAUSE1);
	if (perr) {
		v |= perr;
		CH_ALERT(adapter, "SGE Cause1 Parity Error %#x\n", perr);
	}
	perr = t4_read_reg(adapter, A_SGE_INT_CAUSE2);
	if (perr) {
		v |= perr;
		CH_ALERT(adapter, "SGE Cause2 Parity Error %#x\n", perr);
	}
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
		perr = t4_read_reg(adapter, A_SGE_INT_CAUSE5);
		if (perr) {
			v |= perr;
			CH_ALERT(adapter, "SGE Cause5 Parity Error %#x\n", perr);
		}
	}

	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
	else
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t6_sge_intr_info);

	/* Report and clear the captured error-queue information, if any */
	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}

#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	u32 val, fw_err;
	int fat;

	fw_err = t4_read_reg(adapter, A_PCIE_FW);
	if (fw_err & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally wouldn't
	 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
	 * to make sure the Host sees the Firmware Crash.  So if we have a
	 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
	 * interrupt.
	 */
	val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
	if (val & F_TIMER0INT)
		if (!(fw_err & F_PCIE_FW_ERR) ||
		    (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
			t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
				     F_TIMER0INT);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	static const struct intr_info le_intr_info[] = {
		{ F_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_PARITYERR, "LE parity error", -1, 1 },
		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	/* T6 redefines the LE cause bits, hence the separate table */
	static struct intr_info t6_le_intr_info[] = {
		/* log an error for HASHTBLMEMCRCERR and clear the bit */
		{ F_T6_HASHTBLMEMCRCERR, "LE hash table mem crc error", -1, 0 },
		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
				  (chip_ver <= CHELSIO_T5) ?
				  le_intr_info : t6_le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info t6_mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		/* MPS Tx Bubble is normal for T6 */
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	/* Sum fatal-condition counts across all MPS cause registers */
	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    is_t6(adapter->params.chip)
				    ? t6_mps_tx_intr_info
				    : mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
		      F_ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Display names indexed by MEM_EDC0/MEM_EDC1/MEM_MC/MEM_MC1. */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Select the INT_CAUSE/ECC_STATUS register pair for this memory. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = A_MC_INT_CAUSE;
			cnt_addr = A_MC_ECC_STATUS;
		} else {
			/* T5+ use the per-port MC register layout. */
			addr = A_MC_P_INT_CAUSE;
			cnt_addr = A_MC_P_ECC_STATUS;
		}
	} else {
		/* MEM_MC1 */
		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n",
			 name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		if (idx <= MEM_EDC1)
			t4_edc_err_read(adapter, idx);

		/*
		 * NOTE(review): writing all-ones into the CECNT field
		 * presumably resets the correctable-error counter —
		 * confirm against the register documentation.
		 */
		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter,
			 "%s uncorrectable ECC data error\n", name[idx]);

	/* Ack the cause bits we handled; parity/UE errors are fatal. */
	t4_write_reg(adapter, addr, v);
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.
 */
static void ma_intr_handler(struct adapter *adapter)
{
	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);

	if (status & F_MEM_PERR_INT_CAUSE) {
		CH_ALERT(adapter,
			 "MA parity error, parity status %#x\n",
			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
		/* T5 has a second parity-status register. */
		if (is_t5(adapter->params.chip))
			CH_ALERT(adapter,
				 "MA parity error, parity status %#x\n",
				 t4_read_reg(adapter,
					     A_MA_PARITY_ERROR_STATUS2));
	}
	if (status & F_MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
		CH_ALERT(adapter, "MA address wrap-around error by "
			 "client %u to address %#x\n",
			 G_MEM_WRAP_CLIENT_NUM(v),
			 G_MEM_WRAP_ADDRESS(v) << 4);
	}
	/* Ack all pending causes; any MA interrupt is treated as fatal. */
	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
	t4_fatal_err(adapter);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}

/*
 * XGMAC interrupt handler.
5567 */ 5568 static void xgmac_intr_handler(struct adapter *adap, int port) 5569 { 5570 u32 v, int_cause_reg; 5571 5572 if (is_t4(adap->params.chip)) 5573 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE); 5574 else 5575 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE); 5576 5577 v = t4_read_reg(adap, int_cause_reg); 5578 5579 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR); 5580 if (!v) 5581 return; 5582 5583 if (v & F_TXFIFO_PRTY_ERR) 5584 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", 5585 port); 5586 if (v & F_RXFIFO_PRTY_ERR) 5587 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", 5588 port); 5589 t4_write_reg(adap, int_cause_reg, v); 5590 t4_fatal_err(adap); 5591 } 5592 5593 /* 5594 * PL Parity Error interrupt handler. 5595 */ 5596 static void pl_perr_intr_handler(struct adapter *adap) 5597 { 5598 static const struct intr_info pl_perr_info[] = { 5599 { F_UART, "UART Parity Error", -1, }, 5600 { F_ULP_TX, "ULP TX Parity Error", -1 }, 5601 { F_SGE, "SGE Parity Error", -1 }, 5602 { F_HMA, "HMA Parity Error", -1 }, 5603 { F_CPL_SWITCH, "CPL Switch Parity Error", -1 }, 5604 { F_ULP_RX, "ULP RX Parity Error", -1 }, 5605 { F_PM_RX, "PM RX Parity Error", -1 }, 5606 { F_PM_TX, "PM TX Parity Error", -1 }, 5607 { F_MA, "MA Parity Error", -1 }, 5608 { F_TP, "TP Parity Error", -1 }, 5609 { F_LE, "LE Parity Error", -1 }, 5610 { F_EDC1, "EDC1 Parity Error", -1 }, 5611 { F_EDC0, "EDC0 Parity Error", -1 }, 5612 { F_MC, "MC Parity Error", -1 }, 5613 { F_PCIE, "PCIE Parity Error", -1 }, 5614 { F_PMU, "PMU Parity Error", -1 }, 5615 { F_XGMAC_KR1, "XGMAC_KR1 Parity Error", -1 }, 5616 { F_XGMAC_KR0, "XGMAC_KR0 Parity Error", -1 }, 5617 { F_XGMAC1, "XGMAC1 Parity Error", -1 }, 5618 { F_XGMAC0, "XGMAC0 Parity Error", -1 }, 5619 { F_SMB, "SMB Parity Error", -1 }, 5620 { F_SF, "SF Parity Error", -1 }, 5621 { F_PL, "PL Parity Error", -1 }, 5622 { F_NCSI, "NCSI Parity Error", -1 }, 5623 { F_MPS, "MPS Parity Error", -1 }, 5624 { F_MI, "MI Parity Error", -1 }, 5625 { F_DBG, 
"DBG Parity Error", -1 }, 5626 { F_I2CM, "I2CM Parity Error", -1 }, 5627 { F_CIM, "CIM Parity Error", -1 }, 5628 }; 5629 5630 t4_handle_intr_status(adap, A_PL_PERR_CAUSE, pl_perr_info); 5631 /* pl_intr_handler() will do the t4_fatal_err(adap) */ 5632 } 5633 5634 /* 5635 * PL interrupt handler. 5636 */ 5637 static void pl_intr_handler(struct adapter *adap) 5638 { 5639 static const struct intr_info pl_intr_info[] = { 5640 { F_FATALPERR, "Fatal parity error", -1, 1, 5641 pl_perr_intr_handler }, 5642 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 5643 { 0 } 5644 }; 5645 5646 static struct intr_info t5_pl_intr_info[] = { 5647 { F_FATALPERR, "Fatal parity error", -1, 1, 5648 pl_perr_intr_handler }, 5649 { 0 } 5650 }; 5651 5652 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, 5653 is_t4(adap->params.chip) ? 5654 pl_intr_info : t5_pl_intr_info)) 5655 t4_fatal_err(adap); 5656 } 5657 5658 #define PF_INTR_MASK (F_PFSW | F_PFCIM) 5659 5660 /** 5661 * t4_slow_intr_handler - control path interrupt handler 5662 * @adapter: the adapter 5663 * 5664 * T4 interrupt handler for non-data global interrupt events, e.g., errors. 5665 * The designation 'slow' is because it involves register reads, while 5666 * data interrupts typically don't involve any MMIOs. 5667 */ 5668 int t4_slow_intr_handler(struct adapter *adapter) 5669 { 5670 /* There are rare cases where a PL_INT_CAUSE bit may end up getting 5671 * set when the corresponding PL_INT_ENABLE bit isn't set. It's 5672 * easiest just to mask that case here. 
 */
	u32 raw_cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
	u32 enable = t4_read_reg(adapter, A_PL_INT_ENABLE);
	u32 cause = raw_cause & enable;

	if (!(cause & GLBL_INTR_MASK))
		return 0;	/* not one of our interrupt sources */

	/* Disable all the interrupt(bits) in PL_INT_ENABLE */
	t4_write_reg(adapter, A_PL_INT_ENABLE, 0);
	(void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */

	/* Dispatch each asserted cause bit to its per-module handler. */
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_MAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_MAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_MAC2)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_MAC3)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC0)
		mem_intr_handler(adapter, MEM_MC);
	if (is_t5(adapter->params.chip) && (cause & F_MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master.
	 */
	t4_write_reg(adapter, A_PL_INT_CAUSE, raw_cause & GLBL_INTR_MASK);

	/* re-enable the interrupts (bits that were disabled
	 * earlier in PL_INT_ENABLE)
	 */
	t4_write_reg(adapter, A_PL_INT_ENABLE, enable);
	(void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */
	return 1;
}

/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts. Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules. Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The source-PF field of PL_WHOAMI moved in T6. */
	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
		  ?
		  G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	/* Chip-specific extra SGE error bits. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* Route global interrupts to this PF by setting its bit in MAP0. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}

/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts. We only disable the top-level interrupt
 * concentrators. The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The source-PF field of PL_WHOAMI moved in T6. */
	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
		  ?
		  G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	/* Mask PF-local sources and pull this PF out of the global map. */
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
}

/*
 * Return the number of entries in the chip's RSS lookup table.
 */
unsigned int t4_chip_rss_size(struct adapter *adap)
{
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		return RSS_NENTRIES;
	else
		return T6_RSS_NENTRIES;
}

/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the "response queue" (Ingress Queue) lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values. If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* wrap point for reuse */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/* Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers. These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/* Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/* "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/* While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/* Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;	/* wrap around */
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/* Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * t4_config_glbl_rss - configure the global RSS mode
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @mode: global RSS mode
 * @flags: mode-specific flags
 *
 * Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		/* @flags are only meaningful in basic-virtual mode. */
		c.u.basicvirtual.mode_keymode =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
	} else
		return -EINVAL;	/* unsupported mode */
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_config_vi_rss - configure per VI RSS settings
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: the VI id
 * @flags: RSS flags
 * @defq: id of the default RSS queue for the VI.
 * @skeyidx: RSS secret key table index for non-global mode
 * @skey: RSS vf_scramble key for VI.
 *
 * Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
		     unsigned int skey)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);

	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/*
 * Read an RSS table row: write the row index (the 0xfff00000 bits select
 * the read operation) and poll for the row-valid bit before returning the
 * register value via t4_wait_op_done_val().
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}

/**
 * t4_read_rss - read the contents of the RSS mapping table
 * @adapter: the adapter
 * @map: holds the contents of the RSS mapping table
 *
 * Reads the contents of the RSS hash->queue mapping table.
 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 val;
	int i, ret, nentries;

	/* Each table row holds two queue entries, hence nentries / 2 reads. */
	nentries = t4_chip_rss_size(adapter);
	for (i = 0; i < nentries / 2; ++i) {
		ret = rd_rss_row(adapter, i, &val);
		if (ret)
			return ret;
		*map++ = G_LKPTBLQUEUE0(val);
		*map++ = G_LKPTBLQUEUE1(val);
	}
	return 0;
}

/**
 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 * @adap: the adapter
 * @cmd: TP fw ldst address space type
 * @vals: where the indirect register values are stored/written
 * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
 * @rw: Read (1) or Write (0)
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Access TP indirect registers through LDST, one firmware command per
 * register. Returns 0 on success or the first mailbox error encountered.
 **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		/* For reads the value field is ignored; send zero. */
		c.u.addrval.val = rw ?
				  0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}

/**
 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 * @adap: the adapter
 * @reg_addr: Address Register
 * @reg_data: Data register
 * @buff: where the indirect register values are stored/written
 * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
 * @rw: READ(1) or WRITE(0)
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read/Write TP indirect registers through LDST if possible.
 * Else, use backdoor access
 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;	/* stays non-zero if LDST is not attempted */
	int cmd;

	/* Map the indirect address register to its LDST address space. */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		/* No LDST space for this register; use backdoor access. */
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* Fall back to direct address/data register access on any failure. */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}

/**
 * t4_tp_pio_read - Read TP PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep
 * while awaiting command completion
 *
 * Read TP PIO Registers
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 * t4_tp_pio_write - Write TP PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are stored
 * @nregs: how many indirect registers to write
 * @start_index: index of first indirect register to write
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Write TP PIO Registers
 **/
void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 0, sleep_ok);
}

/**
 * t4_tp_tm_pio_read - Read TP TM PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP TM PIO Registers
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}

/**
 * t4_tp_mib_read - Read TP MIB registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP MIB Registers
 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 * t4_read_rss_key - read the global RSS key
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the global 320-bit RSS key.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
}

/**
 * t4_write_rss_key - program one of the RSS keys
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 * @idx: which RSS key to write
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Writes one of the RSS keys with the given 320-bit value. If @idx is
 * 0..15 the corresponding entry in the RSS key table is written,
 * otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Stage the 320-bit key into the secret-key registers. */
	t4_tp_pio_write(adap, (void *)key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);

	/* Latch the staged key into the selected key-table entry. */
	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
	}
}

/**
 * t4_read_rss_pf_config - read PF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the PF RSS table to read
 * @valp: where to store the returned value
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the PF RSS Configuration Table at the specified index and returns
 * the value found there.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
}

/**
 * t4_write_rss_pf_config - write PF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the PF RSS table to write
 * @val: the value to store
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Writes the PF RSS Configuration Table at the specified index with the
 * specified value.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
			sleep_ok);
}

/**
 * t4_read_rss_vf_config - read VF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the VF RSS table to read
 * @vfl: where to store the returned VFL
 * @vfh: where to store the returned VFH
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the VF RSS Configuration Table at the specified index and returns
 * the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* The VF write-address field moved in T6. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}
	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
}

/**
 * t4_read_rss_pf_map - read PF RSS Map
 * @adapter: the adapter
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the PF RSS Map register and returns its value.
6278 */ 6279 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok) 6280 { 6281 u32 pfmap; 6282 6283 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok); 6284 6285 return pfmap; 6286 } 6287 6288 /** 6289 * t4_read_rss_pf_mask - read PF RSS Mask 6290 * @adapter: the adapter 6291 * @sleep_ok: if true we may sleep while awaiting command completion 6292 * 6293 * Reads the PF RSS Mask register and returns its value. 6294 */ 6295 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok) 6296 { 6297 u32 pfmask; 6298 6299 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok); 6300 6301 return pfmask; 6302 } 6303 6304 /** 6305 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 6306 * @adap: the adapter 6307 * @v4: holds the TCP/IP counter values 6308 * @v6: holds the TCP/IPv6 counter values 6309 * @sleep_ok: if true we may sleep while awaiting command completion 6310 * 6311 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. 6312 * Either @v4 or @v6 may be %NULL to skip the corresponding stats. 
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Buffer sized to cover the whole OUT_RST..RXT_SEG_LO MIB block. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

	/*
	 * STAT_IDX gives a counter's offset from the start of the block;
	 * the same offsets are reused for the V6 read below, which starts
	 * at A_TP_MIB_TCP_V6OUT_RST instead.
	 */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}

/**
 * t4_tp_get_err_stats - read TP's error MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's error counters.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	/* One counter per channel for each per-channel error class. */
	int nchan = adap->params.arch.nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	/* ofld_no_neigh and the following counter are read as a pair. */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}

/**
 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's CPL counters.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->params.arch.nchan;

	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);

	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
}

/**
 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/*
	 * Reads 2 words starting at RQE_DFR_PKT: rqe_dfr_pkt and the
	 * field that immediately follows it in struct tp_rdma_stats.
	 */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);
}

/**
 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 *	@adap: the adapter
 *	@idx: the port index
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok)
{
	u32 val[2];

	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
		       sleep_ok);

	t4_tp_mib_read(adap, &st->frames_drop, 1,
		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);

	/* The byte counter is split across a HI/LO register pair. */
	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
		       sleep_ok);

	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}

/**
 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's counters for non-TCP directly-placed packets.
 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok)
{
	u32 val[4];

	/*
	 * Four consecutive words starting at USM_PKTS: frame count, drop
	 * count, and the two halves of the 64-bit octet count.
	 */
	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);

	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}

/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/*
		 * NOTE(review): writing MTUINDEX 0xff with the table index
		 * in MTUVALUE appears to select read-back mode -- confirm
		 * against the TP_MTU_TABLE register description.
		 */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xffU) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}

/**
 *	t4_read_cong_tbl - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* Select the (mtu, window) entry for read-back. */
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffffU) | (mtu << 5) | w);
			/* Only the low 13 bits hold the increment. */
			incr[mtu][w] = (u16)t4_read_reg(adap,
							A_TP_CCTRL_TABLE) & 0x1fff;
		}
}

/**
 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@adap: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	/* Read-modify-write through the TP PIO address/data pair. */
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}

/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters.
6530 */ 6531 static void init_cong_ctrl(unsigned short *a, unsigned short *b) 6532 { 6533 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 6534 a[9] = 2; 6535 a[10] = 3; 6536 a[11] = 4; 6537 a[12] = 5; 6538 a[13] = 6; 6539 a[14] = 7; 6540 a[15] = 8; 6541 a[16] = 9; 6542 a[17] = 10; 6543 a[18] = 14; 6544 a[19] = 17; 6545 a[20] = 21; 6546 a[21] = 25; 6547 a[22] = 30; 6548 a[23] = 35; 6549 a[24] = 45; 6550 a[25] = 60; 6551 a[26] = 80; 6552 a[27] = 100; 6553 a[28] = 200; 6554 a[29] = 300; 6555 a[30] = 400; 6556 a[31] = 500; 6557 6558 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 6559 b[9] = b[10] = 1; 6560 b[11] = b[12] = 2; 6561 b[13] = b[14] = b[15] = b[16] = 3; 6562 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 6563 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 6564 b[28] = b[29] = 6; 6565 b[30] = b[31] = 7; 6566 } 6567 6568 /* The minimum additive increment value for the congestion control table */ 6569 #define CC_MIN_INCR 2U 6570 6571 /** 6572 * t4_load_mtus - write the MTU and congestion control HW tables 6573 * @adap: the adapter 6574 * @mtus: the values for the MTU table 6575 * @alpha: the values for the congestion control alpha parameter 6576 * @beta: the values for the congestion control beta parameter 6577 * 6578 * Write the HW MTU table with the supplied MTUs and the high-speed 6579 * congestion control table with the supplied alpha, beta, and MTUs. 6580 * We write the two tables together because the additive increments 6581 * depend on the MTUs. 
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Average packets per congestion control window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448,
		640, 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336,
		20480, 28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* Round log2 down unless mtu is within the top quarter
		 * of its power-of-2 range. */
		if (!(mtu & ((1 << log2) >> 2)))
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * Additive increment scaled by alpha over the
			 * average packet count, floored at CC_MIN_INCR.
			 * The 40 subtracted from mtu is presumably the
			 * TCP/IP header overhead -- TODO confirm.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 * clocks.  The formula is
 *
 *	bytes/s = bytes256 * 256 * ClkFreq / 4096
 *
 * which is equivalent to
 *
 *	bytes/s = 62.5 * bytes256 * ClkFreq_ms
 */
static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
{
	u64 v = bytes256 * adap->params.vpd.cclk;

	/* v * 62 + v / 2 == 62.5 * v, using integer arithmetic only */
	return v * 62 + v / 2;
}

/**
 *	t4_get_chan_txrate - get the current per channel Tx rates
 *	@adap: the adapter
 *	@nic_rate: rates for NIC traffic
 *	@ofld_rate: rates for offloaded traffic
 *
 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
 *	for each channel.
 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	/* Channels 2/3 only exist when all NCHAN channels are in use. */
	if (adap->params.arch.nchan == NCHAN) {
		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
	}

	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	if (adap->params.arch.nchan == NCHAN) {
		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
	}
}

/**
 *	t4_set_trace_filter - configure one of the tracing filters
 *	@adap: the adapter
 *	@tp: the desired trace filter parameters
 *	@idx: which filter to configure
 *	@enable: whether to enable or disable the filter
 *
 *	Configures one of the tracing filters available in HW.  If @enable is
 *	%0 @tp is not examined and may be %NULL.  The user is responsible to
 *	set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
 *	by using "cxgbtool iface reg reg_addr=val" command.  See t4_sniffer/
 *	docs/readme.txt for a complete description of how to setup tracing on
 *	T4.
 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
			int idx, int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;

	if (!enable) {
		/* Clearing CTL_A disables the filter; nothing else needed. */
		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	} else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Validate every parameter against its register field width. */
	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);

	/* Convert the filter index into a match-register offset. */
	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		/* HW stores "don't care" bits, i.e. the inverted mask. */
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/* Enable last; T4 and T5+ use different enable/port field layouts. */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     (is_t4(adap->params.chip) ?
		      V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
		      V_T5_TFPORT(tp->port) | F_T5_TFEN |
		      V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}

/**
 *	t4_get_trace_filter - query one of the tracing filters
 *	@adap: the adapter
 *	@tp: the current trace filter parameters
 *	@idx: which trace filter to query
 *	@enabled: non-zero if the filter is enabled
 *
 *	Returns the current settings of one of the HW tracing filters.
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	/* T4 and T5+ place the enable/port/invert fields differently. */
	if (is_t4(adap->params.chip)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		/* Undo the inversion done when the filter was programmed. */
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}

/**
 *	t4_read_tcb - read a hardware TCP Control Block structure
 *	@adap: the adapter
 *	@win: PCI-E Memory Window to use
 *	@tid: the TCB ID
 *	@tcb: the buffer to return the TCB in
 *
 *	Reads the indicated hardware TCP Control Block and returns it in
 *	the supplied buffer.
 *	Returns 0 on success.
 */
int t4_read_tcb(struct adapter *adap, int win, int tid, u32 tcb[TCB_SIZE/4])
{
	u32 tcb_base = t4_read_reg(adap, A_TP_CMM_TCB_BASE);
	u32 tcb_addr = tcb_base + tid * TCB_SIZE;
	__be32 raw_tcb[TCB_SIZE/4];
	int ret, word;

	ret = t4_memory_rw_addr(adap, win,
				tcb_addr, sizeof raw_tcb, raw_tcb,
				T4_MEMORY_READ);
	if (ret)
		return ret;

	/*
	 * Byte-swap each word into host order.
	 * NOTE(review): the loop bound assumes TCB_SIZE/4 == 32 --
	 * keep in sync with the raw_tcb declaration above.
	 */
	for (word = 0; word < 32; word++)
		tcb[word] = be32_to_cpu(raw_tcb[word]);
	return 0;
}

/**
 *	t4_pmtx_get_stats - returns the HW stats from PMTX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMTX.
 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
		/* Select statistic i (register index is 1-based). */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap->params.chip)) {
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		} else {
			/* T5+ exposes the cycle count via debug registers. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_pmrx_get_stats - returns the HW stats from PMRX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMRX.
 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
		/* Select statistic i (register index is 1-based). */
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (is_t4(adap->params.chip)) {
			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		} else {
			/* T5+ exposes the cycle count via debug registers. */
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
					 A_PM_RX_DBG_DATA, data, 2,
					 A_PM_RX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
 *	@adapter: the adapter
 *	@pidx: the port index
 *
 *	Computes and returns a bitmap indicating which MPS buffer groups are
 *	associated with the given Port.  Bit i is set if buffer group i is
 *	used by the Port.
 */
static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
					      int pidx)
{
	unsigned int chip_version, nports;

	chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
	nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));

	/* The mapping depends on both the chip generation and the number
	 * of ports the hardware is configured for. */
	switch (chip_version) {
	case CHELSIO_T4:
	case CHELSIO_T5:
		switch (nports) {
		case 1: return 0xf;
		case 2: return 3 << (2 * pidx);
		case 4: return 1 << pidx;
		}
		break;

	case CHELSIO_T6:
		switch (nports) {
		case 2: return 1 << (2 * pidx);
		}
		break;
	}

	/* Unknown chip/nports combination: log it and return an empty map. */
	CH_ERR(adapter, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
	       chip_version, nports);

	return 0;
}

/**
 *	t4_get_mps_bg_map - return the buffer groups associated with a port
 *	@adapter: the adapter
 *	@pidx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given Port.  Bit i is set if buffer group i is used by the
 *	Port.
 */
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
{
	u8 *mps_bg_map;
	unsigned int nports;

	nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
	if (pidx >= nports) {
		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n", pidx, nports);
		return 0;
	}

	/* If we've already retrieved/computed this, just return the result.
	 */
	mps_bg_map = adapter->params.mps_bg_map;
	if (mps_bg_map[pidx])
		return mps_bg_map[pidx];

	/* Newer Firmware can tell us what the MPS Buffer Group Map is.
	 * If we're talking to such Firmware, let it tell us.  If the new
	 * API isn't supported, revert back to old hardcoded way.  The value
	 * obtained from Firmware is encoded in below format:
	 *
	 * val = (( MPSBGMAP[Port 3] << 24 ) |
	 *        ( MPSBGMAP[Port 2] << 16 ) |
	 *        ( MPSBGMAP[Port 1] <<  8 ) |
	 *        ( MPSBGMAP[Port 0] <<  0 ))
	 */
	if (adapter->flags & FW_OK) {
		u32 param, val;
		int ret;

		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MPSBGMAP));
		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
					 0, 1, &param, &val);
		if (!ret) {
			int p;

			/* Store the BG Map for all of the Ports in order to
			 * avoid more calls to the Firmware in the future.
			 */
			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
				mps_bg_map[p] = val & 0xff;

			return mps_bg_map[pidx];
		}
	}

	/* Either we're not talking to the Firmware or we're dealing with
	 * older Firmware which doesn't support the new API to get the MPS
	 * Buffer Group Map.  Fall back to computing it ourselves.
	 */
	mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
	return mps_bg_map[pidx];
}

/**
 *	t4_get_tp_e2c_map - return the E2C channel map associated with a port
 *	@adapter: the adapter
 *	@pidx: the port index
 */
unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
{
	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
	u32 param, val = 0;
	int ret;

	if (pidx >= nports) {
		CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n", pidx, nports);
		return 0;
	}

	/* FW version >= 1.16.44.0 can determine E2C channel map using
	 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
	 */
	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP));
	ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
				 0, 1, &param, &val);
	/* The per-port map occupies one byte per port in val. */
	if (!ret)
		return (val >> (8*pidx)) & 0xff;

	return 0;
}

/**
 *	t4_get_tp_ch_map - return TP ingress channels associated with a port
 *	@adapter: the adapter
 *	@pidx: the port index
 *
 *	Returns a bitmap indicating which TP Ingress Channels are associated
 *	with a given Port.  Bit i is set if TP Ingress Channel i is used by
 *	the Port.
 */
unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx)
{
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));

	if (pidx >= nports) {
		CH_WARN(adapter, "TP Port Index %d >= Nports %d\n", pidx, nports);
		return 0;
	}

	switch (chip_version) {
	case CHELSIO_T4:
	case CHELSIO_T5:
		/*
		 * Note that this happens to be the same values as the MPS
		 * Buffer Group Map for these Chips.  But we replicate the
		 * code here because they're really separate concepts.
		 */
		switch (nports) {
		case 1: return 0xf;
		case 2: return 3 << (2 * pidx);
		case 4: return 1 << pidx;
		}
		break;

	case CHELSIO_T6:
		switch (nports) {
		case 1: return 1 << pidx;
		case 2: return 1 << pidx;
		}
		break;
	}

	/* Unknown chip/nports combination: log it and return an empty map. */
	CH_ERR(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
	       chip_version, nports);
	return 0;
}

/**
 *	t4_get_port_type_description - return Port Type string description
 *	@port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Indexed by the firmware's fw_port_type enumeration. */
	static const char *const port_type_description[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
		"KR4_100G",
		"CR4_QSFP",
		"CR_QSFP",
		"CR2_QSFP",
		"SFP28",
		"KR_SFP28",
		"KR_XLAUI",
	};

	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
	return "UNKNOWN";
}

/**
 *	t4_get_port_stats_offset - collect port stats relative to a previous
 *				   snapshot
 *	@adap: The adapter
 *	@idx: The port
 *	@stats: Current stats to fill
 *	@offset: Previous stats snapshot
 */
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset)
{
	u64 *s, *o;
	int i;

	t4_get_port_stats(adap, idx, stats);
	/* Treat both structures as flat arrays of u64 counters and
	 * subtract the snapshot element-wise. */
	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
	     i < (sizeof(struct port_stats)/sizeof(u64)) ;
	     i++, s++, o++)
		*s -= *o;
}

/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port
 *	from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

	/* Per-port 64-bit statistic; register layout differs on T4 vs T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
	/* Common (non-per-port) 64-bit statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	/*
	 * On T5+ the HW may be configured to include pause frames in the
	 * 64B-frame and multicast counters; back them out if so.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX)
			p->tx_frames_64 -= p->tx_pause;
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment as on the Tx side. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX)
			p->rx_frames_64 -= p->rx_pause;
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Only read per-buffer-group counters for groups this port uses. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

	/* Loopback-port statistic; register layout differs on T4 vs T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets = GET_STAT(BYTES);
	p->frames = GET_STAT(FRAMES);
	p->bcast_frames = GET_STAT(BCAST);
	p->mcast_frames = GET_STAT(MCAST);
	p->ucast_frames = GET_STAT(UCAST);
	p->error_frames = GET_STAT(ERROR);

	p->frames_64 = GET_STAT(64B);
	p->frames_65_127 = GET_STAT(65B_127B);
	p->frames_128_255 = GET_STAT(128B_255B);
	p->frames_256_511 = GET_STAT(256B_511B);
	p->frames_512_1023 = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max = GET_STAT(1519B_MAX);
	p->drop = GET_STAT(DROP_FRAMES);

	/* Only read per-buffer-group counters for groups this port uses. */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @rqtype: the filter Request Type: 0 => IPv4, 1 => IPv6
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid
 * is negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr,
		     int rqtype, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_RQTYPE(rqtype) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
			cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/*
 * Initialize the common header of a firmware command structure @var for
 * command FW_<cmd>_CMD with access type <rd_wr> (READ or WRITE).
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)

/*
 * Write @val to firmware address space location @addr via an LDST
 * mailbox command.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *
@adap: the adapter 7307 * @mbox: mailbox to use for the FW command 7308 * @phy_addr: the PHY address 7309 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 7310 * @reg: the register to read 7311 * @valp: where to store the value 7312 * 7313 * Issues a FW command through the given mailbox to read a PHY register. 7314 */ 7315 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 7316 unsigned int mmd, unsigned int reg, unsigned int *valp) 7317 { 7318 int ret; 7319 u32 ldst_addrspace; 7320 struct fw_ldst_cmd c; 7321 7322 memset(&c, 0, sizeof(c)); 7323 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO); 7324 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 7325 F_FW_CMD_REQUEST | F_FW_CMD_READ | 7326 ldst_addrspace); 7327 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 7328 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) | 7329 V_FW_LDST_CMD_MMD(mmd)); 7330 c.u.mdio.raddr = cpu_to_be16(reg); 7331 7332 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 7333 if (ret == 0) 7334 *valp = be16_to_cpu(c.u.mdio.rval); 7335 return ret; 7336 } 7337 7338 /** 7339 * t4_mdio_wr - write a PHY register through MDIO 7340 * @adap: the adapter 7341 * @mbox: mailbox to use for the FW command 7342 * @phy_addr: the PHY address 7343 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 7344 * @reg: the register to write 7345 * @valp: value to write 7346 * 7347 * Issues a FW command through the given mailbox to write a PHY register. 
7348 */ 7349 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 7350 unsigned int mmd, unsigned int reg, unsigned int val) 7351 { 7352 u32 ldst_addrspace; 7353 struct fw_ldst_cmd c; 7354 7355 memset(&c, 0, sizeof(c)); 7356 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO); 7357 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 7358 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 7359 ldst_addrspace); 7360 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 7361 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) | 7362 V_FW_LDST_CMD_MMD(mmd)); 7363 c.u.mdio.raddr = cpu_to_be16(reg); 7364 c.u.mdio.rval = cpu_to_be16(val); 7365 7366 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7367 } 7368 7369 /** 7370 * 7371 * t4_sge_decode_idma_state - decode the idma state 7372 * @adap: the adapter 7373 * @state: the state idma is stuck in 7374 */ 7375 void t4_sge_decode_idma_state(struct adapter *adapter, int state) 7376 { 7377 static const char * const t4_decode[] = { 7378 "IDMA_IDLE", 7379 "IDMA_PUSH_MORE_CPL_FIFO", 7380 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", 7381 "Not used", 7382 "IDMA_PHYSADDR_SEND_PCIEHDR", 7383 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", 7384 "IDMA_PHYSADDR_SEND_PAYLOAD", 7385 "IDMA_SEND_FIFO_TO_IMSG", 7386 "IDMA_FL_REQ_DATA_FL_PREP", 7387 "IDMA_FL_REQ_DATA_FL", 7388 "IDMA_FL_DROP", 7389 "IDMA_FL_H_REQ_HEADER_FL", 7390 "IDMA_FL_H_SEND_PCIEHDR", 7391 "IDMA_FL_H_PUSH_CPL_FIFO", 7392 "IDMA_FL_H_SEND_CPL", 7393 "IDMA_FL_H_SEND_IP_HDR_FIRST", 7394 "IDMA_FL_H_SEND_IP_HDR", 7395 "IDMA_FL_H_REQ_NEXT_HEADER_FL", 7396 "IDMA_FL_H_SEND_NEXT_PCIEHDR", 7397 "IDMA_FL_H_SEND_IP_HDR_PADDING", 7398 "IDMA_FL_D_SEND_PCIEHDR", 7399 "IDMA_FL_D_SEND_CPL_AND_IP_HDR", 7400 "IDMA_FL_D_REQ_NEXT_DATA_FL", 7401 "IDMA_FL_SEND_PCIEHDR", 7402 "IDMA_FL_PUSH_CPL_FIFO", 7403 "IDMA_FL_SEND_CPL", 7404 "IDMA_FL_SEND_PAYLOAD_FIRST", 7405 "IDMA_FL_SEND_PAYLOAD", 7406 "IDMA_FL_REQ_NEXT_DATA_FL", 7407 "IDMA_FL_SEND_NEXT_PCIEHDR", 7408 "IDMA_FL_SEND_PADDING", 
7409 "IDMA_FL_SEND_COMPLETION_TO_IMSG", 7410 "IDMA_FL_SEND_FIFO_TO_IMSG", 7411 "IDMA_FL_REQ_DATAFL_DONE", 7412 "IDMA_FL_REQ_HEADERFL_DONE", 7413 }; 7414 static const char * const t5_decode[] = { 7415 "IDMA_IDLE", 7416 "IDMA_ALMOST_IDLE", 7417 "IDMA_PUSH_MORE_CPL_FIFO", 7418 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", 7419 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", 7420 "IDMA_PHYSADDR_SEND_PCIEHDR", 7421 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", 7422 "IDMA_PHYSADDR_SEND_PAYLOAD", 7423 "IDMA_SEND_FIFO_TO_IMSG", 7424 "IDMA_FL_REQ_DATA_FL", 7425 "IDMA_FL_DROP", 7426 "IDMA_FL_DROP_SEND_INC", 7427 "IDMA_FL_H_REQ_HEADER_FL", 7428 "IDMA_FL_H_SEND_PCIEHDR", 7429 "IDMA_FL_H_PUSH_CPL_FIFO", 7430 "IDMA_FL_H_SEND_CPL", 7431 "IDMA_FL_H_SEND_IP_HDR_FIRST", 7432 "IDMA_FL_H_SEND_IP_HDR", 7433 "IDMA_FL_H_REQ_NEXT_HEADER_FL", 7434 "IDMA_FL_H_SEND_NEXT_PCIEHDR", 7435 "IDMA_FL_H_SEND_IP_HDR_PADDING", 7436 "IDMA_FL_D_SEND_PCIEHDR", 7437 "IDMA_FL_D_SEND_CPL_AND_IP_HDR", 7438 "IDMA_FL_D_REQ_NEXT_DATA_FL", 7439 "IDMA_FL_SEND_PCIEHDR", 7440 "IDMA_FL_PUSH_CPL_FIFO", 7441 "IDMA_FL_SEND_CPL", 7442 "IDMA_FL_SEND_PAYLOAD_FIRST", 7443 "IDMA_FL_SEND_PAYLOAD", 7444 "IDMA_FL_REQ_NEXT_DATA_FL", 7445 "IDMA_FL_SEND_NEXT_PCIEHDR", 7446 "IDMA_FL_SEND_PADDING", 7447 "IDMA_FL_SEND_COMPLETION_TO_IMSG", 7448 }; 7449 static const char * const t6_decode[] = { 7450 "IDMA_IDLE", 7451 "IDMA_PUSH_MORE_CPL_FIFO", 7452 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", 7453 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", 7454 "IDMA_PHYSADDR_SEND_PCIEHDR", 7455 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", 7456 "IDMA_PHYSADDR_SEND_PAYLOAD", 7457 "IDMA_FL_REQ_DATA_FL", 7458 "IDMA_FL_DROP", 7459 "IDMA_FL_DROP_SEND_INC", 7460 "IDMA_FL_H_REQ_HEADER_FL", 7461 "IDMA_FL_H_SEND_PCIEHDR", 7462 "IDMA_FL_H_PUSH_CPL_FIFO", 7463 "IDMA_FL_H_SEND_CPL", 7464 "IDMA_FL_H_SEND_IP_HDR_FIRST", 7465 "IDMA_FL_H_SEND_IP_HDR", 7466 "IDMA_FL_H_REQ_NEXT_HEADER_FL", 7467 "IDMA_FL_H_SEND_NEXT_PCIEHDR", 7468 "IDMA_FL_H_SEND_IP_HDR_PADDING", 7469 "IDMA_FL_D_SEND_PCIEHDR", 7470 "IDMA_FL_D_SEND_CPL_AND_IP_HDR", 
7471 "IDMA_FL_D_REQ_NEXT_DATA_FL", 7472 "IDMA_FL_SEND_PCIEHDR", 7473 "IDMA_FL_PUSH_CPL_FIFO", 7474 "IDMA_FL_SEND_CPL", 7475 "IDMA_FL_SEND_PAYLOAD_FIRST", 7476 "IDMA_FL_SEND_PAYLOAD", 7477 "IDMA_FL_REQ_NEXT_DATA_FL", 7478 "IDMA_FL_SEND_NEXT_PCIEHDR", 7479 "IDMA_FL_SEND_PADDING", 7480 "IDMA_FL_SEND_COMPLETION_TO_IMSG", 7481 }; 7482 static const u32 sge_regs[] = { 7483 A_SGE_DEBUG_DATA_LOW_INDEX_2, 7484 A_SGE_DEBUG_DATA_LOW_INDEX_3, 7485 A_SGE_DEBUG_DATA_HIGH_INDEX_10, 7486 }; 7487 const char **sge_idma_decode; 7488 int sge_idma_decode_nstates; 7489 int i; 7490 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); 7491 7492 /* Select the right set of decode strings to dump depending on the 7493 * adapter chip type. 7494 */ 7495 switch (chip_version) { 7496 case CHELSIO_T4: 7497 sge_idma_decode = (const char **)t4_decode; 7498 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode); 7499 break; 7500 7501 case CHELSIO_T5: 7502 sge_idma_decode = (const char **)t5_decode; 7503 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode); 7504 break; 7505 7506 case CHELSIO_T6: 7507 sge_idma_decode = (const char **)t6_decode; 7508 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode); 7509 break; 7510 7511 default: 7512 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version); 7513 return; 7514 } 7515 7516 if (state < sge_idma_decode_nstates) 7517 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]); 7518 else 7519 CH_WARN(adapter, "idma state %d unknown\n", state); 7520 7521 for (i = 0; i < ARRAY_SIZE(sge_regs); i++) 7522 CH_WARN(adapter, "SGE register %#x value %#x\n", 7523 sge_regs[i], t4_read_reg(adapter, sge_regs[i])); 7524 } 7525 7526 /** 7527 * t4_sge_ctxt_flush - flush the SGE context cache 7528 * @adap: the adapter 7529 * @mbox: mailbox to use for the FW command 7530 * 7531 * Issues a FW command through the given mailbox to flush the 7532 * SGE context cache. 
 */
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* Egress and Ingress contexts live in separate LDST address spaces */
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
						 FW_LDST_ADDRSPC_SGE_EGRC :
						 FW_LDST_ADDRSPC_SGE_INGC);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	return ret;
}

/**
 * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
 * @adap: the adapter
 * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
 * @dbqtimers: SGE Doorbell Queue Timer table
 *
 * Reads the SGE Doorbell Queue Timer values into the provided table.
 * Returns 0 on success (Firmware and Hardware support this feature),
 * an error on failure.
 */
int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
			  u16 *dbqtimers)
{
	int ret, dbqtimerix;

	ret = 0;
	dbqtimerix = 0;
	/* FW_PARAMS queries take at most 7 parameters per command, so read
	 * the timers in batches of up to 7.
	 */
	while (dbqtimerix < ndbqtimers) {
		int nparams, param;
		u32 params[7], vals[7];

		nparams = ndbqtimers - dbqtimerix;
		if (nparams > ARRAY_SIZE(params))
			nparams = ARRAY_SIZE(params);

		for (param = 0; param < nparams; param++)
			params[param] =
			    (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
			     V_FW_PARAMS_PARAM_Y(dbqtimerix + param));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      nparams, params, vals);
		if (ret)
			break;

		for (param = 0; param < nparams; param++)
			dbqtimers[dbqtimerix++] = vals[param];
	}
	return ret;
}

/**
 * t4_fw_hello - establish communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @evt_mbox: mailbox to receive async FW events
 * @master: specifies the caller's willingness to be the device master
 * @state: returns the current device state (if non-NULL)
 *
 * Issues a command to establish communication with FW.  Returns either
 * an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * one has now been selected (MASTER_VLD is set),
			 * grab its identity for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

/**
 * t4_fw_bye - end communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_reset - issue a reset to FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @reset: specifies the type of reset to perform
 *
 * Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state.  The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * M_PCIE_FW_MASTER).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing.  The only way out of this state is to RESTART the firmware
 * ...
 */
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 * t4_fw_restart - restart the firmware by taking the uP out of RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if valid, i.e.
 *	mbox <= M_PCIE_FW_MASTER)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by t4_fw_halt().  On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET.  This will cause the
 *    firmware to start up from its start vector.  And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for the firmware to
		 * clear PCIE_FW.HALT, up to FW_CMD_MAX_TIMEOUT ms.
		 */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image.  Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state.  On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int reset, ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/* Disable the FW_OK flag so that mbox commands with the FW_OK flag
	 * check won't be sent while we are flashing FW.
	 */
	adap->flags &= ~FW_OK;

	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			goto out;
	}

	ret = t4_load_fw(adap, fw_data, size, bootstrap);
	if (ret < 0 || bootstrap)
		goto out;

	/*
	 * If there was a Firmware Configuration File stored in FLASH,
	 * there's a good chance that it won't be compatible with the new
	 * Firmware.  In order to prevent difficult to diagnose adapter
	 * initialization issues, we clear out the Firmware Configuration File
	 * portion of the FLASH.  The user will need to re-FLASH a new
	 * Firmware Configuration File which is compatible with the new
	 * Firmware if that's desired.
	 */
	(void)t4_load_cfg(adap, NULL, 0);

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	ret = t4_fw_restart(adap, mbox, reset);

	/* Grab potentially new Firmware Device Log parameters so we can see
	 * how healthy the new Firmware is.  It's okay to contact the new
	 * Firmware for these parameters even though, as far as it's
	 * concerned, we've never said "HELLO" to it ...
	 */
	(void)t4_init_devlog_params(adap, 1);

out:
	adap->flags |= FW_OK;
	return ret;
}

/**
 * t4_fl_pkt_align - return the fl packet alignment
 * @adap: the adapter
 * @is_packed: True when the driver uses packed FLM mode
 *
 * T4 has a single field to specify the packing and padding boundary.
 * T5 onwards has separate fields for this and hence the alignment for
 * next packet offset is maximum of these two.
 */
int t4_fl_pkt_align(struct adapter *adap, bool is_packed)
{
	u32 sge_control, sge_control2;
	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

	sge_control = t4_read_reg(adap, A_SGE_CONTROL);

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16
	 * bytes).)  Padding Boundary values in T6 start from 8B, whereas it
	 * is 32B for T4 and T5.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		ingpad_shift = X_INGPADBOUNDARY_SHIFT;
	else
		ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;

	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);

	fl_align = ingpadboundary;
	if (!is_t4(adap->params.chip) && is_packed) {
		/* T5 has a weird interpretation of one of the PCIe Packing
		 * Boundary values.  No idea why ...
		 */
		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						X_INGPACKBOUNDARY_SHIFT);

		fl_align = max(ingpadboundary, ingpackboundary);
	}
	return fl_align;
}

/**
 * t4_fixup_host_params_compat - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 * @chip_compat: maintain compatibility with designated chip
 *
 * Various registers in the chip contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * @chip_compat is used to limit the set of changes that are made
 * to be compatible with the indicated chip release.  This is used by
 * drivers to maintain compatibility with chip register settings when
 * the drivers haven't [yet] been updated with new chip support.
 */
int t4_fixup_host_params_compat(struct adapter *adap,
				unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat)
{
	unsigned int page_shift = fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
		     V_HOSTPAGESIZEPF0(sge_hps) |
		     V_HOSTPAGESIZEPF1(sge_hps) |
		     V_HOSTPAGESIZEPF2(sge_hps) |
		     V_HOSTPAGESIZEPF3(sge_hps) |
		     V_HOSTPAGESIZEPF4(sge_hps) |
		     V_HOSTPAGESIZEPF5(sge_hps) |
		     V_HOSTPAGESIZEPF6(sge_hps) |
		     V_HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(fl_align_log -
						  X_INGPADBOUNDARY_SHIFT) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
	} else {
		unsigned int pack_align;
		unsigned int ingpad, ingpack;
		unsigned int pcie_cap;

		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/* We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
		if (pcie_cap) {
			unsigned int mps, mps_log;
			u16 devctl;

			/*
			 * The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 * 128 bytes.
			 */
			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
					    &devctl);
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			mps = 1 << mps_log;
			if (mps > pack_align)
				pack_align = mps;
		}

		/* N.B. T5/T6 have a crazy special interpretation of the "0"
		 * value for the Packing Boundary.  This corresponds to 16
		 * bytes instead of the expected 32 bytes.  So if we want 32
		 * bytes, the best we can really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = X_INGPACKBOUNDARY_16B;
			fl_align = 16;
		} else if (pack_align == 32) {
			ingpack = X_INGPACKBOUNDARY_64B;
			fl_align = 64;
		} else {
			unsigned int pack_align_log = fls(pack_align) - 1;
			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
			fl_align = pack_align;
		}

		/* Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = X_INGPADBOUNDARY_32B;
		else
			ingpad = X_T6_INGPADBOUNDARY_8B;

		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(ingpad) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, A_SGE_CONTROL2,
				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
				 V_INGPACKBOUNDARY(ingpack));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1)
		     & ~(fl_align-1));

	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));

	return 0;
}

/**
 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 *
 * Various registers in T4 contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * This routine makes changes which are compatible with T4 chips.
8188 */ 8189 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, 8190 unsigned int cache_line_size) 8191 { 8192 return t4_fixup_host_params_compat(adap, page_size, cache_line_size, 8193 T4_LAST_REV); 8194 } 8195 8196 /** 8197 * t4_fw_initialize - ask FW to initialize the device 8198 * @adap: the adapter 8199 * @mbox: mailbox to use for the FW command 8200 * 8201 * Issues a command to FW to partially initialize the device. This 8202 * performs initialization that generally doesn't depend on user input. 8203 */ 8204 int t4_fw_initialize(struct adapter *adap, unsigned int mbox) 8205 { 8206 struct fw_initialize_cmd c; 8207 8208 memset(&c, 0, sizeof(c)); 8209 INIT_CMD(c, INITIALIZE, WRITE); 8210 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 8211 } 8212 8213 /** 8214 * t4_query_params_rw - query FW or device parameters 8215 * @adap: the adapter 8216 * @mbox: mailbox to use for the FW command 8217 * @pf: the PF 8218 * @vf: the VF 8219 * @nparams: the number of parameters 8220 * @params: the parameter names 8221 * @val: the parameter values 8222 * @rw: Write and read flag 8223 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion 8224 * 8225 * Reads the value of FW or device parameters. Up to 7 parameters can be 8226 * queried at once. 
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw, bool sleep_ok)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	/* A single FW_PARAMS_CMD carries at most 7 (mnemonic, value) pairs. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/*
	 * Lay out the request as alternating mnemonic/value words.  For a
	 * plain query only the mnemonics are filled in; when @rw is set the
	 * caller-supplied values are written alongside each mnemonic.
	 */
	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);

	/*
	 * We always copy back the results, even if there's an error.  We'll
	 * get an error if any of the parameters was unknown to the Firmware,
	 * but there will be results for the others ...  (Older Firmware
	 * stopped at the first unknown parameter; newer Firmware processes
	 * them all and flags the unknown parameters with a return value of
	 * ~0UL.)
	 */
	/* Step by 2: the reply also alternates mnemonic and value words. */
	for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
		*val++ = be32_to_cpu(*p);

	return ret;
}

/* Query FW/device parameters, sleeping while awaiting mbox completion. */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
				  true);
}

/* Non-sleeping variant of t4_query_params(). */
int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
				  false);
}

/**
 * t4_set_params_timeout - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 * @timeout: the timeout time
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.
8298 */ 8299 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, 8300 unsigned int pf, unsigned int vf, 8301 unsigned int nparams, const u32 *params, 8302 const u32 *val, int timeout) 8303 { 8304 struct fw_params_cmd c; 8305 __be32 *p = &c.param[0].mnem; 8306 8307 if (nparams > 7) 8308 return -EINVAL; 8309 8310 memset(&c, 0, sizeof(c)); 8311 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | 8312 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 8313 V_FW_PARAMS_CMD_PFN(pf) | 8314 V_FW_PARAMS_CMD_VFN(vf)); 8315 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 8316 8317 while (nparams--) { 8318 *p++ = cpu_to_be32(*params++); 8319 *p++ = cpu_to_be32(*val++); 8320 } 8321 8322 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout); 8323 } 8324 8325 /** 8326 * t4_set_params - sets FW or device parameters 8327 * @adap: the adapter 8328 * @mbox: mailbox to use for the FW command 8329 * @pf: the PF 8330 * @vf: the VF 8331 * @nparams: the number of parameters 8332 * @params: the parameter names 8333 * @val: the parameter values 8334 * 8335 * Sets the value of FW or device parameters. Up to 7 parameters can be 8336 * specified at once. 
8337 */ 8338 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 8339 unsigned int vf, unsigned int nparams, const u32 *params, 8340 const u32 *val) 8341 { 8342 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val, 8343 FW_CMD_MAX_TIMEOUT); 8344 } 8345 8346 /** 8347 * t4_cfg_pfvf - configure PF/VF resource limits 8348 * @adap: the adapter 8349 * @mbox: mailbox to use for the FW command 8350 * @pf: the PF being configured 8351 * @vf: the VF being configured 8352 * @txq: the max number of egress queues 8353 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 8354 * @rxqi: the max number of interrupt-capable ingress queues 8355 * @rxq: the max number of interruptless ingress queues 8356 * @tc: the PCI traffic class 8357 * @vi: the max number of virtual interfaces 8358 * @cmask: the channel access rights mask for the PF/VF 8359 * @pmask: the port access rights mask for the PF/VF 8360 * @nexact: the maximum number of exact MPS filters 8361 * @rcaps: read capabilities 8362 * @wxcaps: write/execute capabilities 8363 * 8364 * Configures resource limits and capabilities for a physical or virtual 8365 * function. 
8366 */ 8367 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 8368 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 8369 unsigned int rxqi, unsigned int rxq, unsigned int tc, 8370 unsigned int vi, unsigned int cmask, unsigned int pmask, 8371 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 8372 { 8373 struct fw_pfvf_cmd c; 8374 8375 memset(&c, 0, sizeof(c)); 8376 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST | 8377 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | 8378 V_FW_PFVF_CMD_VFN(vf)); 8379 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 8380 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) | 8381 V_FW_PFVF_CMD_NIQ(rxq)); 8382 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) | 8383 V_FW_PFVF_CMD_PMASK(pmask) | 8384 V_FW_PFVF_CMD_NEQ(txq)); 8385 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) | 8386 V_FW_PFVF_CMD_NVI(vi) | 8387 V_FW_PFVF_CMD_NEXACTF(nexact)); 8388 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) | 8389 V_FW_PFVF_CMD_WX_CAPS(wxcaps) | 8390 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); 8391 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 8392 } 8393 8394 /** 8395 * t4_alloc_vi_func - allocate a virtual interface 8396 * @adap: the adapter 8397 * @mbox: mailbox to use for the FW command 8398 * @port: physical port associated with the VI 8399 * @pf: the PF owning the VI 8400 * @vf: the VF owning the VI 8401 * @nmac: number of MAC addresses needed (1 to 5) 8402 * @mac: the MAC addresses of the VI 8403 * @rss_size: size of RSS table slice associated with this VI 8404 * @portfunc: which Port Application Function MAC Address is desired 8405 * @idstype: Intrusion Detection Type 8406 * 8407 * Allocates a virtual interface for the given physical port. If @mac is 8408 * not %NULL it contains the MAC addresses of the VI as assigned by FW. 8409 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW. 
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
		     u8 *vivld, u8 *vin,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* Firmware encodes the MAC address count as (count - 1). */
	c.nmac = nmac - 1;
	/*
	 * NOTE(review): the NORSS flag is stored without cpu_to_be16();
	 * presumably the flag's bit position makes this endian-safe —
	 * confirm against the field definition in t4fw_interface.h.
	 */
	if (!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	/*
	 * Copy back however many of the requested addresses the reply
	 * carries: c.mac at offset 0, then nmac0..nmac3 at consecutive
	 * 6-byte offsets.  The switch deliberately falls through so that
	 * all entries up to @nmac are copied.
	 */
	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHRU */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHRU */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHRU */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));

	/* VF MAC-validity flag and VIN are packed into alloc_to_len16. */
	if (vivld)
		*vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));

	if (vin)
		*vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));

	/* On success the (non-negative) VI id is the return value. */
	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}

/**
 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
@port: physical port associated with the VI 8472 * @pf: the PF owning the VI 8473 * @vf: the VF owning the VI 8474 * @nmac: number of MAC addresses needed (1 to 5) 8475 * @mac: the MAC addresses of the VI 8476 * @rss_size: size of RSS table slice associated with this VI 8477 * 8478 * backwards compatible and convieniance routine to allocate a Virtual 8479 * Interface with a Ethernet Port Application Function and Intrustion 8480 * Detection System disabled. 8481 */ 8482 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 8483 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 8484 unsigned int *rss_size, u8 *vivld, u8 *vin) 8485 { 8486 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size, 8487 vivld, vin, FW_VI_FUNC_ETH, 0); 8488 } 8489 8490 8491 /** 8492 * t4_free_vi - free a virtual interface 8493 * @adap: the adapter 8494 * @mbox: mailbox to use for the FW command 8495 * @pf: the PF owning the VI 8496 * @vf: the VF owning the VI 8497 * @viid: virtual interface identifiler 8498 * 8499 * Free a previously allocated virtual interface. 
8500 */ 8501 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, 8502 unsigned int vf, unsigned int viid) 8503 { 8504 struct fw_vi_cmd c; 8505 8506 memset(&c, 0, sizeof(c)); 8507 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | 8508 F_FW_CMD_REQUEST | 8509 F_FW_CMD_EXEC | 8510 V_FW_VI_CMD_PFN(pf) | 8511 V_FW_VI_CMD_VFN(vf)); 8512 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c)); 8513 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid)); 8514 8515 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 8516 } 8517 8518 /** 8519 * t4_set_rxmode - set Rx properties of a virtual interface 8520 * @adap: the adapter 8521 * @mbox: mailbox to use for the FW command 8522 * @viid: the VI id 8523 * @mtu: the new MTU or -1 8524 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 8525 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 8526 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 8527 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change 8528 * @sleep_ok: if true we may sleep while awaiting command completion 8529 * 8530 * Sets Rx properties of a virtual interface. 
8531 */ 8532 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 8533 int mtu, int promisc, int all_multi, int bcast, int vlanex, 8534 bool sleep_ok) 8535 { 8536 struct fw_vi_rxmode_cmd c; 8537 8538 /* convert to FW values */ 8539 if (mtu < 0) 8540 mtu = M_FW_VI_RXMODE_CMD_MTU; 8541 if (promisc < 0) 8542 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; 8543 if (all_multi < 0) 8544 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; 8545 if (bcast < 0) 8546 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; 8547 if (vlanex < 0) 8548 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; 8549 8550 memset(&c, 0, sizeof(c)); 8551 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | 8552 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 8553 V_FW_VI_RXMODE_CMD_VIID(viid)); 8554 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 8555 c.mtu_to_vlanexen = 8556 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) | 8557 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 8558 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 8559 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | 8560 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); 8561 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 8562 } 8563 8564 /** 8565 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support 8566 * @adap: the adapter 8567 * @viid: the VI id 8568 * @mac: the MAC address 8569 * @mask: the mask 8570 * @vni: the VNI id for the tunnel protocol 8571 * @vni_mask: mask for the VNI id 8572 * @dip_hit: to enable DIP match for the MPS entry 8573 * @lookup_type: MAC address for inner (1) or outer (0) header 8574 * @sleep_ok: call is allowed to sleep 8575 * 8576 * Allocates an MPS entry with specified MAC address and VNI value. 8577 * 8578 * Returns a negative error number or the allocated index for this mac. 
8579 */ 8580 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, 8581 const u8 *addr, const u8 *mask, unsigned int vni, 8582 unsigned int vni_mask, u8 dip_hit, u8 lookup_type, 8583 bool sleep_ok) 8584 { 8585 struct fw_vi_mac_cmd c; 8586 struct fw_vi_mac_vni *p = c.u.exact_vni; 8587 int ret = 0; 8588 u32 val; 8589 8590 memset(&c, 0, sizeof(c)); 8591 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 8592 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 8593 V_FW_VI_MAC_CMD_VIID(viid)); 8594 val = V_FW_CMD_LEN16(1) | 8595 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI); 8596 c.freemacs_to_len16 = cpu_to_be32(val); 8597 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | 8598 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); 8599 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 8600 memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask)); 8601 8602 p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) | 8603 V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) | 8604 V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type)); 8605 p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask)); 8606 8607 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); 8608 if (ret == 0) 8609 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); 8610 return ret; 8611 } 8612 8613 /** 8614 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam 8615 * @adap: the adapter 8616 * @viid: the VI id 8617 * @mac: the MAC address 8618 * @mask: the mask 8619 * @idx: index at which to add this entry 8620 * @port_id: the port index 8621 * @lookup_type: MAC address for inner (1) or outer (0) header 8622 * @sleep_ok: call is allowed to sleep 8623 * 8624 * Adds the mac entry at the specified index using raw mac interface. 8625 * 8626 * Returns a negative error number or the allocated index for this mac. 
8627 */ 8628 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, 8629 const u8 *addr, const u8 *mask, unsigned int idx, 8630 u8 lookup_type, u8 port_id, bool sleep_ok) 8631 { 8632 int ret = 0; 8633 struct fw_vi_mac_cmd c; 8634 struct fw_vi_mac_raw *p = &c.u.raw; 8635 u32 val; 8636 8637 memset(&c, 0, sizeof(c)); 8638 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 8639 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 8640 V_FW_VI_MAC_CMD_VIID(viid)); 8641 val = V_FW_CMD_LEN16(1) | 8642 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW); 8643 c.freemacs_to_len16 = cpu_to_be32(val); 8644 8645 /* Specify that this is an inner mac address */ 8646 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx)); 8647 8648 /* Lookup Type. Outer header: 0, Inner header: 1 */ 8649 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) | 8650 V_DATAPORTNUM(port_id)); 8651 /* Lookup mask and port mask */ 8652 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) | 8653 V_DATAPORTNUM(M_DATAPORTNUM)); 8654 8655 /* Copy the address and the mask */ 8656 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN); 8657 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN); 8658 8659 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); 8660 if (ret == 0) { 8661 ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd)); 8662 if (ret != idx) 8663 ret = -ENOMEM; 8664 } 8665 8666 return ret; 8667 } 8668 8669 /** 8670 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 8671 * @adap: the adapter 8672 * @mbox: mailbox to use for the FW command 8673 * @viid: the VI id 8674 * @free: if true any existing filters for this VI id are first removed 8675 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 8676 * @addr: the MAC address(es) 8677 * @idx: where to store the index of each allocated filter 8678 * @hash: pointer to hash address filter bitmap 8679 * @sleep_ok: call is allowed to sleep 8680 * 8681 * Allocates an exact-match filter for each of the 
supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	/* Hardware limit on the number of exact-match TCAM entries. */
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/*
	 * One FW_VI_MAC_CMD holds at most ARRAY_SIZE(c.u.exact) entries,
	 * so submit the addresses in chunks of that size.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length in 16-byte units, covering only the
		 * entries used in this chunk. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		/* ADD_MAC asks the firmware to pick a free index for each. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
						be16_to_cpu(p->valid_to_idx));

			/* An out-of-range index means FW couldn't place it. */
			if (idx)
				idx[offset+i] = (index >= max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only free pre-existing filters on the first chunk. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Out-of-filter-space counts as (partial) success. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 * t4_free_encap_mac_filt - frees MPS entry at given index
 * @adap: the adapter
 * @viid: the VI id
 * @idx: index of MPS entry to be freed
 * @sleep_ok: call is allowed to sleep
 *
 * Frees the MPS entry at supplied index
 *
 * Returns a negative error number or zero on success
 */
int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
			   int idx, bool sleep_ok)
{
	struct fw_vi_mac_exact *p;
	struct fw_vi_mac_cmd c;
	u8 addr[] = {0,0,0,0,0,0};	/* all-zero address for the freed slot */
	int ret = 0;
	u32 exact;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST |
				   F_FW_CMD_WRITE |
				   V_FW_CMD_EXEC(0) |
				   V_FW_VI_MAC_CMD_VIID(viid));
	exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					  exact |
					  V_FW_CMD_LEN16(1));
	p = c.u.exact;
	/* VALID plus an explicit index selects the entry to overwrite. */
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	return ret;
}

/** 8802 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam 8803 * @adap: the adapter 8804 * @viid: the VI id 8805 * @addr: the MAC address 8806 * @mask: the mask 8807 * @idx: index of the entry in mps tcam 8808 * @lookup_type: MAC address for inner (1) or outer (0) header 8809 * @port_id: the port index 8810 * @sleep_ok: call is allowed to sleep 8811 * 8812 * Removes the mac entry at the specified index using raw mac interface. 8813 * 8814 * Returns a negative error number on failure. 8815 */ 8816 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, 8817 const u8 *addr, const u8 *mask, unsigned int idx, 8818 u8 lookup_type, u8 port_id, bool sleep_ok) 8819 { 8820 struct fw_vi_mac_cmd c; 8821 struct fw_vi_mac_raw *p = &c.u.raw; 8822 u32 raw; 8823 8824 memset(&c, 0, sizeof(c)); 8825 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 8826 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 8827 V_FW_CMD_EXEC(0) | 8828 V_FW_VI_MAC_CMD_VIID(viid)); 8829 raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW); 8830 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) | 8831 raw | 8832 V_FW_CMD_LEN16(1)); 8833 8834 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) | 8835 FW_VI_MAC_ID_BASED_FREE); 8836 8837 /* Lookup Type. 
Outer header: 0, Inner header: 1 */ 8838 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) | 8839 V_DATAPORTNUM(port_id)); 8840 /* Lookup mask and port mask */ 8841 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) | 8842 V_DATAPORTNUM(M_DATAPORTNUM)); 8843 8844 /* Copy the address and the mask */ 8845 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN); 8846 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN); 8847 8848 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); 8849 } 8850 8851 /** 8852 * t4_free_mac_filt - frees exact-match filters of given MAC addresses 8853 * @adap: the adapter 8854 * @mbox: mailbox to use for the FW command 8855 * @viid: the VI id 8856 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 8857 * @addr: the MAC address(es) 8858 * @sleep_ok: call is allowed to sleep 8859 * 8860 * Frees the exact-match filter for each of the supplied addresses 8861 * 8862 * Returns a negative error number or the number of filters freed. 8863 */ 8864 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, 8865 unsigned int viid, unsigned int naddr, 8866 const u8 **addr, bool sleep_ok) 8867 { 8868 int offset, ret = 0; 8869 struct fw_vi_mac_cmd c; 8870 unsigned int nfilters = 0; 8871 unsigned int max_naddr = is_t4(adap->params.chip) ? 8872 NUM_MPS_CLS_SRAM_L_INSTANCES : 8873 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 8874 unsigned int rem = naddr; 8875 8876 if (naddr > max_naddr) 8877 return -EINVAL; 8878 8879 for (offset = 0; offset < (int)naddr ; /**/) { 8880 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) 8881 ? 
rem 8882 : ARRAY_SIZE(c.u.exact)); 8883 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 8884 u.exact[fw_naddr]), 16); 8885 struct fw_vi_mac_exact *p; 8886 int i; 8887 8888 memset(&c, 0, sizeof(c)); 8889 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 8890 F_FW_CMD_REQUEST | 8891 F_FW_CMD_WRITE | 8892 V_FW_CMD_EXEC(0) | 8893 V_FW_VI_MAC_CMD_VIID(viid)); 8894 c.freemacs_to_len16 = 8895 cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) | 8896 V_FW_CMD_LEN16(len16)); 8897 8898 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) { 8899 p->valid_to_idx = cpu_to_be16( 8900 F_FW_VI_MAC_CMD_VALID | 8901 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE)); 8902 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); 8903 } 8904 8905 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); 8906 if (ret) 8907 break; 8908 8909 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 8910 u16 index = G_FW_VI_MAC_CMD_IDX( 8911 be16_to_cpu(p->valid_to_idx)); 8912 8913 if (index < max_naddr) 8914 nfilters++; 8915 } 8916 8917 offset += fw_naddr; 8918 rem -= fw_naddr; 8919 } 8920 8921 if (ret == 0) 8922 ret = nfilters; 8923 return ret; 8924 } 8925 8926 /** 8927 * t4_change_mac - modifies the exact-match filter for a MAC address 8928 * @adap: the adapter 8929 * @mbox: mailbox to use for the FW command 8930 * @viid: the VI id 8931 * @idx: index of existing filter for old value of MAC address, or -1 8932 * @addr: the new MAC address value 8933 * @persist: whether a new MAC allocation should be persistent 8934 * @add_smt: if true also add the address to the HW SMT 8935 * 8936 * Modifies an exact-match filter and sets it to the new MAC address if 8937 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 8938 * latter case the address is added persistently if @persist is %true. 
8939 * 8940 * Note that in general it is not possible to modify the value of a given 8941 * filter so the generic way to modify an address filter is to free the one 8942 * being used by the old address value and allocate a new filter for the 8943 * new address value. 8944 * 8945 * Returns a negative error number or the index of the filter with the new 8946 * MAC value. Note that this index may differ from @idx. 8947 */ 8948 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 8949 int idx, const u8 *addr, bool persist, u8 *smt_idx) 8950 { 8951 /* This will add this mac address to the destination TCAM region */ 8952 return t4_add_mac(adap, mbox, viid, idx, addr, persist, smt_idx, 0); 8953 } 8954 8955 /** 8956 * t4_set_addr_hash - program the MAC inexact-match hash filter 8957 * @adap: the adapter 8958 * @mbox: mailbox to use for the FW command 8959 * @viid: the VI id 8960 * @ucast: whether the hash filter should also match unicast addresses 8961 * @vec: the value to be written to the hash filter 8962 * @sleep_ok: call is allowed to sleep 8963 * 8964 * Sets the 64-bit inexact-match hash filter for a virtual interface. 
8965 */ 8966 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 8967 bool ucast, u64 vec, bool sleep_ok) 8968 { 8969 struct fw_vi_mac_cmd c; 8970 u32 val; 8971 8972 memset(&c, 0, sizeof(c)); 8973 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 8974 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 8975 V_FW_VI_ENABLE_CMD_VIID(viid)); 8976 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) | 8977 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1); 8978 c.freemacs_to_len16 = cpu_to_be32(val); 8979 c.u.hash.hashvec = cpu_to_be64(vec); 8980 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 8981 } 8982 8983 /** 8984 * t4_enable_vi_params - enable/disable a virtual interface 8985 * @adap: the adapter 8986 * @mbox: mailbox to use for the FW command 8987 * @viid: the VI id 8988 * @rx_en: 1=enable Rx, 0=disable Rx 8989 * @tx_en: 1=enable Tx, 0=disable Tx 8990 * @dcb_en: 1=enable delivery of Data Center Bridging messages. 8991 * 8992 * Enables/disables a virtual interface. Note that setting DCB Enable 8993 * only makes sense when enabling a Virtual Interface ... 
8994 */ 8995 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, 8996 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) 8997 { 8998 struct fw_vi_enable_cmd c; 8999 9000 memset(&c, 0, sizeof(c)); 9001 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | 9002 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9003 V_FW_VI_ENABLE_CMD_VIID(viid)); 9004 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) | 9005 V_FW_VI_ENABLE_CMD_EEN(tx_en) | 9006 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | 9007 FW_LEN16(c)); 9008 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); 9009 } 9010 9011 /** 9012 * t4_enable_vi - enable/disable a virtual interface 9013 * @adap: the adapter 9014 * @mbox: mailbox to use for the FW command 9015 * @viid: the VI id 9016 * @rx_en: 1=enable Rx, 0=disable Rx 9017 * @tx_en: 1=enable Tx, 0=disable Tx 9018 * 9019 * Enables/disables a virtual interface. Note that setting DCB Enable 9020 * only makes sense when enabling a Virtual Interface ... 9021 */ 9022 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 9023 bool rx_en, bool tx_en) 9024 { 9025 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); 9026 } 9027 9028 /** 9029 * t4_enable_pi_params - enable/disable a Port's Virtual Interface 9030 * @adap: the adapter 9031 * @mbox: mailbox to use for the FW command 9032 * @pi: the Port Information structure 9033 * @rx_en: 1=enable Rx, 0=disable Rx 9034 * @tx_en: 1=enable Tx, 0=disable Tx 9035 * @dcb_en: 1=enable delivery of Data Center Bridging messages. 9036 * 9037 * Enables/disables a Port's Virtual Interface. Note that setting DCB 9038 * Enable only makes sense when enabling a Virtual Interface ... 9039 * If the Virtual Interface enable/disable operation is successful, 9040 * we notify the OS-specific code of a potential Link Status change 9041 * via the OS Contract API t4_os_link_changed(). 
9042 */ 9043 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox, 9044 struct port_info *pi, 9045 bool rx_en, bool tx_en, bool dcb_en) 9046 { 9047 int ret = t4_enable_vi_params(adap, mbox, pi->viid, 9048 rx_en, tx_en, dcb_en); 9049 if (ret) 9050 return ret; 9051 t4_os_link_changed(adap, pi->port_id, 9052 rx_en && tx_en && pi->link_cfg.link_ok); 9053 return 0; 9054 } 9055 9056 /** 9057 * t4_identify_port - identify a VI's port by blinking its LED 9058 * @adap: the adapter 9059 * @mbox: mailbox to use for the FW command 9060 * @viid: the VI id 9061 * @nblinks: how many times to blink LED at 2.5 Hz 9062 * 9063 * Identifies a VI's port by blinking its LED. 9064 */ 9065 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, 9066 unsigned int nblinks) 9067 { 9068 struct fw_vi_enable_cmd c; 9069 9070 memset(&c, 0, sizeof(c)); 9071 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | 9072 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9073 V_FW_VI_ENABLE_CMD_VIID(viid)); 9074 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); 9075 c.blinkdur = cpu_to_be16(nblinks); 9076 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9077 } 9078 9079 /** 9080 * t4_iq_stop - stop an ingress queue and its FLs 9081 * @adap: the adapter 9082 * @mbox: mailbox to use for the FW command 9083 * @pf: the PF owning the queues 9084 * @vf: the VF owning the queues 9085 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 9086 * @iqid: ingress queue id 9087 * @fl0id: FL0 queue id or 0xffff if no attached FL0 9088 * @fl1id: FL1 queue id or 0xffff if no attached FL1 9089 * 9090 * Stops an ingress queue and its associated FLs, if any. This causes 9091 * any current or future data/messages destined for these queues to be 9092 * tossed. 
9093 */ 9094 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 9095 unsigned int vf, unsigned int iqtype, unsigned int iqid, 9096 unsigned int fl0id, unsigned int fl1id) 9097 { 9098 struct fw_iq_cmd c; 9099 9100 memset(&c, 0, sizeof(c)); 9101 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 9102 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 9103 V_FW_IQ_CMD_VFN(vf)); 9104 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c)); 9105 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 9106 c.iqid = cpu_to_be16(iqid); 9107 c.fl0id = cpu_to_be16(fl0id); 9108 c.fl1id = cpu_to_be16(fl1id); 9109 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9110 } 9111 9112 /** 9113 * t4_iq_free - free an ingress queue and its FLs 9114 * @adap: the adapter 9115 * @mbox: mailbox to use for the FW command 9116 * @pf: the PF owning the queues 9117 * @vf: the VF owning the queues 9118 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 9119 * @iqid: ingress queue id 9120 * @fl0id: FL0 queue id or 0xffff if no attached FL0 9121 * @fl1id: FL1 queue id or 0xffff if no attached FL1 9122 * 9123 * Frees an ingress queue and its associated FLs, if any. 
9124 */ 9125 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 9126 unsigned int vf, unsigned int iqtype, unsigned int iqid, 9127 unsigned int fl0id, unsigned int fl1id) 9128 { 9129 struct fw_iq_cmd c; 9130 9131 memset(&c, 0, sizeof(c)); 9132 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 9133 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 9134 V_FW_IQ_CMD_VFN(vf)); 9135 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); 9136 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 9137 c.iqid = cpu_to_be16(iqid); 9138 c.fl0id = cpu_to_be16(fl0id); 9139 c.fl1id = cpu_to_be16(fl1id); 9140 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9141 } 9142 9143 /** 9144 * t4_eth_eq_free - free an Ethernet egress queue 9145 * @adap: the adapter 9146 * @mbox: mailbox to use for the FW command 9147 * @pf: the PF owning the queue 9148 * @vf: the VF owning the queue 9149 * @eqid: egress queue id 9150 * 9151 * Frees an Ethernet egress queue. 9152 */ 9153 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 9154 unsigned int vf, unsigned int eqid) 9155 { 9156 struct fw_eq_eth_cmd c; 9157 9158 memset(&c, 0, sizeof(c)); 9159 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | 9160 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9161 V_FW_EQ_ETH_CMD_PFN(pf) | 9162 V_FW_EQ_ETH_CMD_VFN(vf)); 9163 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); 9164 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); 9165 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9166 } 9167 9168 /** 9169 * t4_ctrl_eq_free - free a control egress queue 9170 * @adap: the adapter 9171 * @mbox: mailbox to use for the FW command 9172 * @pf: the PF owning the queue 9173 * @vf: the VF owning the queue 9174 * @eqid: egress queue id 9175 * 9176 * Frees a control egress queue. 
9177 */ 9178 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 9179 unsigned int vf, unsigned int eqid) 9180 { 9181 struct fw_eq_ctrl_cmd c; 9182 9183 memset(&c, 0, sizeof(c)); 9184 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | 9185 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9186 V_FW_EQ_CTRL_CMD_PFN(pf) | 9187 V_FW_EQ_CTRL_CMD_VFN(vf)); 9188 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); 9189 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid)); 9190 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9191 } 9192 9193 /** 9194 * t4_ofld_eq_free - free an offload egress queue 9195 * @adap: the adapter 9196 * @mbox: mailbox to use for the FW command 9197 * @pf: the PF owning the queue 9198 * @vf: the VF owning the queue 9199 * @eqid: egress queue id 9200 * 9201 * Frees a control egress queue. 9202 */ 9203 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 9204 unsigned int vf, unsigned int eqid) 9205 { 9206 struct fw_eq_ofld_cmd c; 9207 9208 memset(&c, 0, sizeof(c)); 9209 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | 9210 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9211 V_FW_EQ_OFLD_CMD_PFN(pf) | 9212 V_FW_EQ_OFLD_CMD_VFN(vf)); 9213 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); 9214 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid)); 9215 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9216 } 9217 9218 /** 9219 * t4_link_down_rc_str - return a string for a Link Down Reason Code 9220 * @link_down_rc: Link Down Reason Code 9221 * 9222 * Returns a string representation of the Link Down Reason Code. 
9223 */ 9224 const char *t4_link_down_rc_str(unsigned char link_down_rc) 9225 { 9226 static const char * const reason[] = { 9227 "Link Down", 9228 "Remote Fault", 9229 "Auto-negotiation Failure", 9230 "Reserved", 9231 "Insufficient Airflow", 9232 "Unable To Determine Reason", 9233 "No RX Signal Detected", 9234 "Reserved", 9235 }; 9236 9237 if (link_down_rc >= ARRAY_SIZE(reason)) 9238 return "Bad Reason Code"; 9239 9240 return reason[link_down_rc]; 9241 } 9242 9243 /** 9244 * Return the highest speed set in the port capabilities, in Mb/s. 9245 */ 9246 static unsigned int fwcap_to_speed(fw_port_cap32_t caps) 9247 { 9248 #define TEST_SPEED_RETURN(__caps_speed, __speed) \ 9249 do { \ 9250 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ 9251 return __speed; \ 9252 } while (0) 9253 9254 TEST_SPEED_RETURN(400G, 400000); 9255 TEST_SPEED_RETURN(200G, 200000); 9256 TEST_SPEED_RETURN(100G, 100000); 9257 TEST_SPEED_RETURN(50G, 50000); 9258 TEST_SPEED_RETURN(40G, 40000); 9259 TEST_SPEED_RETURN(25G, 25000); 9260 TEST_SPEED_RETURN(10G, 10000); 9261 TEST_SPEED_RETURN(1G, 1000); 9262 TEST_SPEED_RETURN(100M, 100); 9263 9264 #undef TEST_SPEED_RETURN 9265 9266 return 0; 9267 } 9268 9269 /** 9270 * fwcap_to_fwspeed - return highest speed in Port Capabilities 9271 * @acaps: advertised Port Capabilities 9272 * 9273 * Get the highest speed for the port from the advertised Port 9274 * Capabilities. It will be either the highest speed from the list of 9275 * speeds or whatever user has set using ethtool. 
9276 */ 9277 static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps) 9278 { 9279 #define TEST_SPEED_RETURN(__caps_speed) \ 9280 do { \ 9281 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \ 9282 return FW_PORT_CAP32_SPEED_##__caps_speed; \ 9283 } while (0) 9284 9285 TEST_SPEED_RETURN(400G); 9286 TEST_SPEED_RETURN(200G); 9287 TEST_SPEED_RETURN(100G); 9288 TEST_SPEED_RETURN(50G); 9289 TEST_SPEED_RETURN(40G); 9290 TEST_SPEED_RETURN(25G); 9291 TEST_SPEED_RETURN(10G); 9292 TEST_SPEED_RETURN(1G); 9293 TEST_SPEED_RETURN(100M); 9294 9295 #undef TEST_SPEED_RETURN 9296 9297 return 0; 9298 } 9299 9300 /** 9301 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities 9302 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value 9303 * 9304 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new 9305 * 32-bit Port Capabilities value. 9306 */ 9307 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus) 9308 { 9309 fw_port_cap32_t linkattr = 0; 9310 9311 /* 9312 * Unfortunately the format of the Link Status in the old 9313 * 16-bit Port Information message isn't the same as the 9314 * 16-bit Port Capabilities bitfield used everywhere else ... 
9315 */ 9316 if (lstatus & F_FW_PORT_CMD_RXPAUSE) 9317 linkattr |= FW_PORT_CAP32_FC_RX; 9318 if (lstatus & F_FW_PORT_CMD_TXPAUSE) 9319 linkattr |= FW_PORT_CAP32_FC_TX; 9320 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) 9321 linkattr |= FW_PORT_CAP32_SPEED_100M; 9322 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) 9323 linkattr |= FW_PORT_CAP32_SPEED_1G; 9324 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) 9325 linkattr |= FW_PORT_CAP32_SPEED_10G; 9326 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G)) 9327 linkattr |= FW_PORT_CAP32_SPEED_25G; 9328 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) 9329 linkattr |= FW_PORT_CAP32_SPEED_40G; 9330 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G)) 9331 linkattr |= FW_PORT_CAP32_SPEED_100G; 9332 9333 return linkattr; 9334 } 9335 9336 /** 9337 * t4_handle_get_port_info - process a FW reply message 9338 * @pi: the port info 9339 * @rpl: start of the FW message 9340 * 9341 * Processes a GET_PORT_INFO FW reply message. 9342 */ 9343 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) 9344 { 9345 const struct fw_port_cmd *cmd = (const void *)rpl; 9346 int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16)); 9347 struct adapter *adapter = pi->adapter; 9348 struct link_config *lc = &pi->link_cfg; 9349 int link_ok, linkdnrc; 9350 enum fw_port_type port_type; 9351 enum fw_port_module_type mod_type; 9352 unsigned int speed, fc, fec; 9353 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; 9354 boolean_t fec_changed; 9355 9356 /* 9357 * Extract the various fields from the Port Information message. 
9358 */ 9359 switch (action) { 9360 case FW_PORT_ACTION_GET_PORT_INFO: { 9361 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); 9362 9363 link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0; 9364 linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus); 9365 port_type = G_FW_PORT_CMD_PTYPE(lstatus); 9366 mod_type = G_FW_PORT_CMD_MODTYPE(lstatus); 9367 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap)); 9368 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap)); 9369 lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap)); 9370 linkattr = lstatus_to_fwcap(lstatus); 9371 break; 9372 } 9373 9374 case FW_PORT_ACTION_GET_PORT_INFO32: { 9375 u32 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32); 9376 9377 link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0; 9378 linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32); 9379 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32); 9380 mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32); 9381 pcaps = be32_to_cpu(cmd->u.info32.pcaps32); 9382 acaps = be32_to_cpu(cmd->u.info32.acaps32); 9383 lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32); 9384 linkattr = be32_to_cpu(cmd->u.info32.linkattr32); 9385 break; 9386 } 9387 9388 default: 9389 CH_ERR(adapter, "Handle Port Information: Bad Command/Action %#x\n", 9390 be32_to_cpu(cmd->action_to_len16)); 9391 return; 9392 } 9393 9394 fec = fwcap_to_cc_fec(linkattr); 9395 fc = fwcap_to_cc_pause(linkattr); 9396 speed = fwcap_to_speed(linkattr); 9397 9398 /* 9399 * Reset state for communicating new Transceiver Module status and 9400 * whether the OS-dependent layer wants us to redo the current 9401 * "sticky" L1 Configure Link Parameters. 9402 */ 9403 lc->new_module = false; 9404 lc->redo_l1cfg = false; 9405 9406 if (mod_type != pi->mod_type) { 9407 /* 9408 * With the newer SFP28 and QSFP28 Transceiver Module Types, 9409 * various fundamental Port Capabilities which used to be 9410 * immutable can now change radically. 
We can now have 9411 * Speeds, Auto-Negotiation, Forward Error Correction, etc. 9412 * all change based on what Transceiver Module is inserted. 9413 * So we need to record the Physical "Port" Capabilities on 9414 * every Transceiver Module change. 9415 */ 9416 lc->pcaps = pcaps; 9417 9418 /* 9419 * When a new Transceiver Module is inserted, the Firmware 9420 * will examine its i2c EPROM to determine its type and 9421 * general operating parameters including things like Forward 9422 * Error Control, etc. Various IEEE 802.3 standards dictate 9423 * how to interpret these i2c values to determine default 9424 * "sutomatic" settings. We record these for future use when 9425 * the user explicitly requests these standards-based values. 9426 */ 9427 lc->def_acaps = acaps; 9428 9429 /* 9430 * Some versions of the early T6 Firmware "cheated" when 9431 * handling different Transceiver Modules by changing the 9432 * underlaying Port Type reported to the Host Drivers. As 9433 * such we need to capture whatever Port Type the Firmware 9434 * sends us and record it in case it's different from what we 9435 * were told earlier. Unfortunately, since Firmware is 9436 * forever, we'll need to keep this code here forever, but in 9437 * later T6 Firmware it should just be an assignment of the 9438 * same value already recorded. 9439 */ 9440 pi->port_type = port_type; 9441 9442 /* 9443 * Record new Module Type information. 9444 */ 9445 pi->mod_type = mod_type; 9446 9447 /* 9448 * Let the OS-dependent layer know if we have a new 9449 * Transceiver Module inserted. 9450 */ 9451 lc->new_module = t4_is_inserted_mod_type(mod_type); 9452 9453 t4_os_portmod_changed(adapter, pi->port_id); 9454 } 9455 9456 fec_changed = fec != (lc->requested_fec == FEC_AUTO ? 
9457 lc->fec : lc->requested_fec); 9458 if (link_ok != lc->link_ok || speed != lc->speed || 9459 fc != lc->fc || fec_changed) { 9460 /* something changed */ 9461 if (!link_ok && lc->link_ok) { 9462 lc->link_down_rc = linkdnrc; 9463 CH_WARN_RATELIMIT(adapter, 9464 "Port %d link down, reason: %s\n", 9465 pi->tx_chan, t4_link_down_rc_str(linkdnrc)); 9466 } 9467 lc->link_ok = link_ok; 9468 lc->speed = speed; 9469 lc->fc = fc; 9470 lc->fec = fec; 9471 if (fec_changed) { 9472 /* 9473 * If the fec is not as requested we need 9474 * to save the l1 config. 9475 */ 9476 lc->redo_l1cfg = B_TRUE; 9477 } 9478 9479 lc->lpacaps = lpacaps; 9480 lc->acaps = acaps & ADVERT_MASK; 9481 9482 /* If we're not physically capable of Auto-Negotiation, note 9483 * this as Auto-Negotiation disabled. Otherwise, we track 9484 * what Auto-Negotiation settings we have. Note parallel 9485 * structure in t4_link_l1cfg_core() and init_link_config(). 9486 */ 9487 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { 9488 lc->autoneg = AUTONEG_DISABLE; 9489 } else if (lc->acaps & FW_PORT_CAP32_ANEG) { 9490 lc->autoneg = AUTONEG_ENABLE; 9491 } else { 9492 /* When Autoneg is disabled, user needs to set 9493 * single speed. 9494 * Similar to cxgb4_ethtool.c: set_link_ksettings 9495 */ 9496 lc->acaps = 0; 9497 lc->speed_caps = fwcap_to_fwspeed(acaps); 9498 lc->autoneg = AUTONEG_DISABLE; 9499 } 9500 9501 t4_os_link_changed(adapter, pi->port_id, link_ok); 9502 } 9503 9504 /* 9505 * If we have a new Transceiver Module and the OS-dependent code has 9506 * told us that it wants us to redo whatever "sticky" L1 Configuration 9507 * Link Parameters are set, do that now. 9508 */ 9509 if (lc->new_module && lc->redo_l1cfg) { 9510 struct link_config old_lc; 9511 int ret; 9512 9513 /* 9514 * Save the current L1 Configuration and restore it if an 9515 * error occurs. We probably should fix the l1_cfg*() 9516 * routines not to change the link_config when an error 9517 * occurs ... 
9518 */ 9519 old_lc = *lc; 9520 ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc); 9521 if (ret) { 9522 *lc = old_lc; 9523 CH_WARN(adapter, 9524 "Attempt to update new Transceiver Module settings failed\n"); 9525 } 9526 } 9527 lc->new_module = false; 9528 lc->redo_l1cfg = false; 9529 } 9530 9531 /** 9532 * t4_update_port_info - retrieve and update port information if changed 9533 * @pi: the port_info 9534 * 9535 * We issue a Get Port Information Command to the Firmware and, if 9536 * successful, we check to see if anything is different from what we 9537 * last recorded and update things accordingly. 9538 */ 9539 int t4_update_port_info(struct port_info *pi) 9540 { 9541 unsigned int fw_caps = pi->adapter->params.fw_caps_support; 9542 struct fw_port_cmd port_cmd; 9543 int ret; 9544 9545 memset(&port_cmd, 0, sizeof port_cmd); 9546 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | 9547 F_FW_CMD_REQUEST | F_FW_CMD_READ | 9548 V_FW_PORT_CMD_PORTID(pi->lport)); 9549 port_cmd.action_to_len16 = cpu_to_be32( 9550 V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 9551 ? FW_PORT_ACTION_GET_PORT_INFO 9552 : FW_PORT_ACTION_GET_PORT_INFO32) | 9553 FW_LEN16(port_cmd)); 9554 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox, 9555 &port_cmd, sizeof(port_cmd), &port_cmd); 9556 if (ret) 9557 return ret; 9558 9559 t4_handle_get_port_info(pi, (__be64 *)&port_cmd); 9560 return 0; 9561 } 9562 9563 /** 9564 * t4_get_link_params - retrieve basic link parameters for given port 9565 * @pi: the port 9566 * @link_okp: value return pointer for link up/down 9567 * @speedp: value return pointer for speed (Mb/s) 9568 * @mtup: value return pointer for mtu 9569 * 9570 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s), 9571 * and MTU for a specified port. A negative error is returned on 9572 * failure; 0 on success. 
9573 */ 9574 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, 9575 unsigned int *speedp, unsigned int *mtup) 9576 { 9577 unsigned int fw_caps = pi->adapter->params.fw_caps_support; 9578 struct fw_port_cmd port_cmd; 9579 unsigned int action, link_ok, mtu; 9580 fw_port_cap32_t linkattr; 9581 int ret; 9582 9583 memset(&port_cmd, 0, sizeof port_cmd); 9584 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | 9585 F_FW_CMD_REQUEST | F_FW_CMD_READ | 9586 V_FW_PORT_CMD_PORTID(pi->tx_chan)); 9587 action = (fw_caps == FW_CAPS16 9588 ? FW_PORT_ACTION_GET_PORT_INFO 9589 : FW_PORT_ACTION_GET_PORT_INFO32); 9590 port_cmd.action_to_len16 = cpu_to_be32( 9591 V_FW_PORT_CMD_ACTION(action) | 9592 FW_LEN16(port_cmd)); 9593 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox, 9594 &port_cmd, sizeof(port_cmd), &port_cmd); 9595 if (ret) 9596 return ret; 9597 9598 if (action == FW_PORT_ACTION_GET_PORT_INFO) { 9599 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype); 9600 9601 link_ok = !!(lstatus & F_FW_PORT_CMD_LSTATUS); 9602 linkattr = lstatus_to_fwcap(lstatus); 9603 mtu = be16_to_cpu(port_cmd.u.info.mtu);; 9604 } else { 9605 u32 lstatus32 = be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32); 9606 9607 link_ok = !!(lstatus32 & F_FW_PORT_CMD_LSTATUS32); 9608 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32); 9609 mtu = G_FW_PORT_CMD_MTU32( 9610 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32)); 9611 } 9612 9613 *link_okp = link_ok; 9614 *speedp = fwcap_to_speed(linkattr); 9615 *mtup = mtu; 9616 9617 return 0; 9618 } 9619 9620 /** 9621 * t4_handle_fw_rpl - process a FW reply message 9622 * @adap: the adapter 9623 * @rpl: start of the FW message 9624 * 9625 * Processes a FW message, such as link state change messages. 9626 */ 9627 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) 9628 { 9629 u8 opcode = *(const u8 *)rpl; 9630 9631 /* 9632 * This might be a port command ... this simplifies the following 9633 * conditionals ... 
We can get away with pre-dereferencing 9634 * action_to_len16 because it's in the first 16 bytes and all messages 9635 * will be at least that long. 9636 */ 9637 const struct fw_port_cmd *p = (const void *)rpl; 9638 unsigned int action = 9639 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16)); 9640 9641 if (opcode == FW_PORT_CMD && 9642 (action == FW_PORT_ACTION_GET_PORT_INFO || 9643 action == FW_PORT_ACTION_GET_PORT_INFO32)) { 9644 int i; 9645 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid)); 9646 struct port_info *pi = NULL; 9647 9648 for_each_port(adap, i) { 9649 pi = adap2pinfo(adap, i); 9650 if (pi->lport == chan) 9651 break; 9652 } 9653 9654 t4_handle_get_port_info(pi, rpl); 9655 } else { 9656 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode); 9657 return -EINVAL; 9658 } 9659 return 0; 9660 } 9661 9662 /** 9663 * get_pci_mode - determine a card's PCI mode 9664 * @adapter: the adapter 9665 * @p: where to store the PCI settings 9666 * 9667 * Determines a card's PCI mode and associated parameters, such as speed 9668 * and width. 9669 */ 9670 static void get_pci_mode(struct adapter *adapter, 9671 struct pci_params *p) 9672 { 9673 u16 val; 9674 u32 pcie_cap; 9675 9676 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 9677 if (pcie_cap) { 9678 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val); 9679 p->speed = val & PCI_EXP_LNKSTA_CLS; 9680 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; 9681 } 9682 } 9683 9684 /** 9685 * init_link_config - initialize a link's SW state 9686 * @lc: pointer to structure holding the link state 9687 * @pcaps: link Port Capabilities 9688 * @acaps: link current Advertised Port Capabilities 9689 * 9690 * Initializes the SW state maintained for each link, including the link's 9691 * capabilities and default speed/flow-control/autonegotiation settings. 
9692 */ 9693 static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, 9694 fw_port_cap32_t acaps) 9695 { 9696 lc->pcaps = pcaps; 9697 lc->def_acaps = acaps; 9698 lc->lpacaps = 0; 9699 lc->speed_caps = 0; 9700 lc->speed = 0; 9701 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; 9702 9703 if (fec_supported(pcaps)) { 9704 /* 9705 * For Forward Error Control, we default to whatever the 9706 * Firmware tells us the Link is currently advertising. 9707 * We also retain any overrides set. 9708 */ 9709 if (lc->requested_fec == 0) 9710 lc->requested_fec = FEC_AUTO; 9711 lc->fec = fwcap_to_cc_fec(lc->def_acaps); 9712 } else { 9713 lc->requested_fec = FEC_NONE; 9714 lc->fec = FEC_NONE; 9715 } 9716 9717 /* If the Port is capable of Auto-Negtotiation, initialize it as 9718 * "enabled" and copy over all of the Physical Port Capabilities 9719 * to the Advertised Port Capabilities. Otherwise mark it as 9720 * Auto-Negotiate disabled and select the highest supported speed 9721 * for the link. Note parallel structure in t4_link_l1cfg_core() 9722 * and t4_handle_get_port_info(). 9723 */ 9724 if (lc->pcaps & FW_PORT_CAP32_ANEG) { 9725 lc->acaps = lc->pcaps & ADVERT_MASK; 9726 lc->autoneg = AUTONEG_ENABLE; 9727 lc->requested_fc |= PAUSE_AUTONEG; 9728 } else { 9729 lc->acaps = 0; 9730 lc->autoneg = AUTONEG_DISABLE; 9731 lc->speed_caps = fwcap_to_fwspeed(acaps); 9732 } 9733 } 9734 9735 /** 9736 * t4_wait_dev_ready - wait till to reads of registers work 9737 * 9738 * Right after the device is RESET is can take a small amount of time 9739 * for it to respond to register reads. Until then, all reads will 9740 * return either 0xff...ff or 0xee...ee. Return an error if reads 9741 * don't work within a reasonable time frame. 
9742 */ 9743 int t4_wait_dev_ready(struct adapter *adapter) 9744 { 9745 u32 whoami; 9746 9747 whoami = t4_read_reg(adapter, A_PL_WHOAMI); 9748 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS) 9749 return 0; 9750 9751 msleep(500); 9752 whoami = t4_read_reg(adapter, A_PL_WHOAMI); 9753 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS) 9754 return 0; 9755 9756 CH_ERR(adapter, "Device didn't become ready for access, " 9757 "whoami = %#x\n", whoami); 9758 return -EIO; 9759 } 9760 9761 struct flash_desc { 9762 u32 vendor_and_model_id; 9763 u32 size_mb; 9764 }; 9765 9766 int t4_get_flash_params(struct adapter *adapter) 9767 { 9768 /* 9769 * Table for non-standard supported Flash parts. Note, all Flash 9770 * parts must have 64KB sectors. 9771 */ 9772 static struct flash_desc supported_flash[] = { 9773 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ 9774 }; 9775 9776 int ret; 9777 u32 flashid = 0; 9778 unsigned int part, manufacturer; 9779 unsigned int density, size = 0; 9780 9781 9782 /* 9783 * Issue a Read ID Command to the Flash part. We decode supported 9784 * Flash parts and their sizes from this. There's a newer Query 9785 * Command which can retrieve detailed geometry information but many 9786 * Flash parts don't support it. 9787 */ 9788 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID); 9789 if (!ret) 9790 ret = sf1_read(adapter, 3, 0, 1, &flashid); 9791 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 9792 if (ret < 0) 9793 return ret; 9794 9795 /* 9796 * Check to see if it's one of our non-standard supported Flash parts. 9797 */ 9798 for (part = 0; part < ARRAY_SIZE(supported_flash); part++) 9799 if (supported_flash[part].vendor_and_model_id == flashid) { 9800 adapter->params.sf_size = 9801 supported_flash[part].size_mb; 9802 adapter->params.sf_nsec = 9803 adapter->params.sf_size / SF_SEC_SIZE; 9804 goto found; 9805 } 9806 9807 /* 9808 * Decode Flash part size. 
The code below looks repetative with 9809 * common encodings, but that's not guaranteed in the JEDEC 9810 * specification for the Read JADEC ID command. The only thing that 9811 * we're guaranteed by the JADEC specification is where the 9812 * Manufacturer ID is in the returned result. After that each 9813 * Manufacturer ~could~ encode things completely differently. 9814 * Note, all Flash parts must have 64KB sectors. 9815 */ 9816 manufacturer = flashid & 0xff; 9817 switch (manufacturer) { 9818 case 0x20: { /* Micron/Numonix */ 9819 /* 9820 * This Density -> Size decoding table is taken from Micron 9821 * Data Sheets. 9822 */ 9823 density = (flashid >> 16) & 0xff; 9824 switch (density) { 9825 case 0x14: size = 1 << 20; break; /* 1MB */ 9826 case 0x15: size = 1 << 21; break; /* 2MB */ 9827 case 0x16: size = 1 << 22; break; /* 4MB */ 9828 case 0x17: size = 1 << 23; break; /* 8MB */ 9829 case 0x18: size = 1 << 24; break; /* 16MB */ 9830 case 0x19: size = 1 << 25; break; /* 32MB */ 9831 case 0x20: size = 1 << 26; break; /* 64MB */ 9832 case 0x21: size = 1 << 27; break; /* 128MB */ 9833 case 0x22: size = 1 << 28; break; /* 256MB */ 9834 } 9835 break; 9836 } 9837 9838 case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */ 9839 /* 9840 * This Density -> Size decoding table is taken from ISSI 9841 * Data Sheets. 9842 */ 9843 density = (flashid >> 16) & 0xff; 9844 switch (density) { 9845 case 0x16: size = 1 << 25; break; /* 32MB */ 9846 case 0x17: size = 1 << 26; break; /* 64MB */ 9847 } 9848 break; 9849 } 9850 9851 case 0xc2: { /* Macronix */ 9852 /* 9853 * This Density -> Size decoding table is taken from Macronix 9854 * Data Sheets. 9855 */ 9856 density = (flashid >> 16) & 0xff; 9857 switch (density) { 9858 case 0x17: size = 1 << 23; break; /* 8MB */ 9859 case 0x18: size = 1 << 24; break; /* 16MB */ 9860 } 9861 break; 9862 } 9863 9864 case 0xef: { /* Winbond */ 9865 /* 9866 * This Density -> Size decoding table is taken from Winbond 9867 * Data Sheets. 
9868 */ 9869 density = (flashid >> 16) & 0xff; 9870 switch (density) { 9871 case 0x17: size = 1 << 23; break; /* 8MB */ 9872 case 0x18: size = 1 << 24; break; /* 16MB */ 9873 } 9874 break; 9875 } 9876 } 9877 9878 /* 9879 * If we didn't recognize the FLASH part, that's no real issue: the 9880 * Hardware/Software contract says that Hardware will _*ALWAYS*_ 9881 * use a FLASH part which is at least 4MB in size and has 64KB 9882 * sectors. The unrecognized FLASH part is likely to be much larger 9883 * than 4MB, but that's all we really need. 9884 */ 9885 if (size == 0) { 9886 CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid); 9887 size = 1 << 22; 9888 } 9889 9890 /* 9891 * Store decoded Flash size and fall through into vetting code. 9892 */ 9893 adapter->params.sf_size = size; 9894 adapter->params.sf_nsec = size / SF_SEC_SIZE; 9895 9896 found: 9897 /* 9898 * We should ~probably~ reject adapters with FLASHes which are too 9899 * small but we have some legacy FPGAs with small FLASHes that we'd 9900 * still like to use. So instead we emit a scary message ... 
9901 */ 9902 if (adapter->params.sf_size < FLASH_MIN_SIZE) 9903 CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n", 9904 flashid, adapter->params.sf_size, FLASH_MIN_SIZE); 9905 9906 return 0; 9907 } 9908 9909 static void set_pcie_completion_timeout(struct adapter *adapter, 9910 u8 range) 9911 { 9912 u16 val; 9913 u32 pcie_cap; 9914 9915 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 9916 if (pcie_cap) { 9917 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val); 9918 val &= 0xfff0; 9919 val |= range ; 9920 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val); 9921 } 9922 } 9923 9924 /** 9925 * t4_get_chip_type - Determine chip type from device ID 9926 * @adap: the adapter 9927 * @ver: adapter version 9928 */ 9929 enum chip_type t4_get_chip_type(struct adapter *adap, int ver) 9930 { 9931 enum chip_type chip = 0; 9932 u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV)); 9933 9934 /* Retrieve adapter's device ID */ 9935 switch (ver) { 9936 case CHELSIO_T4_FPGA: 9937 chip |= CHELSIO_CHIP_FPGA; 9938 /*FALLTHROUGH*/ 9939 case CHELSIO_T4: 9940 chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); 9941 break; 9942 case CHELSIO_T5_FPGA: 9943 chip |= CHELSIO_CHIP_FPGA; 9944 /*FALLTHROUGH*/ 9945 case CHELSIO_T5: 9946 chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); 9947 break; 9948 case CHELSIO_T6_FPGA: 9949 chip |= CHELSIO_CHIP_FPGA; 9950 /*FALLTHROUGH*/ 9951 case CHELSIO_T6: 9952 chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); 9953 break; 9954 default: 9955 CH_ERR(adap, "Device %d is not supported\n", 9956 adap->params.pci.device_id); 9957 return -EINVAL; 9958 } 9959 9960 /* T4A1 chip is no longer supported */ 9961 if (chip == T4_A1) { 9962 CH_ALERT(adap, "T4 rev 1 chip is no longer supported\n"); 9963 return -EINVAL; 9964 } 9965 return chip; 9966 } 9967 9968 /** 9969 * t4_prep_pf - prepare SW and HW for PF operation 9970 * @adapter: the adapter 9971 * 9972 * Initialize adapter SW state for the various HW modules, set initial 9973 * values 
for some adapter tunables on each PF. 9974 */ 9975 int t4_prep_pf(struct adapter *adapter) 9976 { 9977 int ret, ver; 9978 9979 ret = t4_wait_dev_ready(adapter); 9980 if (ret < 0) 9981 return ret; 9982 9983 get_pci_mode(adapter, &adapter->params.pci); 9984 9985 9986 /* Retrieve adapter's device ID 9987 */ 9988 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &adapter->params.pci.device_id); 9989 t4_os_pci_read_cfg2(adapter, PCI_VENDOR_ID, &adapter->params.pci.vendor_id); 9990 9991 ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id); 9992 adapter->params.chip = t4_get_chip_type(adapter, ver); 9993 if (is_t4(adapter->params.chip)) { 9994 adapter->params.arch.sge_fl_db = F_DBPRIO; 9995 adapter->params.arch.mps_tcam_size = 9996 NUM_MPS_CLS_SRAM_L_INSTANCES; 9997 adapter->params.arch.mps_rplc_size = 128; 9998 adapter->params.arch.nchan = NCHAN; 9999 adapter->params.arch.pm_stats_cnt = PM_NSTATS; 10000 adapter->params.arch.vfcount = 128; 10001 /* Congestion map is for 4 channels so that 10002 * MPS can have 4 priority per port. 10003 */ 10004 adapter->params.arch.cng_ch_bits_log = 2; 10005 } else if (is_t5(adapter->params.chip)) { 10006 adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE; 10007 adapter->params.arch.mps_tcam_size = 10008 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 10009 adapter->params.arch.mps_rplc_size = 128; 10010 adapter->params.arch.nchan = NCHAN; 10011 adapter->params.arch.pm_stats_cnt = PM_NSTATS; 10012 adapter->params.arch.vfcount = 128; 10013 adapter->params.arch.cng_ch_bits_log = 2; 10014 } else if (is_t6(adapter->params.chip)) { 10015 adapter->params.arch.sge_fl_db = 0; 10016 adapter->params.arch.mps_tcam_size = 10017 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 10018 adapter->params.arch.mps_rplc_size = 256; 10019 adapter->params.arch.nchan = 2; 10020 adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS; 10021 adapter->params.arch.vfcount = 256; 10022 /* Congestion map will be for 2 channels so that 10023 * MPS can have 8 priority per port. 
10024 */ 10025 adapter->params.arch.cng_ch_bits_log = 3; 10026 } else { 10027 CH_ERR(adapter, "Device %d is not supported\n", 10028 adapter->params.pci.device_id); 10029 return -EINVAL; 10030 } 10031 10032 adapter->params.pci.vpd_cap_addr = 10033 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD); 10034 10035 if (is_fpga(adapter->params.chip)) { 10036 /* FPGA */ 10037 adapter->params.cim_la_size = 2 * CIMLA_SIZE; 10038 } else { 10039 /* ASIC */ 10040 adapter->params.cim_la_size = CIMLA_SIZE; 10041 } 10042 10043 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 10044 10045 /* 10046 * Default port and clock for debugging in case we can't reach FW. 10047 */ 10048 adapter->params.nports = 1; 10049 adapter->params.portvec = 1; 10050 adapter->params.vpd.cclk = 50000; 10051 10052 /* Set pci completion timeout value to 4 seconds. */ 10053 set_pcie_completion_timeout(adapter, 0xd); 10054 return 0; 10055 } 10056 10057 /** 10058 * t4_prep_master_pf - prepare SW for master PF operations 10059 * @adapter: the adapter 10060 * 10061 */ 10062 int t4_prep_master_pf(struct adapter *adapter) 10063 { 10064 int ret; 10065 10066 ret = t4_prep_pf(adapter); 10067 if (ret < 0) 10068 return ret; 10069 10070 ret = t4_get_flash_params(adapter); 10071 if (ret < 0) { 10072 CH_ERR(adapter, 10073 "Unable to retrieve Flash parameters ret = %d\n", -ret); 10074 return ret; 10075 } 10076 10077 return 0; 10078 } 10079 10080 /** 10081 * t4_prep_adapter - prepare SW and HW for operation 10082 * @adapter: the adapter 10083 * @reset: if true perform a HW reset 10084 * 10085 * Initialize adapter SW state for the various HW modules, set initial 10086 * values for some adapter tunables. 
10087 */ 10088 int t4_prep_adapter(struct adapter *adapter, bool reset) 10089 { 10090 return t4_prep_master_pf(adapter); 10091 } 10092 10093 /** 10094 * t4_shutdown_adapter - shut down adapter, host & wire 10095 * @adapter: the adapter 10096 * 10097 * Perform an emergency shutdown of the adapter and stop it from 10098 * continuing any further communication on the ports or DMA to the 10099 * host. This is typically used when the adapter and/or firmware 10100 * have crashed and we want to prevent any further accidental 10101 * communication with the rest of the world. This will also force 10102 * the port Link Status to go down -- if register writes work -- 10103 * which should help our peers figure out that we're down. 10104 */ 10105 int t4_shutdown_adapter(struct adapter *adapter) 10106 { 10107 int port; 10108 10109 t4_intr_disable(adapter); 10110 t4_write_reg(adapter, A_DBG_GPIO_EN, 0); 10111 for_each_port(adapter, port) { 10112 u32 a_port_cfg = is_t4(adapter->params.chip) ? 10113 PORT_REG(port, A_XGMAC_PORT_CFG) : 10114 T5_PORT_REG(port, A_MAC_PORT_CFG); 10115 10116 t4_write_reg(adapter, a_port_cfg, 10117 t4_read_reg(adapter, a_port_cfg) 10118 & ~V_SIGNAL_DET(1)); 10119 } 10120 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0); 10121 10122 return 0; 10123 } 10124 10125 /** 10126 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information 10127 * @adapter: the adapter 10128 * @qid: the Queue ID 10129 * @qtype: the Ingress or Egress type for @qid 10130 * @user: true if this request is for a user mode queue 10131 * @pbar2_qoffset: BAR2 Queue Offset 10132 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues 10133 * 10134 * Returns the BAR2 SGE Queue Registers information associated with the 10135 * indicated Absolute Queue ID. These are passed back in return value 10136 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue 10137 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. 
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" register may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel
	 * mode queues.
	 */
	if (!user && is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.  sge.hps holds the encoded
	 * Host Page Size (log2(bytes) - 10, see t4_init_sge_params).
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_qpp
		     : adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}

/**
 * t4_init_devlog_params - initialize adapter->params.devlog
 * @adap: the adapter
 * @fw_attach: whether we can talk to the firmware
 *
 * Initialize various fields of the adapter's Firmware Device Log
 * Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
	    t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* The register encodes the entry count in units of 128
		 * entries, biased by one.
		 */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
	    be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}

/**
 * t4_init_sge_params - initialize adap->params.sge
 * @adapter: the adapter
 *
 * Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 hps, qpp;
	unsigned int s_hps, s_qpp;

	/* Extract the SGE Page Size for our PF.  The per-PF fields are
	 * evenly spaced within the register, so the shift for PF n is
	 * derived from the PF0/PF1 field positions.
	 */
	hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	s_hps = (S_HOSTPAGESIZEPF0 +
		 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf);
	sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);

	/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
 */
	s_qpp = (S_QUEUESPERPAGEPF0 +
		 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);

	return 0;
}

/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	u32 param, val, v;
	int chan, ret;

	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	adap->params.tp.tre = G_TIMERRESOLUTION(v);
	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
	 * Configuration.
	 */

	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
		 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));

	/* Read current value */
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val);
	if (ret == 0) {
		CH_INFO(adap,
			"Current filter mode/mask 0x%x:0x%x\n",
			G_FW_PARAMS_PARAM_FILTER_MODE(val),
			G_FW_PARAMS_PARAM_FILTER_MASK(val));
		adap->params.tp.vlan_pri_map = G_FW_PARAMS_PARAM_FILTER_MODE(val);
		adap->params.tp.filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val);
	} else {
		CH_WARN(adap,
			"Reading filter mode/mask not supported via fw api, "
			"falling back to older indirect-reg-read \n");

		/* In case of older-fw (which doesn't expose the api
		 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
		 * the fw api) combination, fall-back to older method of reading
		 * the filter mode from indirect-register
		 */
		t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
			       A_TP_VLAN_PRI_MAP, sleep_ok);

		/* With the older-fw and newer-driver combination we might run
		 * into an issue when user wants to use hash filter region but
		 * the filter_mask is zero, in this case filter_mask validation
		 * is tough. To avoid that we set the filter_mask same as filter
		 * mode, which will behave exactly as the older way of ignoring
		 * the filter mask validation.
		 */
		adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
	}

	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
		       A_TP_INGRESS_CONFIG, sleep_ok);

	/* For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
	}

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
								F_ETHERTYPE);
	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
							       F_MACMATCH);
	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
								F_MPSHITTYPE);
	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
							   F_FRAGMENTATION);
	return 0;
}

/**
 * t4_filter_field_shift - calculate filter field shift
 * @adap: the adapter
 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 * Return the shift position of a filter field within the Compressed
 * Filter Tuple.  The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
10416 */ 10417 int t4_filter_field_shift(const struct adapter *adap, int filter_sel) 10418 { 10419 unsigned int filter_mode = adap->params.tp.vlan_pri_map; 10420 unsigned int sel; 10421 int field_shift; 10422 10423 if ((filter_mode & filter_sel) == 0) 10424 return -1; 10425 10426 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { 10427 switch (filter_mode & sel) { 10428 case F_FCOE: 10429 field_shift += W_FT_FCOE; 10430 break; 10431 case F_PORT: 10432 field_shift += W_FT_PORT; 10433 break; 10434 case F_VNIC_ID: 10435 field_shift += W_FT_VNIC_ID; 10436 break; 10437 case F_VLAN: 10438 field_shift += W_FT_VLAN; 10439 break; 10440 case F_TOS: 10441 field_shift += W_FT_TOS; 10442 break; 10443 case F_PROTOCOL: 10444 field_shift += W_FT_PROTOCOL; 10445 break; 10446 case F_ETHERTYPE: 10447 field_shift += W_FT_ETHERTYPE; 10448 break; 10449 case F_MACMATCH: 10450 field_shift += W_FT_MACMATCH; 10451 break; 10452 case F_MPSHITTYPE: 10453 field_shift += W_FT_MPSHITTYPE; 10454 break; 10455 case F_FRAGMENTATION: 10456 field_shift += W_FT_FRAGMENTATION; 10457 break; 10458 } 10459 } 10460 return field_shift; 10461 } 10462 10463 /** 10464 * t4_create_filter_info - return Compressed Filter Value/Mask tuple 10465 * @adapter: the adapter 10466 * @filter_value: Filter Value return value pointer 10467 * @filter_mask: Filter Mask return value pointer 10468 * @fcoe: FCoE filter selection 10469 * @port: physical port filter selection 10470 * @vnic: Virtual NIC ID filter selection 10471 * @vlan: VLAN ID filter selection 10472 * @vlan_pcp: VLAN Priority Code Point 10473 * @vlan_dei: VLAN Drop Eligibility Indicator 10474 * @tos: Type Of Server filter selection 10475 * @protocol: IP Protocol filter selection 10476 * @ethertype: Ethernet Type filter selection 10477 * @macmatch: MPS MAC Index filter selection 10478 * @matchtype: MPS Hit Type filter selection 10479 * @frag: IP Fragmentation filter selection 10480 * 10481 * Construct a Compressed Filter Value/Mask tuple based on a set of 
10482 * "filter selection" values. For each passed filter selection value 10483 * which is greater than or equal to 0, we put that value into the 10484 * constructed Filter Value and the appropriate mask into the Filter 10485 * Mask. If a filter selections is specified which is not currently 10486 * configured into the hardware, an error will be returned. Otherwise 10487 * the constructed FIlter Value/Mask tuple will be returned via the 10488 * specified return value pointers and success will be returned. 10489 * 10490 * All filter selection values and the returned Filter Value/Mask values 10491 * are in Host-Endian format. 10492 */ 10493 int t4_create_filter_info(const struct adapter *adapter, 10494 u64 *filter_value, u64 *filter_mask, 10495 int fcoe, int port, int vnic, 10496 int vlan, int vlan_pcp, int vlan_dei, 10497 int tos, int protocol, int ethertype, 10498 int macmatch, int matchtype, int frag) 10499 { 10500 const struct tp_params *tp = &adapter->params.tp; 10501 u64 v, m; 10502 10503 /* 10504 * If any selected filter field isn't enabled, return an error. 10505 */ 10506 #define BAD_FILTER(__field) \ 10507 ((__field) >= 0 && tp->__field##_shift < 0) 10508 if (BAD_FILTER(fcoe) || 10509 BAD_FILTER(port) || 10510 BAD_FILTER(vnic) || 10511 BAD_FILTER(vlan) || 10512 BAD_FILTER(tos) || 10513 BAD_FILTER(protocol) || 10514 BAD_FILTER(ethertype) || 10515 BAD_FILTER(macmatch) || 10516 BAD_FILTER(matchtype) || 10517 BAD_FILTER(frag)) 10518 return -EINVAL; 10519 #undef BAD_FILTER 10520 10521 /* 10522 * We have to have VLAN ID selected if we want to also select on 10523 * either the Priority Code Point or Drop Eligibility Indicator 10524 * fields. 10525 */ 10526 if ((vlan_pcp >= 0 || vlan_dei >= 0) && vlan < 0) 10527 return -EINVAL; 10528 10529 /* 10530 * Construct Filter Value and Mask. 
10531 */ 10532 v = m = 0; 10533 #define SET_FILTER_FIELD(__field, __width) \ 10534 do { \ 10535 if ((__field) >= 0) { \ 10536 const int shift = tp->__field##_shift; \ 10537 \ 10538 v |= (__field) << shift; \ 10539 m |= ((1ULL << (__width)) - 1) << shift; \ 10540 } \ 10541 } while (0) 10542 SET_FILTER_FIELD(fcoe, W_FT_FCOE); 10543 SET_FILTER_FIELD(port, W_FT_PORT); 10544 SET_FILTER_FIELD(tos, W_FT_TOS); 10545 SET_FILTER_FIELD(protocol, W_FT_PROTOCOL); 10546 SET_FILTER_FIELD(ethertype, W_FT_ETHERTYPE); 10547 SET_FILTER_FIELD(macmatch, W_FT_MACMATCH); 10548 SET_FILTER_FIELD(matchtype, W_FT_MPSHITTYPE); 10549 SET_FILTER_FIELD(frag, W_FT_FRAGMENTATION); 10550 #undef SET_FILTER_FIELD 10551 10552 /* 10553 * We handle VNIC ID and VLANs separately because they're slightly 10554 * different than the rest of the fields. Both require that a 10555 * corresponding "valid" bit be set in the Filter Value and Mask. 10556 * These bits are in the top bit of the field. Additionally, we can 10557 * select the Priority Code Point and Drop Eligibility Indicator 10558 * fields for VLANs as an option. Remember that the format of a VLAN 10559 * Tag is: 10560 * 10561 * bits: 3 1 12 10562 * +---+-+------------+ 10563 * |PCP|D| VLAN ID | 10564 * +---+-+------------+ 10565 */ 10566 if (vnic >= 0) { 10567 v |= ((1ULL << (W_FT_VNIC_ID-1)) | vnic) << tp->vnic_shift; 10568 m |= ((1ULL << W_FT_VNIC_ID) - 1) << tp->vnic_shift; 10569 } 10570 if (vlan >= 0) { 10571 v |= ((1ULL << (W_FT_VLAN-1)) | vlan) << tp->vlan_shift; 10572 m |= ((1ULL << (W_FT_VLAN-1)) | 0xfff) << tp->vlan_shift; 10573 10574 if (vlan_dei >= 0) { 10575 v |= vlan_dei << (tp->vlan_shift + 12); 10576 m |= 0x7 << (tp->vlan_shift + 12); 10577 } 10578 if (vlan_pcp >= 0) { 10579 v |= vlan_pcp << (tp->vlan_shift + 13); 10580 m |= 0x7 << (tp->vlan_shift + 13); 10581 } 10582 } 10583 10584 /* 10585 * Pass back computed Filter Value and Mask; return success. 
10586 */ 10587 *filter_value = v; 10588 *filter_mask = m; 10589 return 0; 10590 } 10591 10592 int t4_init_rss_mode(struct adapter *adap, int mbox) 10593 { 10594 int i, ret; 10595 struct fw_rss_vi_config_cmd rvc; 10596 10597 memset(&rvc, 0, sizeof(rvc)); 10598 10599 for_each_port(adap, i) { 10600 struct port_info *p = adap2pinfo(adap, i); 10601 rvc.op_to_viid = 10602 cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | 10603 F_FW_CMD_REQUEST | F_FW_CMD_READ | 10604 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); 10605 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc)); 10606 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); 10607 if (ret) 10608 return ret; 10609 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen); 10610 } 10611 return 0; 10612 } 10613 10614 static int t4_init_portmirror(struct port_info *pi, int mbox, 10615 int port, int pf, int vf) 10616 { 10617 struct adapter *adapter = pi->adapter; 10618 int ret; 10619 u8 vivld = 0, vin = 0; 10620 10621 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL, 10622 &vivld, &vin); 10623 if (ret < 0) 10624 return ret; 10625 10626 pi->viid_mirror = ret; 10627 10628 /* If fw supports returning the VIN as part of FW_VI_CMD, 10629 * save the returned values. 10630 */ 10631 if (adapter->params.viid_smt_extn_support) { 10632 pi->vivld_mirror = vivld; 10633 pi->vin_mirror = vin; 10634 } else { 10635 /* Retrieve the values from VIID */ 10636 pi->vivld_mirror = G_FW_VIID_VIVLD(pi->viid_mirror); 10637 pi->vin_mirror = G_FW_VIID_VIN(pi->viid_mirror); 10638 } 10639 10640 CH_INFO(pi->adapter, "Port %d Traffic Mirror PF = %u; VF = %u\n", 10641 port, pf, pi->vin_mirror); 10642 return 0; 10643 } 10644 10645 int t4_mirror_init(struct adapter *adap, int mbox, int pf, int vf, 10646 bool enable_ringbb) 10647 { 10648 int ret, i, j = 0; 10649 10650 for_each_port(adap, i) { 10651 struct port_info *pi = adap2pinfo(adap, i); 10652 10653 /* We want mirroring only on Port0 for ringbackbone 10654 * configuration. 
 */
		if (enable_ringbb && i)
			break;
		/* Skip over unprovisioned physical ports. */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		ret = t4_init_portmirror(pi, mbox, j, pf, vf);
		if (ret)
			return ret;
		j++;
	}
	return 0;
}

/**
 * t4_init_portinfo_viid - allocate a virtual interface and initialize
 * port_info
 * @pi: the port_info
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @mac: the MAC address of the VI
 * @alloc_vi: Indicator to alloc VI
 *
 * Allocates a virtual interface for the given physical port. If @mac is
 * not %NULL it contains the MAC address of the VI as assigned by FW.
 * @mac should be large enough to hold an Ethernet address.
 * Returns < 0 on error.
 */
int t4_init_portinfo_viid(struct port_info *pi, int mbox,
			  int port, int pf, int vf, u8 mac[], bool alloc_vi)
{
	struct adapter *adapter = pi->adapter;
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	unsigned int rss_size;
	enum fw_port_type port_type;
	int mdio_addr;
	fw_port_cap32_t pcaps, acaps;
	int ret;

	/*
	 * If we haven't yet determined whether we're talking to Firmware
	 * which knows the new 32-bit Port Capabilities, it's time to find
	 * out now.  This will also tell new Firmware to send us Port Status
	 * Updates using the new 32-bit Port Capabilities version of the
	 * Port Information message.
	 */
	if (fw_caps == FW_CAPS_UNKNOWN) {
		u32 param, val;

		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
		val = 1;
		ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
		/* Firmware that rejects this parameter only speaks the old
		 * 16-bit capabilities format.
		 */
		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
		adapter->params.fw_caps_support = fw_caps;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				       F_FW_CMD_REQUEST | F_FW_CMD_READ |
				       V_FW_PORT_CMD_PORTID(port));
	cmd.action_to_len16 = cpu_to_be32(
	    V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
				 ? FW_PORT_ACTION_GET_PORT_INFO
				 : FW_PORT_ACTION_GET_PORT_INFO32) |
	    FW_LEN16(cmd));
	ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	/*
	 * Extract the various fields from the Port Information message.
	 */
	if (fw_caps == FW_CAPS16) {
		u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);

		port_type = G_FW_PORT_CMD_PTYPE(lstatus);
		mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP)
			     ? G_FW_PORT_CMD_MDIOADDR(lstatus)
			     : -1);
		/* Widen the 16-bit capability words to the 32-bit format. */
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
	} else {
		u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);

		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
		mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32)
			     ? G_FW_PORT_CMD_MDIOADDR32(lstatus32)
			     : -1);
		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
		acaps = be32_to_cpu(cmd.u.info32.acaps32);
	}

	if (alloc_vi) {
		u8 vivld = 0, vin = 0;

		ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac,
				  &rss_size, &vivld, &vin);
		if (ret < 0)
			return ret;

		pi->viid = ret;
		pi->rss_size = rss_size;

		/* If fw supports returning the VIN as part of FW_VI_CMD,
		 * save the returned values.
		 */
		if (adapter->params.viid_smt_extn_support) {
			pi->vivld = vivld;
			pi->vin = vin;
		} else {
			/* Retrieve the values from VIID */
			pi->vivld = G_FW_VIID_VIVLD(pi->viid);
			pi->vin = G_FW_VIID_VIN(pi->viid);
		}
	}

	pi->tx_chan = port;
	pi->lport = port;
	pi->rx_chan = port;
	pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);

	pi->port_type = port_type;
	pi->mdio_addr = mdio_addr;
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, pcaps, acaps);
	return 0;
}

/**
 * t4_init_portinfo - allocate a virtual interface and initialize port_info
 * @pi: the port_info
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @mac: the MAC address of the VI
 *
 * Allocates a virtual interface for the given physical port. If @mac is
 * not %NULL it contains the MAC address of the VI as assigned by FW.
 * @mac should be large enough to hold an Ethernet address.
 * Returns < 0 on error.
 */
int t4_init_portinfo(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[])
{
	return t4_init_portinfo_viid(pi, mbox, port, pf, vf, mac, true);
}

/*
 * Initialize every port on the adapter: walk the firmware port vector,
 * allocate a VI for each provisioned port and record its firmware-assigned
 * MAC address.  Returns 0 on success, < 0 on the first failure.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		/* Skip over unprovisioned physical ports. */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
		if (ret)
			return ret;

		t4_os_set_hw_addr(adap, i, addr);
		j++;
	}
	return 0;
}

/**
 * t4_read_cimq_cfg - read CIM queue configuration
 * @adap: the adapter
 * @base: holds the queue base addresses in bytes
 * @size: holds the queue sizes in bytes
 * @thres: holds the queue full thresholds in bytes
 *
 * Returns the current configuration of the CIM queues, starting with
 * the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	/* T5 and later have more outbound queues than T4. */
	int cim_num_obq = is_t4(adap->params.chip) ?
	    CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
	}
}

/**
 * t4_read_cim_ibq - read the contents of a CIM inbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* There are 6 inbound queues, so qid must be 0..5. */
	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	/* Disable the debug access before returning. */
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}

/**
 * t4_read_cim_obq - read the contents of a CIM outbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	/* T5 and later have more outbound queues than T4. */
	int cim_num_obq = is_t4(adap->params.chip) ?
	    CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	/* Disable the debug access before returning. */
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

/**
 * t4_cim_read - read a block from CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 * t4_cim_write - write a block into CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to write
 * @valp: set of values to write
 *
 * Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Load the data word first, then trigger the write. */
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

/* Convenience wrapper: write a single word into CIM address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}

/**
 * t4_cim_read_la - read CIM LA capture buffer
 * @adap: the adapter
 * @la_buf: where to store the LA data
 * @wrptr: the HW write pointer within the capture buffer
 *
 * Reads the contents of the CIM LA buffer with the most recent entry at
 * the end of the returned data and with the entry at @wrptr first.
 * We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		/* The read-enable bit clears once the hardware has completed
		 * the fetch; if it's still set, the read timed out.
		 */
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
			idx = (idx & 0xff0) + 0x10;
		else
			idx++;
		/* address can't exceed 0xfff */
		idx &= M_UPDBGLARDPTR;
	}
restart:
	/* Restart the LA if it was running when we arrived; preserve the
	 * first error seen, if any.
	 */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

/**
 * t4_tp_read_la - read TP LA capture buffer
 * @adap: the adapter
 * @la_buf: where to store the LA data
 * @wrptr: the HW write pointer within the capture buffer
 *
 * Reads the contents of the TP LA buffer with the most recent entry at
 * the end of the returned data and with the entry at @wrptr first.
 * We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)	/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* In capture modes >= 2 an entry whose second half hasn't been
	 * written yet (DBGLAWHLF clear) is incomplete.
	 */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Keep only the low 16 config bits, clear the read pointer field and
	 * reapply the configured LA mask before walking the buffer.
	 */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)	/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}

/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300

/**
 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *
 *	Initialize the state of an SGE Ingress DMA Monitor.
 */
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
{
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang.  The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick.  The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds.  For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s.  So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
}

/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers.  These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock.  The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}

/**
 *	t4_set_vf_mac - Set MAC address for the specified VF
 *	@adapter: The adapter
 *	@vf: one of the VFs instantiated by the specified PF
 *	@naddr: the number of MAC addresses
 *	@addr: the MAC address(es) to be set to the specified VF
 */
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
		      unsigned int naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_WRITE |
				    V_FW_ACL_MAC_CMD_PFN(adapter->pf) |
				    V_FW_ACL_MAC_CMD_VFN(vf));

	/* Note: Do not enable the ACL */
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	cmd.nmac = naddr;

	/* Each PF owns a distinct MAC-address slot in the command. */
	switch (adapter->pf) {
	case 3:
		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
		break;
	case 2:
		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
		break;
	case 1:
		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
		break;
	case 0:
		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
		break;
	}

	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
}

/* Code which cannot be pushed to kernel.org e.g., cxgbtool ioctl helper
 * functions
 */

/**
 *	t4_read_pace_tbl - read the pace table
 *	@adap: the adapter
 *	@pace_vals: holds the returned values
 *
 *	Returns the values of TP's pace table in microseconds.
 */
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
{
	unsigned int i, v;

	for (i = 0; i < NTX_SCHED; i++) {
		/* Writing 0xffff0000 + i selects entry i for read-back. */
		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
		v = t4_read_reg(adap, A_TP_PACE_TABLE);
		pace_vals[i] = dack_ticks_to_usec(adap, v);
	}
}

/**
 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: the byte rate in Kbps
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Return the current configuration of a HW Tx scheduler.
11322 */ 11323 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, 11324 unsigned int *ipg, bool sleep_ok) 11325 { 11326 unsigned int v, addr, bpt, cpt; 11327 11328 if (kbps) { 11329 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; 11330 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); 11331 if (sched & 1) 11332 v >>= 16; 11333 bpt = (v >> 8) & 0xff; 11334 cpt = v & 0xff; 11335 if (!cpt) 11336 *kbps = 0; /* scheduler disabled */ 11337 else { 11338 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */ 11339 *kbps = (v * bpt) / 125; 11340 } 11341 } 11342 if (ipg) { 11343 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; 11344 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); 11345 if (sched & 1) 11346 v >>= 16; 11347 v &= 0xffff; 11348 *ipg = (10000 * v) / core_ticks_per_usec(adap); 11349 } 11350 } 11351 11352 /** 11353 * t4_load_cfg - download config file 11354 * @adap: the adapter 11355 * @cfg_data: the cfg text file to write 11356 * @size: text file size 11357 * 11358 * Write the supplied config text file to the card's serial flash. 
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	/* Always erase the full config-file region, not just the sectors
	 * covered by @size, so stale tail data can't survive.
	 */
	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t5_fw_init_extern_mem - initialize the external memory
 *	@adap: the adapter
 *
 *	Initializes the external memory on T5.
 */
int t5_fw_init_extern_mem(struct adapter *adap)
{
	u32 params[1], val[1];
	int ret;

	/* External-memory initialization only applies to T5 parts. */
	if (!is_t5(adap->params.chip))
		return 0;

	val[0] = 0xff; /* Initialize all MCs */
	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
				    FW_CMD_MAX_TIMEOUT);

	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8 signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8 reserved[22];	/* Reserved per processor Architecture data */
	u8 pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;		/* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8 signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8 size512;		/* Current Image Size in units of 512 bytes */
	u8 initentry_point[4];
	u8 cksum;		/* Checksum computed on the entire Image */
	u8 reserved[16];	/* Reserved */
	u8 pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8 signature[2];	/* ROM signature. The value 0xaa55 */
	u8 initialization_size[2]; /* Units 512. Includes this header */
	u8 efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
	u8 efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8 efi_machine_type[2];	/* Machine type from EFI image header */
	u8 compression_type[2];	/* Compression type. */
	/*
	 * Compression type definition
	 * 0x0: uncompressed
	 * 0x1: Compressed
	 * 0x2-0xFFFF: Reserved
	 */
	u8 reserved[8];		/* Reserved */
	u8 efi_image_header_offset[2]; /* Offset to EFI Image */
	u8 pcir_offset[2];	/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8 signature[4];	/* Signature. The string "PCIR" */
	u8 vendor_id[2];	/* Vendor Identification */
	u8 device_id[2];	/* Device Identification */
	u8 vital_product[2];	/* Pointer to Vital Product Data */
	u8 length[2];		/* PCIR Data Structure Length */
	u8 revision;		/* PCIR Data Structure Revision */
	u8 class_code[3];	/* Class Code */
	u8 image_length[2];	/* Image Length. Multiple of 512B */
	u8 code_revision[2];	/* Revision Level of Code/Data */
	u8 code_type;		/* Code Type. */
	/*
	 * PCI Expansion ROM Code Types
	 * 0x00: Intel IA-32, PC-AT compatible. Legacy
	 * 0x01: Open Firmware standard for PCI. FCODE
	 * 0x02: Hewlett-Packard PA RISC. HP reserved
	 * 0x03: EFI Image. EFI
	 * 0x04-0xFF: Reserved.
	 */
	u8 indicator;		/* Indicator. Identifies the last image in the ROM */
	u8 reserved[2];		/* Reserved */
} pcir_data_t;			/* PCI__DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,	   /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
	VENDOR_ID = 0x1425,	   /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write.
 *	@boot_data: the boot image to modify.
 *
 *	Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
			      le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot data
			 * (offset 7 is the cksum field of the legacy header).
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}

#ifdef CHELSIO_T4_DIAGS
/*
 *	t4_erase_sf - Erase entire serial Flash region
 *	@adapter: the adapter
 *
 *	Clears the entire serial flash region.
 */
int t4_erase_sf(struct adapter *adap)
{
	unsigned int nsectors;
	int ret;

	nsectors = FLASH_END_SEC;
	if (nsectors > adap->params.sf_nsec)
		nsectors = adap->params.sf_nsec;

	/* Erase all sectors of flash before and including the FW.
	 * Flash layout is in t4_hw.h.
	 */
	ret = t4_flash_erase_sectors(adap, 0, nsectors - 1);
	if (ret)
		CH_ERR(adap, "Erasing serial flash failed, error %d\n", ret);
	return ret;
}
#endif

/*
 *	t4_load_boot - download boot flash
 *	@adapter: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = (boot_addr * 1024);
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
	 * and Boot configuration data sections. These 3 boot sections span
	 * sectors 0 to 7 in flash and live right before the FW image location.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors. We ought to check for
	 * more but it's not worth it for now ...
	 *
	 * NOTE(review): these sanity checks run *after* the sectors above
	 * have already been erased, so a rejected image still leaves the
	 * option ROM region blank — confirm this is the intended behavior.
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

#ifndef CHELSIO_T4_DIAGS
	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}
#endif

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = device_id & 0xf0ff;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 *
	 * NOTE(review): this loop assumes @size is a multiple of
	 * SF_PAGE_SIZE; otherwise the unsigned decrement would never reach
	 * zero — confirm callers guarantee page-aligned images.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/*
 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
 * @adapter: the adapter
 *
 * Return the address within the flash where the OptionROM Configuration
 * is stored, or an error if the device FLASH is too small to contain
 * a OptionROM Configuration.
 */
static int t4_flash_bootcfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_BOOTCFG_START;
}

int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
		       FLASH_BOOTCFG_MAX_SIZE);
		return -EFBIG;
	}

	/* Erase the whole bootcfg region regardless of @size. */
	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter OptionROM Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "boot config data %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t4_read_bootcfg - read the current (boot)OptionROM configuration from FLASH
 *	@adap: the adapter
 *	@cfg_data: where to store the read OptionROM configuration data
 *
 *	Read the current OptionROM configuration from FLASH and write to the
 *	buffer @cfg_data supplied.
 */
int t4_read_bootcfg(struct adapter *adap, u8 *cfg_data, unsigned int size)
{
	u32 *ptr = (u32 *)cfg_data;
	int i, n, cfg_addr;
	int ret = 0;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too big, max is %u bytes\n",
		       FLASH_BOOTCFG_MAX_SIZE);
		return -EINVAL;
	}

	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	/* Convert the byte count to a u32 word count for t4_read_flash().
	 * NOTE(review): the loop below strides over this *word* count in
	 * units of SF_PAGE_SIZE (a byte quantity), so each call may cover
	 * several flash pages — confirm this mixing of units is intended.
	 */
	size = size / sizeof (u32);
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;

		ret = t4_read_flash(adap, cfg_addr, n, ptr, 0);
		if (ret)
			goto out;

		cfg_addr += (n*4);
		ptr += n;
	}

out:
	return ret;
}

/**
 *	t4_set_filter_mode - configure the optional components of filter tuples
 *	@adap: the adapter
 *	@mode_map: a bitmap selecting which optional filter components to enable
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets the filter mode by selecting the optional components to enable
 *	in filter tuples.  Returns 0 on success and a negative error if the
 *	requested mode needs more bits than are available for optional
 *	components.
 */
int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
		       bool sleep_ok)
{
	/* Per-component field widths, indexed from S_FCOE through
	 * S_FRAGMENTATION (the loop bounds below).
	 */
	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

	int i, nbits = 0;

	/* Sum the widths of all requested components; they must fit in the
	 * available optional-filter bits.
	 */
	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
		if (mode_map & (1 << i))
			nbits += width[i];
	if (nbits > FILTER_OPT_LEN)
		return -EINVAL;

	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);

	return 0;
}

/**
 *	t4_clr_port_stats - clear port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Clear HW statistics for the given port.
 */
void t4_clr_port_stats(struct adapter *adap, int idx)
{
	unsigned int i;
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 port_base_addr;

	/* T4 and later chips map the per-port statistics at different bases. */
	if (is_t4(adap->params.chip))
		port_base_addr = PORT_BASE(idx);
	else
		port_base_addr = T5_PORT_BASE(idx);

	/* Each statistic is a 64-bit L/H register pair, hence the stride of 8. */
	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	/* Clear the drop/truncate counters for each buffer group this port uses. */
	for (i = 0; i < 4; i++)
		if (bgmap & (1 << i)) {
			t4_write_reg(adap,
			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
			t4_write_reg(adap,
			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
		}
}

/**
 *	t4_i2c_io - read/write I2C data from adapter
 *	@adap: the adapter
 *	@port: Port number if per-port device; <0 if not
 *	@devid: per-port device ID or absolute device ID
 *	@offset: byte offset into device I2C space
 *	@len: byte length of I2C space data
 *	@buf: buffer in which to return I2C data for read
 *	      buffer which holds the I2C data for write
 *	@write: if true, do a write; else do a read
 *	Reads/Writes the
 *	I2C data from/to the indicated device and location.
 */
int t4_i2c_io(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf, bool write)
{
	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
	/* Maximum payload one LDST command can carry. */
	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
	int ret = 0;

	if (len > I2C_PAGE_SIZE)
		return -EINVAL;

	/* Dont allow reads that spans multiple pages */
	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
		return -EINVAL;

	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_cmd.op_to_addrspace =
		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			    F_FW_CMD_REQUEST |
			    (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
	ldst_cmd.u.i2c.did = devid;

	/* Transfer the request in i2c_max-sized chunks. */
	while (len > 0) {
		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;

		ldst_cmd.u.i2c.boffset = offset;
		ldst_cmd.u.i2c.blen = i2c_len;

		if (write)
			memcpy(ldst_cmd.u.i2c.data, buf, i2c_len);

		/* Only reads need the reply buffer (it carries the data back). */
		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
				 write ? NULL : &ldst_rpl);
		if (ret)
			break;

		if (!write)
			memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
		offset += i2c_len;
		buf += i2c_len;
		len -= i2c_len;
	}

	return ret;
}

/* Read @len bytes from I2C device @devid at @offset into @buf. */
int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, false);
}

/* Write @len bytes from @buf to I2C device @devid at @offset. */
int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, true);
}

/**
 *	t4_sge_ctxt_rd - read an SGE context through FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@cid: the context id
 *	@ctype: the context type
 *	@data: where to store the context data
 *
 *	Issues a FW command through the given mailbox to read an SGE context.
12007 */ 12008 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, 12009 enum ctxt_type ctype, u32 *data) 12010 { 12011 int ret; 12012 struct fw_ldst_cmd c; 12013 12014 if (ctype == CTXT_EGRESS) 12015 ret = FW_LDST_ADDRSPC_SGE_EGRC; 12016 else if (ctype == CTXT_INGRESS) 12017 ret = FW_LDST_ADDRSPC_SGE_INGC; 12018 else if (ctype == CTXT_FLM) 12019 ret = FW_LDST_ADDRSPC_SGE_FLMC; 12020 else 12021 ret = FW_LDST_ADDRSPC_SGE_CONMC; 12022 12023 memset(&c, 0, sizeof(c)); 12024 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 12025 F_FW_CMD_REQUEST | F_FW_CMD_READ | 12026 V_FW_LDST_CMD_ADDRSPACE(ret)); 12027 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 12028 c.u.idctxt.physid = cpu_to_be32(cid); 12029 12030 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 12031 if (ret == 0) { 12032 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0); 12033 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1); 12034 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2); 12035 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3); 12036 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4); 12037 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5); 12038 } 12039 return ret; 12040 } 12041 12042 /** 12043 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW 12044 * @adap: the adapter 12045 * @cid: the context id 12046 * @ctype: the context type 12047 * @data: where to store the context data 12048 * 12049 * Reads an SGE context directly, bypassing FW. This is only for 12050 * debugging when FW is unavailable. 
 */
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
		      u32 *data)
{
	int i, ret;

	/* Kick off the context read via the SGE context command register. */
	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
	/* Poll for BUSY to clear: up to 3 attempts, 1us apart. */
	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
	if (!ret)
		/* DATA0..DATA5 are consecutive 32-bit registers (stride 4). */
		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
			*data++ = t4_read_reg(adap, i);
	return ret;
}

/**
 * t4_sched_config - issue a TX scheduler global config FW command
 * @adapter: the adapter
 * @type: scheduler configuration type
 * @minmaxen: min/max enable value passed through to FW
 *
 * Sends FW_SCHED_CMD with sub-command FW_SCHED_SC_CONFIG using the
 * adapter's own mailbox. Returns 0 on success or a negative error number.
 */
int t4_sched_config(struct adapter *adapter, int type, int minmaxen)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
	cmd.u.config.type = type;
	cmd.u.config.minmaxen = minmaxen;

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, 1);
}

/**
 * t4_sched_params - set TX scheduler class parameters via FW
 * @adapter: the adapter
 * @channel: scheduler channel
 * @cls: scheduler class
 * @level: scheduler hierarchy level
 * @mode: scheduling mode
 * @type: scheduler type
 * @rateunit: unit for the rate values
 * @ratemode: rate mode
 * @minrate: minimum rate
 * @maxrate: maximum rate
 * @weight: scheduling weight
 * @pktsize: packet size
 * @burstsize: burst size
 *
 * Sends FW_SCHED_CMD with sub-command FW_SCHED_SC_PARAMS using the
 * adapter's own mailbox. All multi-byte fields are converted to
 * big-endian as the FW expects. Returns 0 on success or a negative
 * error number.
 */
int t4_sched_params(struct adapter *adapter,
		    int channel, int cls,
		    int level, int mode, int type,
		    int rateunit, int ratemode,
		    int minrate, int maxrate, int weight,
		    int pktsize, int burstsize)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = type;
	cmd.u.params.level = level;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cls;
	cmd.u.params.unit = rateunit;
	cmd.u.params.rate = ratemode;
	cmd.u.params.min = cpu_to_be32(minrate);
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.weight = cpu_to_be16(weight);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);
	cmd.u.params.burstsize = cpu_to_be16(burstsize);

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, 1);
}

/**
 * t4_read_sched_params - read back TX scheduler class parameters from FW
 * @adapter: the adapter
 * @channel: scheduler channel to query
 * @cls: scheduler class to query
 * @level/@mode/@type/@rateunit/@ratemode/@minrate/@maxrate/@weight/
 * @pktsize/@burstsize: out-parameters filled from the FW reply
 *
 * Issues a FW_SCHED_CMD read for FW_SCHED_SC_PARAMS and decodes the
 * reply (big-endian fields converted to host order). Out-parameters are
 * only written on success. Returns 0 on success or a negative error
 * number.
 */
int t4_read_sched_params(struct adapter *adapter,
			 int channel, int cls,
			 int *level, int *mode, int *type,
			 int *rateunit, int *ratemode,
			 int *minrate, int *maxrate, int *weight,
			 int *pktsize, int *burstsize)
{
	struct fw_sched_cmd cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_READ);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cls;

	/* Reply is written back into the same command structure. */
	ret = t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
			      &cmd, 1);
	if (ret)
		goto out;

	*level = cmd.u.params.level;
	*mode = cmd.u.params.mode;
	*type = cmd.u.params.type;
	*rateunit = cmd.u.params.unit;
	*ratemode = cmd.u.params.rate;
	*minrate = be32_to_cpu(cmd.u.params.min);
	*maxrate = be32_to_cpu(cmd.u.params.max);
	*weight = be16_to_cpu(cmd.u.params.weight);
	*pktsize = be16_to_cpu(cmd.u.params.pktsize);
	*burstsize = be16_to_cpu(cmd.u.params.burstsize);

out:
	return ret;
}

/*
 * t4_config_watchdog - configure (enable/disable) a watchdog timer
 * @adapter: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @timeout: watchdog timeout in ms
 * @action: watchdog timer / action
 *
 * There are separate watchdog timers for each possible watchdog
 * action. Configure one of the watchdog timers by setting a non-zero
 * timeout. Disable a watchdog timer by using a timeout of zero.
 */
int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
		       unsigned int pf, unsigned int vf,
		       unsigned int timeout, unsigned int action)
{
	struct fw_watchdog_cmd wdog;
	unsigned int ticks;

	/*
	 * The watchdog command expects a timeout in units of 10ms so we need
	 * to convert it here (via rounding) and force a minimum of one 10ms
	 * "tick" if the timeout is non-zero but the conversion results in 0
	 * ticks.
	 */
	ticks = (timeout + 5)/10;
	if (timeout && !ticks)
		ticks = 1;

	memset(&wdog, 0, sizeof wdog);
	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
				     F_FW_CMD_REQUEST |
				     F_FW_CMD_WRITE |
				     V_FW_PARAMS_CMD_PFN(pf) |
				     V_FW_PARAMS_CMD_VFN(vf));
	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
	wdog.timeout = cpu_to_be32(ticks);
	wdog.action = cpu_to_be32(action);

	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
}

/**
 * t4_get_devlog_level - read the current FW device log verbosity level
 * @adapter: the adapter
 * @level: where to store the level returned by FW
 *
 * Issues a FW_DEVLOG_CMD read on the adapter's mailbox. @level is only
 * written on success. Returns 0 on success or a negative error number.
 */
int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
{
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
			 sizeof(devlog_cmd), &devlog_cmd);
	if (ret)
		return ret;

	*level = devlog_cmd.level;
	return 0;
}

/**
 * t4_set_devlog_level - set the FW device log verbosity level
 * @adapter: the adapter
 * @level: the new level to set
 *
 * Issues a FW_DEVLOG_CMD write on the adapter's mailbox.
 * Returns 0 on success or a negative error number.
 */
int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
{
	struct fw_devlog_cmd devlog_cmd;

	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST |
					     F_FW_CMD_WRITE);
	devlog_cmd.level = level;
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
			  sizeof(devlog_cmd), &devlog_cmd);
}

/**
 * t4_configure_add_smac - probe and enable FW "add SMAC to TCAM" support
 * @adap: the adapter
 *
 * Queries FW for the DEV_ADD_SMAC capability and, if supported, enables
 * it; records the outcome in adap->params.smac_add_support.
 * Returns 0 on success or a negative error number.
 */
int t4_configure_add_smac(struct adapter *adap)
{
	unsigned int param, val;
	int ret = 0;

	adap->params.smac_add_support = 0;
	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC));
	/* Query FW to check if FW supports adding source mac address
	 * to TCAM feature or not.
	 * If FW returns 1, driver can use this feature and driver need to send
	 * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to
	 * enable adding smac to TCAM.
	 */
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret)
		return ret;

	if (val == 1) {
		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val);
		if (!ret)
			/* Firmware allows adding explicit TCAM entries.
			 * Save this internally.
			 */
			adap->params.smac_add_support = 1;
	}

	return ret;
}

/**
 * t4_configure_ringbb - probe and enable the FW ring-backbone feature
 * @adap: the adapter
 *
 * Queries FW for the DEV_RING_BACKBONE capability and, if FW reports 1,
 * enables it with a set-params write. Returns 0 on success (or when the
 * feature is unsupported) or a negative error number.
 */
int t4_configure_ringbb(struct adapter *adap)
{
	unsigned int param, val;
	int ret = 0;

	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE));
	/* Query FW to check if FW supports ring switch feature or not.
	 * If FW returns 1, driver can use this feature and driver need to send
	 * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to
	 * enable the ring backbone configuration.
	 */
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n",
		       ret);
		goto out;
	}

	if (val != 1) {
		CH_ERR(adap, "FW doesnot support ringbackbone features\n");
		goto out;
	}

	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		CH_ERR(adap, "Could not set Ringbackbone, err= %d\n",
		       ret);
		goto out;
	}

out:
	return ret;
}

/*
 * t4_set_vlan_acl - Set a VLAN id for the specified VF
 * @adapter: the adapter
 * @mbox: mailbox to use for the FW command
 * @vf: one of the VFs instantiated by the specified PF
 * @vlan: The vlanid to be set
 *
 */
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
		    u16 vlan)
{
	struct fw_acl_vlan_cmd vlan_cmd;
	unsigned int enable;

	/* A zero vlan id means "disable": the EN bit is left clear. */
	enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0);
	memset(&vlan_cmd, 0, sizeof(vlan_cmd));
	vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
					 F_FW_CMD_REQUEST |
					 F_FW_CMD_WRITE |
					 F_FW_CMD_EXEC |
					 V_FW_ACL_VLAN_CMD_PFN(adap->pf) |
					 V_FW_ACL_VLAN_CMD_VFN(vf));
	vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
	/* Drop all packets that do not match the vlan id */
	vlan_cmd.dropnovlan_fm = (enable
				  ?
				   (F_FW_ACL_VLAN_CMD_DROPNOVLAN |
				    F_FW_ACL_VLAN_CMD_FM)
				  : 0);
	if (enable != 0) {
		vlan_cmd.nvlan = 1;
		vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
	}

	/* NOTE(review): the @mbox parameter is ignored here; adap->mbox is
	 * used instead — confirm this is intentional.
	 */
	return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
}

/**
 * t4_del_mac - Removes the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @addr: the MAC address value
 * @smac: if true, delete from only the smac region of MPS
 *
 * Issues a FW_VI_MAC_CMD that frees the exact-match MPS TCAM entry
 * matching @addr (MAC-based free, IDX set to FW_VI_MAC_MAC_BASED_FREE).
 *
 * Returns a negative error number on failure, otherwise the index
 * reported back by FW.
 */
int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
	       const u8 *addr, bool smac)
{
	int ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(
					  V_FW_CMD_LEN16(1) |
					  (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));

	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	p->valid_to_idx = cpu_to_be16(
				      F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/* NOTE(review): this condition is the inverse of the one in
		 * t4_add_mac (which fails on idx >= max_mac_addr). It matches
		 * the upstream Chelsio driver, but confirm the intended
		 * semantics of the index returned for a MAC-based free.
		 */
		if (ret < max_mac_addr)
			return -ENOMEM;
	}

	return ret;
}

/**
 * t4_add_mac - Adds an exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 * @smac: if true, update only the smac region of MPS
 *
 * Modifies an exact-match filter and sets it to the new MAC address if
 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
 * latter case the address is added persistently if @persist is %true.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value. Note that this index may differ from @idx.
 */
int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
	       int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(
					  V_FW_CMD_LEN16(1) |
					  (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			return -ENOMEM;
		if (smt_idx) {
			/* Does the FW support returning smt_idx? */
			if (adap->params.viid_smt_extn_support)
				*smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
			else {
				/* In T4/T5, SMT contains 256 SMAC entries
				 * organized in 128 rows of 2 entries each.
				 * In T6, SMT contains 256 SMAC entries in
				 * 256 rows.
				 */
				if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
					*smt_idx = ((viid & M_FW_VIID_VIN) << 1);
				else
					*smt_idx = (viid & M_FW_VIID_VIN);
			}
		}
	}

	return ret;
}
