/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "t4fw_interface.h"

#undef msleep
#define msleep(x) DELAY((x) * 1000)

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
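
/*
 * Usage sketch (illustrative only, not called by the driver): poll a
 * busy bit until the hardware clears it.  The register/field names below
 * are just examples and the callers are hypothetical.
 *
 *	u32 val;
 *
 *	if (t4_wait_op_done_val(adap, A_SF_OP, F_BUSY, 0, 10, 5, &val) == 0)
 *		process(val);		// operation done, val is final value
 *	else
 *		handle_timeout();	// gave up after 10 polls of 5us each
 */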

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);	/* flush */
}

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: index of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
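
/*
 * Usage sketch (illustrative only): read four consecutive indirectly
 * addressed registers through an address/data pair.  A_TP_PIO_ADDR and
 * A_TP_PIO_DATA are one such pair; any other address/data pair the chip
 * exposes works the same way.
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals,
 *			 ARRAY_SIZE(vals), first_index);
 */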

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;

	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
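
/*
 * Usage sketch (illustrative only): a typical FW command round trip
 * through t4_wr_mbox_meat (usually via a t4_wr_mbox() wrapper).  The
 * fw_reset_cmd structure comes from t4fw_interface.h; error handling and
 * the reset_flags value are elided.
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
 *			      F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *	c.val = htonl(reset_flags);
 *	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, true);
 */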

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
		     V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	idx *= EDC_STRIDE;
	if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, A_EDC_BIST_CMD + idx,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
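
/*
 * Usage sketch (illustrative only): fetch the 64-byte line of EDC 0
 * that covers byte address addr, ignoring the ECC word.
 *
 *	__be32 line[16];
 *
 *	ret = t4_edc_read(adap, 0, addr, line, NULL);
 */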

/**
 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 * @adap: the adapter
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to read
 * @buf: host memory buffer
 *
 * Reads an [almost] arbitrary memory region in the firmware: the
 * firmware memory address, length and host buffer must be aligned on
 * 32-bit boundaries.  The memory is returned as a raw byte sequence from
 * the firmware's memory.  If this memory contains data structures which
 * contain multi-byte integers, it's the caller's responsibility to
 * perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64-1);
	end = (addr + len + 64-1) & ~(64-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if (mtype == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            512
#define VPD_INFO_FLD_HDR_SIZE	3

/**
 * t4_seeprom_read - read a serial EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability.  Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
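
/*
 * Usage sketch (illustrative only): read the first word of the VPD area.
 * Addresses are virtual (see t4_eeprom_ptov() below) and must be 4-byte
 * aligned.
 *
 *	u32 word;
 *
 *	ret = t4_seeprom_read(adapter, VPD_BASE, &word);
 */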

/**
 * t4_seeprom_write - write a serial EEPROM location
 * @adapter: adapter to write
 * @addr: virtual EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
 * VPD capability.  Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual.  The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [ES-A..ES)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
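
/*
 * Worked example (illustrative only) of the mapping above, for fn = 1,
 * sz = 1024, and assuming for the sake of the arithmetic that EEPROMSIZE
 * is 8192 (so A = 1024, ES = 8192):
 *
 *	t4_eeprom_ptov(0x000, 1, 1024) -> 0x7c00  (first 1K -> [31K..32K))
 *	t4_eeprom_ptov(0x400, 1, 1024) -> 0x1c00  (start of [ES-A..ES))
 *	t4_eeprom_ptov(0x800, 1, 1024) -> 0x000   (rest maps from 0)
 */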

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 * get_vpd_keyword_val - locates an information field keyword in the VPD
 * @v: pointer to buffered VPD data structure
 * @kw: the keyword to search for
 *
 * Returns the offset within the VPD of the keyword's value, or -ENOENT
 * if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -ENOENT;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return ret;
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,	/* program page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID        = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_START_SEC = 8,	/* first flash sector for FW */
	FW_END_SEC = 15,	/* last flash sector for FW */
	FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
	FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,

	FLASH_CFG_MAX_SIZE = 0x10000,	/* max size of the flash config file */
	FLASH_CFG_OFFSET = 0x1f0000,
	FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
	FPGA_FLASH_CFG_OFFSET = 0xf0000, /* if FPGA mode, then cfg file is at 1MB - 64KB */
	FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}
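
/*
 * Usage sketch (illustrative only): issue SF_RD_ID and read back the
 * 3-byte manufacturer/device ID.  The sf1_write sends the opcode with
 * @cont set so the chained sf1_read clocks the ID bytes out, and the
 * final register write releases the SF lock.
 *
 *	u32 id;
 *
 *	if (sf1_write(adapter, 1, 1, 1, SF_RD_ID) == 0 &&
 *	    sf1_read(adapter, 3, 0, 1, &id) == 0)
 *		;	// id now holds the flash ID bytes
 *	t4_write_reg(adapter, A_SF_OP, 0);	// unlock SF
 */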

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
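
/*
 * Usage sketch (illustrative only): read the flashed firmware header
 * into host-order words (byte_oriented = 0).
 *
 *	u32 hdr[sizeof(struct fw_hdr) / sizeof(u32)];
 *
 *	ret = t4_read_flash(adapter, FW_IMG_START, ARRAY_SIZE(hdr), hdr, 0);
 */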

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
							      tp_microcode_ver),
			     1, vers, 0);
}
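
/*
 * Usage sketch (illustrative only): read and decode the flashed FW
 * version with the G_FW_HDR_FW_VER_* accessors used below.
 *
 *	u32 vers;
 *
 *	if (t4_get_fw_version(adapter, &vers) == 0)
 *		CH_ALERT(adapter, "FW %u.%u.%u\n",
 *			 G_FW_HDR_FW_VER_MAJOR(vers),
 *			 G_FW_HDR_FW_VER_MINOR(vers),
 *			 G_FW_HDR_FW_VER_MICRO(vers));
 */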

/**
 * t4_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major version mismatch, and a positive value if
 * the expected major version is found but there's a minor version
 * mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	u32 api_vers[2];
	int ret, major, minor, micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (!ret)
		ret = t4_read_flash(adapter,
			FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
			2, api_vers, 1);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
	memcpy(adapter->params.api_vers, api_vers,
	       sizeof(adapter->params.api_vers));

	if (major != FW_VERSION_MAJOR) {	/* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, FW_VERSION_MAJOR);
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0;	/* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 * t4_load_cfg - download config file
 * @adap: the adapter
 * @cfg_data: the cfg text file to write
 * @size: text file size
 *
 * Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	if (adap->params.sf_size == 0x100000) {
		addr = FPGA_FLASH_CFG_OFFSET;
		flash_cfg_start_sec = FPGA_FLASH_CFG_START_SEC;
	} else {
		addr = FLASH_CFG_OFFSET;
		flash_cfg_start_sec = FLASH_CFG_START_SEC;
	}
	if (!size) {
		CH_ERR(adap, "cfg file has no data\n");
		return -EINVAL;
	}

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	if (ret)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file download failed %d\n", ret);
	return ret;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}
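
/*
 * Note on the checksum rule used in t4_load_fw() above: all 32-bit
 * big-endian words of a valid image, including the embedded checksum
 * field itself, sum (mod 2^32) to 0xffffffff.  A host tool can compute
 * the field the same way (sketch, assuming a word-aligned image with
 * the checksum word cleared to zero):
 *
 *	u32 sum = 0;
 *	for (i = 0; i < size / 4; i++)
 *		sum += ntohl(p[i]);
 *	csum_field = htonl(0xffffffff - sum);
 */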

/**
 * t4_read_cimq_cfg - read CIM queue configuration
 * @adap: the adapter
 * @base: holds the queue base addresses in bytes
 * @size: holds the queue sizes in bytes
 * @thres: holds the queue full thresholds in bytes
 *
 * Returns the current configuration of the CIM queues, starting with
 * the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256;	/* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256;	/* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;	/* 8-byte unit */
	}
	for (i = 0; i < CIM_NUM_OBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256;	/* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256;	/* value is in 256-byte units */
	}
}

/**
 * t4_read_cim_ibq - read the contents of a CIM inbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}
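
/*
 * Usage sketch (illustrative only): dump CIM inbound queue 0.
 *
 *	u32 buf[CIM_IBQ_SIZE * 4];
 *	int n;
 *
 *	n = t4_read_cim_ibq(adap, 0, buf, ARRAY_SIZE(buf));
 *	// n < 0 on error, else the number of words actually read
 */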

/**
 * t4_read_cim_obq - read the contents of a CIM outbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;	/* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;	/* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 * t4_cim_read - read a block from CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 * t4_cim_write - write a block into CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to write
 * @valp: set of values to write
 *
 * Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
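
/*
 * Usage sketch (illustrative only): read two words from the start of
 * the CIM control region; equivalent to t4_cim_ctl_read(adap, 0, 2,
 * vals) using the wrapper below.
 *
 *	unsigned int vals[2];
 *
 *	ret = t4_cim_read(adap, CIM_CTL_BASE, 2, vals);
 */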

/**
 * t4_cim_ctl_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 * t4_cim_read_la - read CIM LA capture buffer
 * @adap: the adapter
 * @la_buf: where to store the LA data
 * @wrptr: the HW write pointer within the capture buffer
 *
 * Reads the contents of the CIM LA buffer with the most recent entry at
 * the end of the returned data and with the entry at @wrptr first.
 * We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;
		idx = (idx + 1) & M_UPDBGLARDPTR;
	}
restart:
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/**
 * t4_tp_read_la - read TP LA capture buffer
 * @adap: the adapter
 * @la_buf: where to store the LA data
 * @wrptr: the HW write pointer within the capture buffer
 *
 * Reads the contents of the TP LA buffer with the most recent entry at
 * the end of the returned data and with the entry at @wrptr first.
 * We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)	/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)	/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}

void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate, first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate, just reset it.
 * - If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
					  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
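
/*
 * Usage sketch (illustrative only): a minimal table-driven handler in
 * the style of the module handlers below.  F_SOME_ERR and
 * A_SOME_INT_CAUSE are hypothetical names, not real registers; a zero
 * mask terminates the table and fatal entries are counted and returned.
 *
 *	static struct intr_info example_intr_info[] = {
 *		{ F_SOME_ERR, "example module error", -1, 1 },
 *		{ 0 }
 *	};
 *
 *	if (t4_handle_intr_status(adapter, A_SOME_INT_CAUSE,
 *				  example_intr_info))
 *		t4_fatal_err(adapter);
 */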

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter,
				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;
	u32 err;

	static struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
	if (v) {
		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
			 (unsigned long long)v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
	}

	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);

	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}

#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static struct intr_info ulprx_intr_info[] = {
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}
1758 */ 1759 static void ulptx_intr_handler(struct adapter *adapter) 1760 { 1761 static struct intr_info ulptx_intr_info[] = { 1762 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 1763 0 }, 1764 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 1765 0 }, 1766 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 1767 0 }, 1768 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 1769 0 }, 1770 { 0xfffffff, "ULPTX parity error", -1, 1 }, 1771 { 0 } 1772 }; 1773 1774 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info)) 1775 t4_fatal_err(adapter); 1776 } 1777 1778 /* 1779 * PM TX interrupt handler. 1780 */ 1781 static void pmtx_intr_handler(struct adapter *adapter) 1782 { 1783 static struct intr_info pmtx_intr_info[] = { 1784 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 1785 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 1786 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 1787 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 1788 { 0xffffff0, "PMTX framing error", -1, 1 }, 1789 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 1790 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1791 1 }, 1792 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 1793 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 1794 { 0 } 1795 }; 1796 1797 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info)) 1798 t4_fatal_err(adapter); 1799 } 1800 1801 /* 1802 * PM RX interrupt handler. 1803 */ 1804 static void pmrx_intr_handler(struct adapter *adapter) 1805 { 1806 static struct intr_info pmrx_intr_info[] = { 1807 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 1808 { 0x3ffff0, "PMRX framing error", -1, 1 }, 1809 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 1810 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1811 1 }, 1812 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 1813 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 1814 { 0 } 1815 }; 1816 1817 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info)) 1818 t4_fatal_err(adapter); 1819 } 1820 1821 /* 1822 * CPL switch interrupt handler. 1823 */ 1824 static void cplsw_intr_handler(struct adapter *adapter) 1825 { 1826 static struct intr_info cplsw_intr_info[] = { 1827 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 1828 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 1829 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 1830 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 1831 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 1832 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 1833 { 0 } 1834 }; 1835 1836 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info)) 1837 t4_fatal_err(adapter); 1838 } 1839 1840 /* 1841 * LE interrupt handler. 1842 */ 1843 static void le_intr_handler(struct adapter *adap) 1844 { 1845 static struct intr_info le_intr_info[] = { 1846 { F_LIPMISS, "LE LIP miss", -1, 0 }, 1847 { F_LIP0, "LE 0 LIP error", -1, 0 }, 1848 { F_PARITYERR, "LE parity error", -1, 1 }, 1849 { F_UNKNOWNCMD, "LE unknown command", -1, 1 }, 1850 { F_REQQPARERR, "LE request queue parity error", -1, 1 }, 1851 { 0 } 1852 }; 1853 1854 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info)) 1855 t4_fatal_err(adap); 1856 } 1857 1858 /* 1859 * MPS interrupt handler. 
1860 */ 1861 static void mps_intr_handler(struct adapter *adapter) 1862 { 1863 static struct intr_info mps_rx_intr_info[] = { 1864 { 0xffffff, "MPS Rx parity error", -1, 1 }, 1865 { 0 } 1866 }; 1867 static struct intr_info mps_tx_intr_info[] = { 1868 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 }, 1869 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 1870 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error", 1871 -1, 1 }, 1872 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error", 1873 -1, 1 }, 1874 { F_BUBBLE, "MPS Tx underflow", -1, 1 }, 1875 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 1876 { F_FRMERR, "MPS Tx framing error", -1, 1 }, 1877 { 0 } 1878 }; 1879 static struct intr_info mps_trc_intr_info[] = { 1880 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 }, 1881 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1, 1882 1 }, 1883 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 }, 1884 { 0 } 1885 }; 1886 static struct intr_info mps_stat_sram_intr_info[] = { 1887 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 1888 { 0 } 1889 }; 1890 static struct intr_info mps_stat_tx_intr_info[] = { 1891 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 1892 { 0 } 1893 }; 1894 static struct intr_info mps_stat_rx_intr_info[] = { 1895 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 1896 { 0 } 1897 }; 1898 static struct intr_info mps_cls_intr_info[] = { 1899 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 1900 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 1901 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 1902 { 0 } 1903 }; 1904 1905 int fat; 1906 1907 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE, 1908 mps_rx_intr_info) + 1909 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE, 1910 mps_tx_intr_info) + 1911 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE, 1912 mps_trc_intr_info) + 1913 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM, 1914 mps_stat_sram_intr_info) + 1915 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 1916 mps_stat_tx_intr_info) + 1917 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 1918 mps_stat_rx_intr_info) + 1919 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE, 1920 mps_cls_intr_info); 1921 1922 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0); 1923 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */ 1924 if (fat) 1925 t4_fatal_err(adapter); 1926 } 1927 1928 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE) 1929 1930 /* 1931 * EDC/MC interrupt handler. 1932 */ 1933 static void mem_intr_handler(struct adapter *adapter, int idx) 1934 { 1935 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 1936 1937 unsigned int addr, cnt_addr, v; 1938 1939 if (idx <= MEM_EDC1) { 1940 addr = EDC_REG(A_EDC_INT_CAUSE, idx); 1941 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx); 1942 } else { 1943 addr = A_MC_INT_CAUSE; 1944 cnt_addr = A_MC_ECC_STATUS; 1945 } 1946 1947 v = t4_read_reg(adapter, addr) & MEM_INT_MASK; 1948 if (v & F_PERR_INT_CAUSE) 1949 CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]); 1950 if (v & F_ECC_CE_INT_CAUSE) { 1951 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr)); 1952 1953 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT)); 1954 CH_WARN_RATELIMIT(adapter, 1955 "%u %s correctable ECC data error%s\n", 1956 cnt, name[idx], cnt > 1 ? 
"s" : ""); 1957 } 1958 if (v & F_ECC_UE_INT_CAUSE) 1959 CH_ALERT(adapter, "%s uncorrectable ECC data error\n", 1960 name[idx]); 1961 1962 t4_write_reg(adapter, addr, v); 1963 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE)) 1964 t4_fatal_err(adapter); 1965 } 1966 1967 /* 1968 * MA interrupt handler. 1969 */ 1970 static void ma_intr_handler(struct adapter *adapter) 1971 { 1972 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE); 1973 1974 if (status & F_MEM_PERR_INT_CAUSE) 1975 CH_ALERT(adapter, "MA parity error, parity status %#x\n", 1976 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS)); 1977 if (status & F_MEM_WRAP_INT_CAUSE) { 1978 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS); 1979 CH_ALERT(adapter, "MA address wrap-around error by client %u to" 1980 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v), 1981 G_MEM_WRAP_ADDRESS(v) << 4); 1982 } 1983 t4_write_reg(adapter, A_MA_INT_CAUSE, status); 1984 t4_fatal_err(adapter); 1985 } 1986 1987 /* 1988 * SMB interrupt handler. 1989 */ 1990 static void smb_intr_handler(struct adapter *adap) 1991 { 1992 static struct intr_info smb_intr_info[] = { 1993 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 1994 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 1995 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 1996 { 0 } 1997 }; 1998 1999 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info)) 2000 t4_fatal_err(adap); 2001 } 2002 2003 /* 2004 * NC-SI interrupt handler. 2005 */ 2006 static void ncsi_intr_handler(struct adapter *adap) 2007 { 2008 static struct intr_info ncsi_intr_info[] = { 2009 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 2010 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 2011 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 2012 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 2013 { 0 } 2014 }; 2015 2016 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info)) 2017 t4_fatal_err(adap); 2018 } 2019 2020 /* 2021 * XGMAC interrupt handler. 2022 */ 2023 static void xgmac_intr_handler(struct adapter *adap, int port) 2024 { 2025 u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE)); 2026 2027 v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR; 2028 if (!v) 2029 return; 2030 2031 if (v & F_TXFIFO_PRTY_ERR) 2032 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port); 2033 if (v & F_RXFIFO_PRTY_ERR) 2034 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port); 2035 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v); 2036 t4_fatal_err(adap); 2037 } 2038 2039 /* 2040 * PL interrupt handler. 2041 */ 2042 static void pl_intr_handler(struct adapter *adap) 2043 { 2044 static struct intr_info pl_intr_info[] = { 2045 { F_FATALPERR, "T4 fatal parity error", -1, 1 }, 2046 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 2047 { 0 } 2048 }; 2049 2050 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info)) 2051 t4_fatal_err(adap); 2052 } 2053 2054 #define PF_INTR_MASK (F_PFSW | F_PFCIM) 2055 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \ 2056 F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \ 2057 F_CPL_SWITCH | F_SGE | F_ULP_TX) 2058 2059 /** 2060 * t4_slow_intr_handler - control path interrupt handler 2061 * @adapter: the adapter 2062 * 2063 * T4 interrupt handler for non-data global interrupt events, e.g., errors. 2064 * The designation 'slow' is because it involves register reads, while 2065 * data interrupts typically don't involve any MMIOs. 
2066 */ 2067 int t4_slow_intr_handler(struct adapter *adapter) 2068 { 2069 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE); 2070 2071 if (!(cause & GLBL_INTR_MASK)) 2072 return 0; 2073 if (cause & F_CIM) 2074 cim_intr_handler(adapter); 2075 if (cause & F_MPS) 2076 mps_intr_handler(adapter); 2077 if (cause & F_NCSI) 2078 ncsi_intr_handler(adapter); 2079 if (cause & F_PL) 2080 pl_intr_handler(adapter); 2081 if (cause & F_SMB) 2082 smb_intr_handler(adapter); 2083 if (cause & F_XGMAC0) 2084 xgmac_intr_handler(adapter, 0); 2085 if (cause & F_XGMAC1) 2086 xgmac_intr_handler(adapter, 1); 2087 if (cause & F_XGMAC_KR0) 2088 xgmac_intr_handler(adapter, 2); 2089 if (cause & F_XGMAC_KR1) 2090 xgmac_intr_handler(adapter, 3); 2091 if (cause & F_PCIE) 2092 pcie_intr_handler(adapter); 2093 if (cause & F_MC) 2094 mem_intr_handler(adapter, MEM_MC); 2095 if (cause & F_EDC0) 2096 mem_intr_handler(adapter, MEM_EDC0); 2097 if (cause & F_EDC1) 2098 mem_intr_handler(adapter, MEM_EDC1); 2099 if (cause & F_LE) 2100 le_intr_handler(adapter); 2101 if (cause & F_TP) 2102 tp_intr_handler(adapter); 2103 if (cause & F_MA) 2104 ma_intr_handler(adapter); 2105 if (cause & F_PM_TX) 2106 pmtx_intr_handler(adapter); 2107 if (cause & F_PM_RX) 2108 pmrx_intr_handler(adapter); 2109 if (cause & F_ULP_RX) 2110 ulprx_intr_handler(adapter); 2111 if (cause & F_CPL_SWITCH) 2112 cplsw_intr_handler(adapter); 2113 if (cause & F_SGE) 2114 sge_intr_handler(adapter); 2115 if (cause & F_ULP_TX) 2116 ulptx_intr_handler(adapter); 2117 2118 /* Clear the interrupts just processed for which we are the master. */ 2119 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK); 2120 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */ 2121 return 1; 2122 } 2123 2124 /** 2125 * t4_intr_enable - enable interrupts 2126 * @adapter: the adapter whose interrupts should be enabled 2127 * 2128 * Enable PF-specific interrupts for the calling function and the top-level 2129 * interrupt concentrator for global interrupts. Interrupts are already 2130 * enabled at each module, here we just enable the roots of the interrupt 2131 * hierarchies. 2132 * 2133 * Note: this function should be called only when the driver manages 2134 * non PF-specific interrupts from the various HW modules. Only one PCI 2135 * function at a time should be doing this. 2136 */ 2137 void t4_intr_enable(struct adapter *adapter) 2138 { 2139 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); 2140 2141 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE | 2142 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | 2143 F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 | 2144 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 | 2145 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 | 2146 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | 2147 F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR | 2148 F_EGRESS_SIZE_ERR); 2149 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK); 2150 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf); 2151 } 2152 2153 /** 2154 * t4_intr_disable - disable interrupts 2155 * @adapter: the adapter whose interrupts should be disabled 2156 * 2157 * Disable interrupts. We only disable the top-level interrupt 2158 * concentrators. The caller must be a PCI function managing global 2159 * interrupts. 
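 *
 * Example (editor's sketch): a detach or reset path would typically
 * quiesce the interrupt roots before tearing down queues, mirroring the
 * attach-time sequence:
 *
 *	t4_intr_disable(adap);		/* stop PF and global roots */
 *	/* ... drain and free queues; on re-attach: ... */
 *	t4_intr_clear(adap);
 *	t4_intr_enable(adap);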
2160 */ 2161 void t4_intr_disable(struct adapter *adapter) 2162 { 2163 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); 2164 2165 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0); 2166 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0); 2167 } 2168 2169 /** 2170 * t4_intr_clear - clear all interrupts 2171 * @adapter: the adapter whose interrupts should be cleared 2172 * 2173 * Clears all interrupts. The caller must be a PCI function managing 2174 * global interrupts. 2175 */ 2176 void t4_intr_clear(struct adapter *adapter) 2177 { 2178 static const unsigned int cause_reg[] = { 2179 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3, 2180 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 2181 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 2182 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE, 2183 A_MC_INT_CAUSE, 2184 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE, 2185 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1), 2186 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE, 2187 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE), 2188 A_TP_INT_CAUSE, 2189 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE, 2190 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE, 2191 A_MPS_RX_PERR_INT_CAUSE, 2192 A_CPL_INTR_CAUSE, 2193 MYPF_REG(A_PL_PF_INT_CAUSE), 2194 A_PL_PL_INT_CAUSE, 2195 A_LE_DB_INT_CAUSE, 2196 }; 2197 2198 unsigned int i; 2199 2200 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i) 2201 t4_write_reg(adapter, cause_reg[i], 0xffffffff); 2202 2203 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK); 2204 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */ 2205 } 2206 2207 /** 2208 * hash_mac_addr - return the hash value of a MAC address 2209 * @addr: the 48-bit Ethernet MAC address 2210 * 2211 * Hashes a MAC address according to the hash function used by HW inexact 2212 * (hash) address matching. 2213 */ 2214 static int hash_mac_addr(const u8 *addr) 2215 { 2216 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; 2217 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; 2218 a ^= b; 2219 a ^= (a >> 12); 2220 a ^= (a >> 6); 2221 return a & 0x3f; 2222 } 2223 2224 /** 2225 * t4_config_rss_range - configure a portion of the RSS mapping table 2226 * @adapter: the adapter 2227 * @mbox: mbox to use for the FW command 2228 * @viid: virtual interface whose RSS subtable is to be written 2229 * @start: start entry in the table to write 2230 * @n: how many table entries to write 2231 * @rspq: values for the "response queue" (Ingress Queue) lookup table 2232 * @nrspq: number of values in @rspq 2233 * 2234 * Programs the selected part of the VI's RSS mapping table with the 2235 * provided values. If @nrspq < @n the supplied values are used repeatedly 2236 * until the full table range is populated. 2237 * 2238 * The caller must ensure the values in @rspq are in the range allowed for 2239 * @viid. 2240 */ 2241 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 2242 int start, int n, const u16 *rspq, unsigned int nrspq) 2243 { 2244 int ret; 2245 const u16 *rsp = rspq; 2246 const u16 *rsp_end = rspq + nrspq; 2247 struct fw_rss_ind_tbl_cmd cmd; 2248 2249 memset(&cmd, 0, sizeof(cmd)); 2250 cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) | 2251 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 2252 V_FW_RSS_IND_TBL_CMD_VIID(viid)); 2253 cmd.retval_len16 = htonl(FW_LEN16(cmd)); 2254 2255 2256 /* 2257 * Each firmware RSS command can accommodate up to 32 RSS Ingress 2258 * Queue Identifiers. 
These Ingress Queue IDs are packed three to
2259 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2260 * reserved.
2261 */
2262 while (n > 0) {
2263 int nq = min(n, 32);
2264 __be32 *qp = &cmd.iq0_to_iq2;
2265
2266 /*
2267 * Set up the firmware RSS command header to send the next
2268 * "nq" Ingress Queue IDs to the firmware.
2269 */
2270 cmd.niqid = htons(nq);
2271 cmd.startidx = htons(start);
2272
2273 /*
2274 * Advance "start" and "n" past the "nq" IDs we are about to send.
2275 */
2276 start += nq;
2277 n -= nq;
2278
2279 /*
2280 * While there are still Ingress Queue IDs to stuff into the
2281 * current firmware RSS command, retrieve them from the
2282 * Ingress Queue ID array and insert them into the command.
2283 */
2284 while (nq > 0) {
2285 unsigned int v;
2286 /*
2287 * Grab up to the next 3 Ingress Queue IDs (wrapping
2288 * around the Ingress Queue ID array if necessary) and
2289 * insert them into the firmware RSS command at the
2290 * current 3-tuple position within the command.
2291 */
2292 v = V_FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2293 if (++rsp >= rsp_end)
2294 rsp = rspq;
2295 v |= V_FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2296 if (++rsp >= rsp_end)
2297 rsp = rspq;
2298 v |= V_FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2299 if (++rsp >= rsp_end)
2300 rsp = rspq;
2301
2302 *qp++ = htonl(v);
2303 nq -= 3;
2304 }
2305
2306 /*
2307 * Send this portion of the RSS table update to the firmware;
2308 * bail out on any errors.
2309 */
2310 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2311 if (ret)
2312 return ret;
2313 }
2314
2315 return 0;
2316 }
2317
2318 /**
2319 * t4_config_glbl_rss - configure the global RSS mode
2320 * @adapter: the adapter
2321 * @mbox: mbox to use for the FW command
2322 * @mode: global RSS mode
2323 * @flags: mode-specific flags
2324 *
2325 * Sets the global RSS mode.
2326 */
2327 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2328 unsigned int flags)
2329 {
2330 struct fw_rss_glb_config_cmd c;
2331
2332 memset(&c, 0, sizeof(c));
2333 c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2334 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2335 c.retval_len16 = htonl(FW_LEN16(c));
2336 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2337 c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2338 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2339 c.u.basicvirtual.mode_pkd =
2340 htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2341 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2342 } else
2343 return -EINVAL;
2344 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2345 }
2346
2347 /**
2348 * t4_config_vi_rss - configure per VI RSS settings
2349 * @adapter: the adapter
2350 * @mbox: mbox to use for the FW command
2351 * @viid: the VI id
2352 * @flags: RSS flags
2353 * @defq: id of the default RSS queue for the VI.
2354 *
2355 * Configures VI-specific RSS properties.
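 *
 * Example (editor's sketch, hypothetical queue IDs and flags): spreading
 * a VI over four ingress queues and then pointing its default queue at
 * the first:
 *
 *	u16 rss[4] = { iq0, iq1, iq2, iq3 };	/* absolute IQ IDs */
 *	int ret;
 *
 *	ret = t4_config_rss_range(adap, mbox, viid, 0, 64, rss, 4);
 *	if (ret == 0)
 *		ret = t4_config_vi_rss(adap, mbox, viid, flags, rss[0]);
 *
 * where "flags" would carry the FW_RSS_VI_CONFIG_CMD_* hash-enable bits
 * from the firmware interface.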
2356 */
2357 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2358 unsigned int flags, unsigned int defq)
2359 {
2360 struct fw_rss_vi_config_cmd c;
2361
2362 memset(&c, 0, sizeof(c));
2363 c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2364 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2365 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2366 c.retval_len16 = htonl(FW_LEN16(c));
2367 c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2368 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2369 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2370 }
2371
2372 /* Read an RSS table row */
2373 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2374 {
2375 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2376 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2377 5, 0, val);
2378 }
2379
2380 /**
2381 * t4_read_rss - read the contents of the RSS mapping table
2382 * @adapter: the adapter
2383 * @map: holds the contents of the RSS mapping table
2384 *
2385 * Reads the contents of the RSS hash->queue mapping table.
2386 */
2387 int t4_read_rss(struct adapter *adapter, u16 *map)
2388 {
2389 u32 val;
2390 int i, ret;
2391
2392 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2393 ret = rd_rss_row(adapter, i, &val);
2394 if (ret)
2395 return ret;
2396 *map++ = G_LKPTBLQUEUE0(val);
2397 *map++ = G_LKPTBLQUEUE1(val);
2398 }
2399 return 0;
2400 }
2401
2402 /**
2403 * t4_read_rss_key - read the global RSS key
2404 * @adap: the adapter
2405 * @key: 10-entry array holding the 320-bit RSS key
2406 *
2407 * Reads the global 320-bit RSS key.
2408 */
2409 void t4_read_rss_key(struct adapter *adap, u32 *key)
2410 {
2411 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2412 A_TP_RSS_SECRET_KEY0);
2413 }
2414
2415 /**
2416 * t4_write_rss_key - program one of the RSS keys
2417 * @adap: the adapter
2418 * @key: 10-entry array holding the 320-bit RSS key
2419 * @idx: which RSS key to write
2420 *
2421 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2422 * 0..15 the corresponding entry in the RSS key table is written,
2423 * otherwise the global RSS key is written.
2424 */
2425 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2426 {
2427 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2428 A_TP_RSS_SECRET_KEY0);
2429 if (idx >= 0 && idx < 16)
2430 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2431 V_KEYWRADDR(idx) | F_KEYWREN);
2432 }
2433
2434 /**
2435 * t4_read_rss_pf_config - read PF RSS Configuration Table
2436 * @adapter: the adapter
2437 * @index: the entry in the PF RSS table to read
2438 * @valp: where to store the returned value
2439 *
2440 * Reads the PF RSS Configuration Table at the specified index and returns
2441 * the value found there.
2442 */
2443 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2444 {
2445 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2446 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2447 }
2448
2449 /**
2450 * t4_write_rss_pf_config - write PF RSS Configuration Table
2451 * @adapter: the adapter
2452 * @index: the entry in the PF RSS table to write
2453 * @val: the value to store
2454 *
2455 * Writes the PF RSS Configuration Table at the specified index with the
2456 * specified value.
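 *
 * Example (editor's sketch): a read-modify-write of one PF's entry using
 * the companion reader above:
 *
 *	u32 pfconf;
 *
 *	t4_read_rss_pf_config(adap, pf, &pfconf);
 *	pfconf |= bits_to_set;		/* hypothetical bits */
 *	t4_write_rss_pf_config(adap, pf, pfconf);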
2457 */ 2458 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val) 2459 { 2460 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2461 &val, 1, A_TP_RSS_PF0_CONFIG + index); 2462 } 2463 2464 /** 2465 * t4_read_rss_vf_config - read VF RSS Configuration Table 2466 * @adapter: the adapter 2467 * @index: the entry in the VF RSS table to read 2468 * @vfl: where to store the returned VFL 2469 * @vfh: where to store the returned VFH 2470 * 2471 * Reads the VF RSS Configuration Table at the specified index and returns 2472 * the (VFL, VFH) values found there. 2473 */ 2474 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, 2475 u32 *vfl, u32 *vfh) 2476 { 2477 u32 vrt; 2478 2479 /* 2480 * Request that the index'th VF Table values be read into VFL/VFH. 2481 */ 2482 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); 2483 vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN); 2484 vrt |= V_VFWRADDR(index) | F_VFRDEN; 2485 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); 2486 2487 /* 2488 * Grab the VFL/VFH values ... 2489 */ 2490 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2491 vfl, 1, A_TP_RSS_VFL_CONFIG); 2492 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2493 vfh, 1, A_TP_RSS_VFH_CONFIG); 2494 } 2495 2496 /** 2497 * t4_write_rss_vf_config - write VF RSS Configuration Table 2498 * 2499 * @adapter: the adapter 2500 * @index: the entry in the VF RSS table to write 2501 * @vfl: the VFL to store 2502 * @vfh: the VFH to store 2503 * 2504 * Writes the VF RSS Configuration Table at the specified index with the 2505 * specified (VFL, VFH) values. 2506 */ 2507 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, 2508 u32 vfl, u32 vfh) 2509 { 2510 u32 vrt; 2511 2512 /* 2513 * Load up VFL/VFH with the values to be written ... 2514 */ 2515 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2516 &vfl, 1, A_TP_RSS_VFL_CONFIG); 2517 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2518 &vfh, 1, A_TP_RSS_VFH_CONFIG); 2519 2520 /* 2521 * Write the VFL/VFH into the VF Table at index'th location. 2522 */ 2523 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); 2524 vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN); 2525 vrt |= V_VFWRADDR(index) | F_VFWREN; 2526 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); 2527 } 2528 2529 /** 2530 * t4_read_rss_pf_map - read PF RSS Map 2531 * @adapter: the adapter 2532 * 2533 * Reads the PF RSS Map register and returns its value. 2534 */ 2535 u32 t4_read_rss_pf_map(struct adapter *adapter) 2536 { 2537 u32 pfmap; 2538 2539 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2540 &pfmap, 1, A_TP_RSS_PF_MAP); 2541 return pfmap; 2542 } 2543 2544 /** 2545 * t4_write_rss_pf_map - write PF RSS Map 2546 * @adapter: the adapter 2547 * @pfmap: PF RSS Map value 2548 * 2549 * Writes the specified value to the PF RSS Map register. 2550 */ 2551 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap) 2552 { 2553 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 2554 &pfmap, 1, A_TP_RSS_PF_MAP); 2555 } 2556 2557 /** 2558 * t4_read_rss_pf_mask - read PF RSS Mask 2559 * @adapter: the adapter 2560 * 2561 * Reads the PF RSS Mask register and returns its value. 
2562 */
2563 u32 t4_read_rss_pf_mask(struct adapter *adapter)
2564 {
2565 u32 pfmask;
2566
2567 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2568 &pfmask, 1, A_TP_RSS_PF_MSK);
2569 return pfmask;
2570 }
2571
2572 /**
2573 * t4_write_rss_pf_mask - write PF RSS Mask
2574 * @adapter: the adapter
2575 * @pfmask: PF RSS Mask value
2576 *
2577 * Writes the specified value to the PF RSS Mask register.
2578 */
2579 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2580 {
2581 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2582 &pfmask, 1, A_TP_RSS_PF_MSK);
2583 }
2584
2585 /**
2586 * t4_set_filter_mode - configure the optional components of filter tuples
2587 * @adap: the adapter
2588 * @mode_map: a bitmap selecting which optional filter components to enable
2589 *
2590 * Sets the filter mode by selecting the optional components to enable
2591 * in filter tuples. Returns 0 on success and a negative error if the
2592 * requested mode needs more bits than are available for optional
2593 * components.
2594 */
2595 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2596 {
2597 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2598
2599 int i, nbits = 0;
2600
2601 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2602 if (mode_map & (1 << i))
2603 nbits += width[i];
2604 if (nbits > FILTER_OPT_LEN)
2605 return -EINVAL;
2606 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
2607 A_TP_VLAN_PRI_MAP);
2608 return 0;
2609 }
2610
2611 /**
2612 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2613 * @adap: the adapter
2614 * @v4: holds the TCP/IP counter values
2615 * @v6: holds the TCP/IPv6 counter values
2616 *
2617 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2618 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2619 */
2620 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2621 struct tp_tcp_stats *v6)
2622 {
2623 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
2624
2625 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
2626 #define STAT(x) val[STAT_IDX(x)]
2627 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2628
2629 if (v4) {
2630 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2631 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
2632 v4->tcpOutRsts = STAT(OUT_RST);
2633 v4->tcpInSegs = STAT64(IN_SEG);
2634 v4->tcpOutSegs = STAT64(OUT_SEG);
2635 v4->tcpRetransSegs = STAT64(RXT_SEG);
2636 }
2637 if (v6) {
2638 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2639 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
2640 v6->tcpOutRsts = STAT(OUT_RST);
2641 v6->tcpInSegs = STAT64(IN_SEG);
2642 v6->tcpOutSegs = STAT64(OUT_SEG);
2643 v6->tcpRetransSegs = STAT64(RXT_SEG);
2644 }
2645 #undef STAT64
2646 #undef STAT
2647 #undef STAT_IDX
2648 }
2649
2650 /**
2651 * t4_tp_get_err_stats - read TP's error MIB counters
2652 * @adap: the adapter
2653 * @st: holds the counter values
2654 *
2655 * Returns the values of TP's error counters.
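 *
 * Example (editor's sketch): the counters are cumulative, so a monitor
 * would snapshot them and report deltas:
 *
 *	struct tp_err_stats st;
 *
 *	t4_tp_get_err_stats(adap, &st);
 *	/* e.g. st.macInErrs[0..11], st.tnlTxDrops[0..3], st.ofldNoNeigh */
 *
 * Reads go through the shared TP_MIB address/data register pair, so
 * callers are expected to serialize indirect TP accesses.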
2656 */ 2657 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) 2658 { 2659 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs, 2660 12, A_TP_MIB_MAC_IN_ERR_0); 2661 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops, 2662 8, A_TP_MIB_TNL_CNG_DROP_0); 2663 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops, 2664 4, A_TP_MIB_TNL_DROP_0); 2665 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops, 2666 4, A_TP_MIB_OFD_VLN_DROP_0); 2667 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs, 2668 4, A_TP_MIB_TCP_V6IN_ERR_0); 2669 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh, 2670 2, A_TP_MIB_OFD_ARP_DROP); 2671 } 2672 2673 /** 2674 * t4_tp_get_proxy_stats - read TP's proxy MIB counters 2675 * @adap: the adapter 2676 * @st: holds the counter values 2677 * 2678 * Returns the values of TP's proxy counters. 2679 */ 2680 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st) 2681 { 2682 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy, 2683 4, A_TP_MIB_TNL_LPBK_0); 2684 } 2685 2686 /** 2687 * t4_tp_get_cpl_stats - read TP's CPL MIB counters 2688 * @adap: the adapter 2689 * @st: holds the counter values 2690 * 2691 * Returns the values of TP's CPL counters. 2692 */ 2693 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st) 2694 { 2695 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req, 2696 8, A_TP_MIB_CPL_IN_REQ_0); 2697 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tx_err, 2698 4, A_TP_MIB_CPL_OUT_ERR_0); 2699 } 2700 2701 /** 2702 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters 2703 * @adap: the adapter 2704 * @st: holds the counter values 2705 * 2706 * Returns the values of TP's RDMA counters. 2707 */ 2708 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st) 2709 { 2710 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod, 2711 2, A_TP_MIB_RQE_DFR_MOD); 2712 } 2713 2714 /** 2715 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port 2716 * @adap: the adapter 2717 * @idx: the port index 2718 * @st: holds the counter values 2719 * 2720 * Returns the values of TP's FCoE counters for the selected port. 2721 */ 2722 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, 2723 struct tp_fcoe_stats *st) 2724 { 2725 u32 val[2]; 2726 2727 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP, 2728 1, A_TP_MIB_FCOE_DDP_0 + idx); 2729 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop, 2730 1, A_TP_MIB_FCOE_DROP_0 + idx); 2731 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 2732 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx); 2733 st->octetsDDP = ((u64)val[0] << 32) | val[1]; 2734 } 2735 2736 /** 2737 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters 2738 * @adap: the adapter 2739 * @st: holds the counter values 2740 * 2741 * Returns the values of TP's counters for non-TCP directly-placed packets. 
2742 */ 2743 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st) 2744 { 2745 u32 val[4]; 2746 2747 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4, 2748 A_TP_MIB_USM_PKTS); 2749 st->frames = val[0]; 2750 st->drops = val[1]; 2751 st->octets = ((u64)val[2] << 32) | val[3]; 2752 } 2753 2754 /** 2755 * t4_read_mtu_tbl - returns the values in the HW path MTU table 2756 * @adap: the adapter 2757 * @mtus: where to store the MTU values 2758 * @mtu_log: where to store the MTU base-2 log (may be %NULL) 2759 * 2760 * Reads the HW path MTU table. 2761 */ 2762 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) 2763 { 2764 u32 v; 2765 int i; 2766 2767 for (i = 0; i < NMTUS; ++i) { 2768 t4_write_reg(adap, A_TP_MTU_TABLE, 2769 V_MTUINDEX(0xff) | V_MTUVALUE(i)); 2770 v = t4_read_reg(adap, A_TP_MTU_TABLE); 2771 mtus[i] = G_MTUVALUE(v); 2772 if (mtu_log) 2773 mtu_log[i] = G_MTUWIDTH(v); 2774 } 2775 } 2776 2777 /** 2778 * t4_read_cong_tbl - reads the congestion control table 2779 * @adap: the adapter 2780 * @incr: where to store the alpha values 2781 * 2782 * Reads the additive increments programmed into the HW congestion 2783 * control table. 2784 */ 2785 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]) 2786 { 2787 unsigned int mtu, w; 2788 2789 for (mtu = 0; mtu < NMTUS; ++mtu) 2790 for (w = 0; w < NCCTRL_WIN; ++w) { 2791 t4_write_reg(adap, A_TP_CCTRL_TABLE, 2792 V_ROWINDEX(0xffff) | (mtu << 5) | w); 2793 incr[mtu][w] = (u16)t4_read_reg(adap, 2794 A_TP_CCTRL_TABLE) & 0x1fff; 2795 } 2796 } 2797 2798 /** 2799 * t4_read_pace_tbl - read the pace table 2800 * @adap: the adapter 2801 * @pace_vals: holds the returned values 2802 * 2803 * Returns the values of TP's pace table in microseconds. 2804 */ 2805 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]) 2806 { 2807 unsigned int i, v; 2808 2809 for (i = 0; i < NTX_SCHED; i++) { 2810 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i); 2811 v = t4_read_reg(adap, A_TP_PACE_TABLE); 2812 pace_vals[i] = dack_ticks_to_usec(adap, v); 2813 } 2814 } 2815 2816 /** 2817 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register 2818 * @adap: the adapter 2819 * @addr: the indirect TP register address 2820 * @mask: specifies the field within the register to modify 2821 * @val: new value for the field 2822 * 2823 * Sets a field of an indirect TP register to the given value. 2824 */ 2825 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 2826 unsigned int mask, unsigned int val) 2827 { 2828 t4_write_reg(adap, A_TP_PIO_ADDR, addr); 2829 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; 2830 t4_write_reg(adap, A_TP_PIO_DATA, val); 2831 } 2832 2833 /** 2834 * init_cong_ctrl - initialize congestion control parameters 2835 * @a: the alpha values for congestion control 2836 * @b: the beta values for congestion control 2837 * 2838 * Initialize the congestion control parameters. 
2839 */ 2840 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) 2841 { 2842 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 2843 a[9] = 2; 2844 a[10] = 3; 2845 a[11] = 4; 2846 a[12] = 5; 2847 a[13] = 6; 2848 a[14] = 7; 2849 a[15] = 8; 2850 a[16] = 9; 2851 a[17] = 10; 2852 a[18] = 14; 2853 a[19] = 17; 2854 a[20] = 21; 2855 a[21] = 25; 2856 a[22] = 30; 2857 a[23] = 35; 2858 a[24] = 45; 2859 a[25] = 60; 2860 a[26] = 80; 2861 a[27] = 100; 2862 a[28] = 200; 2863 a[29] = 300; 2864 a[30] = 400; 2865 a[31] = 500; 2866 2867 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 2868 b[9] = b[10] = 1; 2869 b[11] = b[12] = 2; 2870 b[13] = b[14] = b[15] = b[16] = 3; 2871 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 2872 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 2873 b[28] = b[29] = 6; 2874 b[30] = b[31] = 7; 2875 } 2876 2877 /* The minimum additive increment value for the congestion control table */ 2878 #define CC_MIN_INCR 2U 2879 2880 /** 2881 * t4_load_mtus - write the MTU and congestion control HW tables 2882 * @adap: the adapter 2883 * @mtus: the values for the MTU table 2884 * @alpha: the values for the congestion control alpha parameter 2885 * @beta: the values for the congestion control beta parameter 2886 * 2887 * Write the HW MTU table with the supplied MTUs and the high-speed 2888 * congestion control table with the supplied alpha, beta, and MTUs. 2889 * We write the two tables together because the additive increments 2890 * depend on the MTUs. 2891 */ 2892 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 2893 const unsigned short *alpha, const unsigned short *beta) 2894 { 2895 static const unsigned int avg_pkts[NCCTRL_WIN] = { 2896 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 2897 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 2898 28672, 40960, 57344, 81920, 114688, 163840, 229376 2899 }; 2900 2901 unsigned int i, w; 2902 2903 for (i = 0; i < NMTUS; ++i) { 2904 unsigned int mtu = mtus[i]; 2905 unsigned int log2 = fls(mtu); 2906 2907 if (!(mtu & ((1 << log2) >> 2))) /* round */ 2908 log2--; 2909 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) | 2910 V_MTUWIDTH(log2) | V_MTUVALUE(mtu)); 2911 2912 for (w = 0; w < NCCTRL_WIN; ++w) { 2913 unsigned int inc; 2914 2915 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 2916 CC_MIN_INCR); 2917 2918 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | 2919 (w << 16) | (beta[w] << 13) | inc); 2920 } 2921 } 2922 } 2923 2924 /** 2925 * t4_set_pace_tbl - set the pace table 2926 * @adap: the adapter 2927 * @pace_vals: the pace values in microseconds 2928 * @start: index of the first entry in the HW pace table to set 2929 * @n: how many entries to set 2930 * 2931 * Sets (a subset of the) HW pace table. 
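 *
 * Example (editor's sketch): programming the first two pace entries to
 * 100 and 200 microseconds:
 *
 *	unsigned int pace[2] = { 100, 200 };	/* microseconds */
 *
 *	if (t4_set_pace_tbl(adap, pace, 0, 2))
 *		/* a value did not fit in 11 bits of DACK ticks */;
 *
 * Values are converted to DACK ticks and rejected with -ERANGE when the
 * conversion overflows or rounds a non-zero value down to zero.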
2932 */ 2933 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals, 2934 unsigned int start, unsigned int n) 2935 { 2936 unsigned int vals[NTX_SCHED], i; 2937 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000); 2938 2939 if (n > NTX_SCHED) 2940 return -ERANGE; 2941 2942 /* convert values from us to dack ticks, rounding to closest value */ 2943 for (i = 0; i < n; i++, pace_vals++) { 2944 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns; 2945 if (vals[i] > 0x7ff) 2946 return -ERANGE; 2947 if (*pace_vals && vals[i] == 0) 2948 return -ERANGE; 2949 } 2950 for (i = 0; i < n; i++, start++) 2951 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]); 2952 return 0; 2953 } 2954 2955 /** 2956 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler 2957 * @adap: the adapter 2958 * @kbps: target rate in Kbps 2959 * @sched: the scheduler index 2960 * 2961 * Configure a Tx HW scheduler for the target rate. 2962 */ 2963 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps) 2964 { 2965 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; 2966 unsigned int clk = adap->params.vpd.cclk * 1000; 2967 unsigned int selected_cpt = 0, selected_bpt = 0; 2968 2969 if (kbps > 0) { 2970 kbps *= 125; /* -> bytes */ 2971 for (cpt = 1; cpt <= 255; cpt++) { 2972 tps = clk / cpt; 2973 bpt = (kbps + tps / 2) / tps; 2974 if (bpt > 0 && bpt <= 255) { 2975 v = bpt * tps; 2976 delta = v >= kbps ? v - kbps : kbps - v; 2977 if (delta < mindelta) { 2978 mindelta = delta; 2979 selected_cpt = cpt; 2980 selected_bpt = bpt; 2981 } 2982 } else if (selected_cpt) 2983 break; 2984 } 2985 if (!selected_cpt) 2986 return -EINVAL; 2987 } 2988 t4_write_reg(adap, A_TP_TM_PIO_ADDR, 2989 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); 2990 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 2991 if (sched & 1) 2992 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24); 2993 else 2994 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8); 2995 t4_write_reg(adap, A_TP_TM_PIO_DATA, v); 2996 return 0; 2997 } 2998 2999 /** 3000 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler 3001 * @adap: the adapter 3002 * @sched: the scheduler index 3003 * @ipg: the interpacket delay in tenths of nanoseconds 3004 * 3005 * Set the interpacket delay for a HW packet rate scheduler. 3006 */ 3007 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg) 3008 { 3009 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; 3010 3011 /* convert ipg to nearest number of core clocks */ 3012 ipg *= core_ticks_per_usec(adap); 3013 ipg = (ipg + 5000) / 10000; 3014 if (ipg > M_TXTIMERSEPQ0) 3015 return -EINVAL; 3016 3017 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 3018 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3019 if (sched & 1) 3020 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg); 3021 else 3022 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg); 3023 t4_write_reg(adap, A_TP_TM_PIO_DATA, v); 3024 t4_read_reg(adap, A_TP_TM_PIO_DATA); 3025 return 0; 3026 } 3027 3028 /** 3029 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler 3030 * @adap: the adapter 3031 * @sched: the scheduler index 3032 * @kbps: the byte rate in Kbps 3033 * @ipg: the interpacket delay in tenths of nanoseconds 3034 * 3035 * Return the current configuration of a HW Tx scheduler. 
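 *
 * Worked example (editor's addition, assuming a 250 MHz core clock): a
 * scheduler programmed with cpt = 250 and bpt = 125 reads back as
 *
 *	ticks/s = 250000000 / 250 = 1000000
 *	bytes/s = 1000000 * 125 = 125000000
 *	*kbps = 125000000 / 125 = 1000000	(i.e. 1 Gb/s)
 *
 * which is the inverse of the kbps-to-bytes (* 125) conversion performed
 * by t4_set_sched_bps().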
3036 */ 3037 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, 3038 unsigned int *ipg) 3039 { 3040 unsigned int v, addr, bpt, cpt; 3041 3042 if (kbps) { 3043 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; 3044 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 3045 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3046 if (sched & 1) 3047 v >>= 16; 3048 bpt = (v >> 8) & 0xff; 3049 cpt = v & 0xff; 3050 if (!cpt) 3051 *kbps = 0; /* scheduler disabled */ 3052 else { 3053 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */ 3054 *kbps = (v * bpt) / 125; 3055 } 3056 } 3057 if (ipg) { 3058 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; 3059 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 3060 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 3061 if (sched & 1) 3062 v >>= 16; 3063 v &= 0xffff; 3064 *ipg = (10000 * v) / core_ticks_per_usec(adap); 3065 } 3066 } 3067 3068 /* 3069 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core 3070 * clocks. The formula is 3071 * 3072 * bytes/s = bytes256 * 256 * ClkFreq / 4096 3073 * 3074 * which is equivalent to 3075 * 3076 * bytes/s = 62.5 * bytes256 * ClkFreq_ms 3077 */ 3078 static u64 chan_rate(struct adapter *adap, unsigned int bytes256) 3079 { 3080 u64 v = bytes256 * adap->params.vpd.cclk; 3081 3082 return v * 62 + v / 2; 3083 } 3084 3085 /** 3086 * t4_get_chan_txrate - get the current per channel Tx rates 3087 * @adap: the adapter 3088 * @nic_rate: rates for NIC traffic 3089 * @ofld_rate: rates for offloaded traffic 3090 * 3091 * Return the current Tx rates in bytes/s for NIC and offloaded traffic 3092 * for each channel. 3093 */ 3094 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) 3095 { 3096 u32 v; 3097 3098 v = t4_read_reg(adap, A_TP_TX_TRATE); 3099 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v)); 3100 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v)); 3101 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v)); 3102 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v)); 3103 3104 v = t4_read_reg(adap, A_TP_TX_ORATE); 3105 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v)); 3106 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v)); 3107 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v)); 3108 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v)); 3109 } 3110 3111 /** 3112 * t4_set_trace_filter - configure one of the tracing filters 3113 * @adap: the adapter 3114 * @tp: the desired trace filter parameters 3115 * @idx: which filter to configure 3116 * @enable: whether to enable or disable the filter 3117 * 3118 * Configures one of the tracing filters available in HW. If @enable is 3119 * %0 @tp is not examined and may be %NULL. 
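 *
 * Example (editor's sketch, hypothetical parameters): capturing the
 * first 128 bytes of every packet on port 0 with tracer 1:
 *
 *	struct trace_params tp;
 *
 *	memset(&tp, 0, sizeof(tp));	/* zero mask: match everything */
 *	tp.snap_len = 128;
 *	tp.port = 0;
 *	if (t4_set_trace_filter(adap, &tp, 1, 1))
 *		/* invalid parameters or mode-switch timeout */;
 *
 * Tracers other than 0 are limited to 256-byte snapshots by the
 * validation below.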
3120 */ 3121 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx, 3122 int enable) 3123 { 3124 int i, ofst = idx * 4; 3125 u32 data_reg, mask_reg, cfg; 3126 u32 multitrc = F_TRCMULTIFILTER; 3127 3128 if (!enable) { 3129 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); 3130 goto out; 3131 } 3132 3133 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH || 3134 tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE || 3135 tp->snap_len > 9600 || (idx && tp->snap_len > 256)) 3136 return -EINVAL; 3137 3138 if (tp->snap_len > 256) { /* must be tracer 0 */ 3139 if ((t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 4) | 3140 t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 8) | 3141 t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 12)) & 3142 F_TFEN) 3143 return -EINVAL; /* other tracers are enabled */ 3144 multitrc = 0; 3145 } else if (idx) { 3146 i = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B); 3147 if (G_TFCAPTUREMAX(i) > 256 && 3148 (t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A) & F_TFEN)) 3149 return -EINVAL; 3150 } 3151 3152 /* stop the tracer we'll be changing */ 3153 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); 3154 3155 /* disable tracing globally if running in the wrong single/multi mode */ 3156 cfg = t4_read_reg(adap, A_MPS_TRC_CFG); 3157 if ((cfg & F_TRCEN) && multitrc != (cfg & F_TRCMULTIFILTER)) { 3158 t4_write_reg(adap, A_MPS_TRC_CFG, cfg ^ F_TRCEN); 3159 t4_read_reg(adap, A_MPS_TRC_CFG); /* flush */ 3160 msleep(1); 3161 if (!(t4_read_reg(adap, A_MPS_TRC_CFG) & F_TRCFIFOEMPTY)) 3162 return -ETIMEDOUT; 3163 } 3164 /* 3165 * At this point either the tracing is enabled and in the right mode or 3166 * disabled. 3167 */ 3168 3169 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH); 3170 data_reg = A_MPS_TRC_FILTER0_MATCH + idx; 3171 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx; 3172 3173 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 3174 t4_write_reg(adap, data_reg, tp->data[i]); 3175 t4_write_reg(adap, mask_reg, ~tp->mask[i]); 3176 } 3177 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst, 3178 V_TFCAPTUREMAX(tp->snap_len) | 3179 V_TFMINPKTSIZE(tp->min_len)); 3180 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 3181 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | 3182 V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert)); 3183 3184 cfg &= ~F_TRCMULTIFILTER; 3185 t4_write_reg(adap, A_MPS_TRC_CFG, cfg | F_TRCEN | multitrc); 3186 out: t4_read_reg(adap, A_MPS_TRC_CFG); /* flush */ 3187 return 0; 3188 } 3189 3190 /** 3191 * t4_get_trace_filter - query one of the tracing filters 3192 * @adap: the adapter 3193 * @tp: the current trace filter parameters 3194 * @idx: which trace filter to query 3195 * @enabled: non-zero if the filter is enabled 3196 * 3197 * Returns the current settings of one of the HW tracing filters. 
3198 */ 3199 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, 3200 int *enabled) 3201 { 3202 u32 ctla, ctlb; 3203 int i, ofst = idx * 4; 3204 u32 data_reg, mask_reg; 3205 3206 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst); 3207 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst); 3208 3209 *enabled = !!(ctla & F_TFEN); 3210 tp->snap_len = G_TFCAPTUREMAX(ctlb); 3211 tp->min_len = G_TFMINPKTSIZE(ctlb); 3212 tp->skip_ofst = G_TFOFFSET(ctla); 3213 tp->skip_len = G_TFLENGTH(ctla); 3214 tp->invert = !!(ctla & F_TFINVERTMATCH); 3215 tp->port = G_TFPORT(ctla); 3216 3217 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx; 3218 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst; 3219 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst; 3220 3221 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 3222 tp->mask[i] = ~t4_read_reg(adap, mask_reg); 3223 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; 3224 } 3225 } 3226 3227 /** 3228 * t4_pmtx_get_stats - returns the HW stats from PMTX 3229 * @adap: the adapter 3230 * @cnt: where to store the count statistics 3231 * @cycles: where to store the cycle statistics 3232 * 3233 * Returns performance statistics from PMTX. 3234 */ 3235 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 3236 { 3237 int i; 3238 3239 for (i = 0; i < PM_NSTATS; i++) { 3240 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1); 3241 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT); 3242 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB); 3243 } 3244 } 3245 3246 /** 3247 * t4_pmrx_get_stats - returns the HW stats from PMRX 3248 * @adap: the adapter 3249 * @cnt: where to store the count statistics 3250 * @cycles: where to store the cycle statistics 3251 * 3252 * Returns performance statistics from PMRX. 3253 */ 3254 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 3255 { 3256 int i; 3257 3258 for (i = 0; i < PM_NSTATS; i++) { 3259 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1); 3260 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT); 3261 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB); 3262 } 3263 } 3264 3265 /** 3266 * get_mps_bg_map - return the buffer groups associated with a port 3267 * @adap: the adapter 3268 * @idx: the port index 3269 * 3270 * Returns a bitmap indicating which MPS buffer groups are associated 3271 * with the given port. Bit i is set if buffer group i is used by the 3272 * port. 3273 */ 3274 static unsigned int get_mps_bg_map(struct adapter *adap, int idx) 3275 { 3276 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL)); 3277 3278 if (n == 0) 3279 return idx == 0 ? 0xf : 0; 3280 if (n == 1) 3281 return idx < 2 ? (3 << (2 * idx)) : 0; 3282 return 1 << idx; 3283 } 3284 3285 /** 3286 * t4_get_port_stats - collect port statistics 3287 * @adap: the adapter 3288 * @idx: the port index 3289 * @p: the stats structure to fill 3290 * 3291 * Collect statistics related to the given port from HW. 
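 *
 * Example (editor's sketch): the MAC counters are cumulative, so a rate
 * monitor snapshots and diffs across a known interval:
 *
 *	struct port_stats now;
 *
 *	t4_get_port_stats(adap, port, &now);
 *	rx_bps = (now.rx_octets - prev.rx_octets) * 8 / secs;
 *	prev = now;
 *
 * where "prev" is the snapshot kept from the previous poll.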
3292 */ 3293 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) 3294 { 3295 u32 bgmap = get_mps_bg_map(adap, idx); 3296 3297 #define GET_STAT(name) \ 3298 t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)) 3299 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 3300 3301 p->tx_octets = GET_STAT(TX_PORT_BYTES); 3302 p->tx_frames = GET_STAT(TX_PORT_FRAMES); 3303 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); 3304 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); 3305 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); 3306 p->tx_error_frames = GET_STAT(TX_PORT_ERROR); 3307 p->tx_frames_64 = GET_STAT(TX_PORT_64B); 3308 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); 3309 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); 3310 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); 3311 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); 3312 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); 3313 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); 3314 p->tx_drop = GET_STAT(TX_PORT_DROP); 3315 p->tx_pause = GET_STAT(TX_PORT_PAUSE); 3316 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); 3317 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); 3318 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); 3319 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); 3320 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); 3321 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); 3322 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); 3323 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); 3324 3325 p->rx_octets = GET_STAT(RX_PORT_BYTES); 3326 p->rx_frames = GET_STAT(RX_PORT_FRAMES); 3327 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); 3328 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); 3329 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); 3330 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); 3331 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); 3332 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); 3333 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); 3334 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); 3335 p->rx_runt = GET_STAT(RX_PORT_LESS_64B); 3336 p->rx_frames_64 = GET_STAT(RX_PORT_64B); 3337 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); 3338 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); 3339 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); 3340 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); 3341 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); 3342 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); 3343 p->rx_pause = GET_STAT(RX_PORT_PAUSE); 3344 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); 3345 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); 3346 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); 3347 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); 3348 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); 3349 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); 3350 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); 3351 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); 3352 3353 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; 3354 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; 3355 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; 3356 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; 3357 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; 3358 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; 3359 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; 3360 p->rx_trunc3 = (bgmap & 8) ? 
GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; 3361 3362 #undef GET_STAT 3363 #undef GET_STAT_COM 3364 } 3365 3366 /** 3367 * t4_clr_port_stats - clear port statistics 3368 * @adap: the adapter 3369 * @idx: the port index 3370 * 3371 * Clear HW statistics for the given port. 3372 */ 3373 void t4_clr_port_stats(struct adapter *adap, int idx) 3374 { 3375 unsigned int i; 3376 u32 bgmap = get_mps_bg_map(adap, idx); 3377 3378 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; 3379 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) 3380 t4_write_reg(adap, PORT_REG(idx, i), 0); 3381 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; 3382 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) 3383 t4_write_reg(adap, PORT_REG(idx, i), 0); 3384 for (i = 0; i < 4; i++) 3385 if (bgmap & (1 << i)) { 3386 t4_write_reg(adap, 3387 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0); 3388 t4_write_reg(adap, 3389 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0); 3390 } 3391 } 3392 3393 /** 3394 * t4_get_lb_stats - collect loopback port statistics 3395 * @adap: the adapter 3396 * @idx: the loopback port index 3397 * @p: the stats structure to fill 3398 * 3399 * Return HW statistics for the given loopback port. 3400 */ 3401 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) 3402 { 3403 u32 bgmap = get_mps_bg_map(adap, idx); 3404 3405 #define GET_STAT(name) \ 3406 t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)) 3407 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 3408 3409 p->octets = GET_STAT(BYTES); 3410 p->frames = GET_STAT(FRAMES); 3411 p->bcast_frames = GET_STAT(BCAST); 3412 p->mcast_frames = GET_STAT(MCAST); 3413 p->ucast_frames = GET_STAT(UCAST); 3414 p->error_frames = GET_STAT(ERROR); 3415 3416 p->frames_64 = GET_STAT(64B); 3417 p->frames_65_127 = GET_STAT(65B_127B); 3418 p->frames_128_255 = GET_STAT(128B_255B); 3419 p->frames_256_511 = GET_STAT(256B_511B); 3420 p->frames_512_1023 = GET_STAT(512B_1023B); 3421 p->frames_1024_1518 = GET_STAT(1024B_1518B); 3422 p->frames_1519_max = GET_STAT(1519B_MAX); 3423 p->drop = t4_read_reg(adap, PORT_REG(idx, 3424 A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES)); 3425 3426 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; 3427 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; 3428 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; 3429 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; 3430 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; 3431 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; 3432 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; 3433 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; 3434 3435 #undef GET_STAT 3436 #undef GET_STAT_COM 3437 } 3438 3439 /** 3440 * t4_wol_magic_enable - enable/disable magic packet WoL 3441 * @adap: the adapter 3442 * @port: the physical port index 3443 * @addr: MAC address expected in magic packets, %NULL to disable 3444 * 3445 * Enables/disables magic packet wake-on-LAN for the selected port. 
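 *
 * Example (editor's sketch): arming magic-packet WoL on suspend and
 * disarming it on resume, with "hw_addr" standing in for wherever the
 * caller keeps the port's 6-byte station address:
 *
 *	t4_wol_magic_enable(adap, port, hw_addr);	/* arm */
 *	...
 *	t4_wol_magic_enable(adap, port, NULL);		/* disarm */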
3446 */ 3447 void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 3448 const u8 *addr) 3449 { 3450 if (addr) { 3451 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO), 3452 (addr[2] << 24) | (addr[3] << 16) | 3453 (addr[4] << 8) | addr[5]); 3454 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI), 3455 (addr[0] << 8) | addr[1]); 3456 } 3457 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN, 3458 V_MAGICEN(addr != NULL)); 3459 } 3460 3461 /** 3462 * t4_wol_pat_enable - enable/disable pattern-based WoL 3463 * @adap: the adapter 3464 * @port: the physical port index 3465 * @map: bitmap of which HW pattern filters to set 3466 * @mask0: byte mask for bytes 0-63 of a packet 3467 * @mask1: byte mask for bytes 64-127 of a packet 3468 * @crc: Ethernet CRC for selected bytes 3469 * @enable: enable/disable switch 3470 * 3471 * Sets the pattern filters indicated in @map to mask out the bytes 3472 * specified in @mask0/@mask1 in received packets and compare the CRC of 3473 * the resulting packet against @crc. If @enable is %true pattern-based 3474 * WoL is enabled, otherwise disabled. 3475 */ 3476 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, 3477 u64 mask0, u64 mask1, unsigned int crc, bool enable) 3478 { 3479 int i; 3480 3481 if (!enable) { 3482 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 3483 F_PATEN, 0); 3484 return 0; 3485 } 3486 if (map > 0xff) 3487 return -EINVAL; 3488 3489 #define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name) 3490 3491 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 3492 t4_write_reg(adap, EPIO_REG(DATA2), mask1); 3493 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); 3494 3495 for (i = 0; i < NWOL_PAT; i++, map >>= 1) { 3496 if (!(map & 1)) 3497 continue; 3498 3499 /* write byte masks */ 3500 t4_write_reg(adap, EPIO_REG(DATA0), mask0); 3501 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR); 3502 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 3503 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY) 3504 return -ETIMEDOUT; 3505 3506 /* write CRC */ 3507 t4_write_reg(adap, EPIO_REG(DATA0), crc); 3508 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR); 3509 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 3510 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY) 3511 return -ETIMEDOUT; 3512 } 3513 #undef EPIO_REG 3514 3515 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN); 3516 return 0; 3517 } 3518 3519 /** 3520 * t4_mk_filtdelwr - create a delete filter WR 3521 * @ftid: the filter ID 3522 * @wr: the filter work request to populate 3523 * @qid: ingress queue to receive the delete notification 3524 * 3525 * Creates a filter work request to delete the supplied filter. If @qid is 3526 * negative the delete notification is suppressed. 
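 *
 * Example (editor's sketch): deleting filter 17 with the completion
 * delivered to ingress queue 5, versus a silent delete:
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(17, &wr, 5);	/* reply arrives on IQ 5 */
 *	t4_mk_filtdelwr(17, &wr, -1);	/* notification suppressed */
 *
 * The caller is still responsible for handing the work request to HW.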
3527 */ 3528 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) 3529 { 3530 memset(wr, 0, sizeof(*wr)); 3531 wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR)); 3532 wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16)); 3533 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) | 3534 V_FW_FILTER_WR_NOREPLY(qid < 0)); 3535 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER); 3536 if (qid >= 0) 3537 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid)); 3538 } 3539 3540 #define INIT_CMD(var, cmd, rd_wr) do { \ 3541 (var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \ 3542 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \ 3543 (var).retval_len16 = htonl(FW_LEN16(var)); \ 3544 } while (0) 3545 3546 /** 3547 * t4_mdio_rd - read a PHY register through MDIO 3548 * @adap: the adapter 3549 * @mbox: mailbox to use for the FW command 3550 * @phy_addr: the PHY address 3551 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 3552 * @reg: the register to read 3553 * @valp: where to store the value 3554 * 3555 * Issues a FW command through the given mailbox to read a PHY register. 3556 */ 3557 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 3558 unsigned int mmd, unsigned int reg, unsigned int *valp) 3559 { 3560 int ret; 3561 struct fw_ldst_cmd c; 3562 3563 memset(&c, 0, sizeof(c)); 3564 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 3565 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); 3566 c.cycles_to_len16 = htonl(FW_LEN16(c)); 3567 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) | 3568 V_FW_LDST_CMD_MMD(mmd)); 3569 c.u.mdio.raddr = htons(reg); 3570 3571 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3572 if (ret == 0) 3573 *valp = ntohs(c.u.mdio.rval); 3574 return ret; 3575 } 3576 3577 /** 3578 * t4_mdio_wr - write a PHY register through MDIO 3579 * @adap: the adapter 3580 * @mbox: mailbox to use for the FW command 3581 * @phy_addr: the PHY address 3582 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 3583 * @reg: the register to write 3584 * @val: value to write 3585 * 3586 * Issues a FW command through the given mailbox to write a PHY register. 3587 */ 3588 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 3589 unsigned int mmd, unsigned int reg, unsigned int val) 3590 { 3591 struct fw_ldst_cmd c; 3592 3593 memset(&c, 0, sizeof(c)); 3594 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 3595 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); 3596 c.cycles_to_len16 = htonl(FW_LEN16(c)); 3597 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) | 3598 V_FW_LDST_CMD_MMD(mmd)); 3599 c.u.mdio.raddr = htons(reg); 3600 c.u.mdio.rval = htons(val); 3601 3602 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3603 } 3604 3605 /** 3606 * t4_sge_ctxt_rd - read an SGE context through FW 3607 * @adap: the adapter 3608 * @mbox: mailbox to use for the FW command 3609 * @cid: the context id 3610 * @ctype: the context type 3611 * @data: where to store the context data 3612 * 3613 * Issues a FW command through the given mailbox to read an SGE context.
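 *
 * Example (a sketch; the six u32s match the ctxt_data0..5 words copied
 * out below, cid and mbox are assumed to come from the caller):
 *
 *	u32 buf[6];
 *	int ret = t4_sge_ctxt_rd(adap, mbox, cid, CTXT_EGRESS, buf);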
3614 */ 3615 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, 3616 enum ctxt_type ctype, u32 *data) 3617 { 3618 int ret; 3619 struct fw_ldst_cmd c; 3620 3621 if (ctype == CTXT_EGRESS) 3622 ret = FW_LDST_ADDRSPC_SGE_EGRC; 3623 else if (ctype == CTXT_INGRESS) 3624 ret = FW_LDST_ADDRSPC_SGE_INGC; 3625 else if (ctype == CTXT_FLM) 3626 ret = FW_LDST_ADDRSPC_SGE_FLMC; 3627 else 3628 ret = FW_LDST_ADDRSPC_SGE_CONMC; 3629 3630 memset(&c, 0, sizeof(c)); 3631 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 3632 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret)); 3633 c.cycles_to_len16 = htonl(FW_LEN16(c)); 3634 c.u.idctxt.physid = htonl(cid); 3635 3636 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3637 if (ret == 0) { 3638 data[0] = ntohl(c.u.idctxt.ctxt_data0); 3639 data[1] = ntohl(c.u.idctxt.ctxt_data1); 3640 data[2] = ntohl(c.u.idctxt.ctxt_data2); 3641 data[3] = ntohl(c.u.idctxt.ctxt_data3); 3642 data[4] = ntohl(c.u.idctxt.ctxt_data4); 3643 data[5] = ntohl(c.u.idctxt.ctxt_data5); 3644 } 3645 return ret; 3646 } 3647 3648 /** 3649 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW 3650 * @adap: the adapter 3651 * @cid: the context id 3652 * @ctype: the context type 3653 * @data: where to store the context data 3654 * 3655 * Reads an SGE context directly, bypassing FW. This is only for 3656 * debugging when FW is unavailable. 3657 */ 3658 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, 3659 u32 *data) 3660 { 3661 int i, ret; 3662 3663 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype)); 3664 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1); 3665 if (!ret) 3666 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4) 3667 *data++ = t4_read_reg(adap, i); 3668 return ret; 3669 } 3670 3671 /** 3672 * t4_fw_hello - establish communication with FW 3673 * @adap: the adapter 3674 * @mbox: mailbox to use for the FW command 3675 * @evt_mbox: mailbox to receive async FW events 3676 * @master: specifies the caller's willingness to be the device master 3677 * @state: returns the current device state 3678 * 3679 * Issues a command to establish communication with FW. 3680 */ 3681 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 3682 enum dev_master master, enum dev_state *state) 3683 { 3684 int ret; 3685 struct fw_hello_cmd c; 3686 3687 memset(&c, 0, sizeof(c)); 3688 INIT_CMD(c, HELLO, WRITE); 3689 c.err_to_mbasyncnot = htonl( 3690 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | 3691 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | 3692 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 3693 M_FW_HELLO_CMD_MBMASTER) | 3694 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox)); 3695 3696 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3697 if (ret == 0 && state) { 3698 u32 v = ntohl(c.err_to_mbasyncnot); 3699 if (v & F_FW_HELLO_CMD_INIT) 3700 *state = DEV_STATE_INIT; 3701 else if (v & F_FW_HELLO_CMD_ERR) 3702 *state = DEV_STATE_ERR; 3703 else 3704 *state = DEV_STATE_UNINIT; 3705 return G_FW_HELLO_CMD_MBMASTER(v); 3706 } 3707 return ret; 3708 } 3709 3710 /** 3711 * t4_fw_bye - end communication with FW 3712 * @adap: the adapter 3713 * @mbox: mailbox to use for the FW command 3714 * 3715 * Issues a command to terminate communication with FW. 
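 *
 * Example (hypothetical teardown path; assumes the usual pairing with an
 * earlier t4_fw_hello and that MASTER_MAY is a member of enum dev_master):
 *
 *	enum dev_state state;
 *
 *	ret = t4_fw_hello(adap, mbox, evt_mbox, MASTER_MAY, &state);
 *	...
 *	t4_fw_bye(adap, mbox);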
3716 */ 3717 int t4_fw_bye(struct adapter *adap, unsigned int mbox) 3718 { 3719 struct fw_bye_cmd c; 3720 3721 memset(&c, 0, sizeof(c)); 3722 INIT_CMD(c, BYE, WRITE); 3723 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3724 } 3725 3726 /** 3727 * t4_early_init - ask FW to initialize the device 3728 * @adap: the adapter 3729 * @mbox: mailbox to use for the FW command 3730 * 3731 * Issues a command to FW to partially initialize the device. This 3732 * performs initialization that generally doesn't depend on user input. 3733 */ 3734 int t4_early_init(struct adapter *adap, unsigned int mbox) 3735 { 3736 struct fw_initialize_cmd c; 3737 3738 memset(&c, 0, sizeof(c)); 3739 INIT_CMD(c, INITIALIZE, WRITE); 3740 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3741 } 3742 3743 /** 3744 * t4_fw_reset - issue a reset to FW 3745 * @adap: the adapter 3746 * @mbox: mailbox to use for the FW command 3747 * @reset: specifies the type of reset to perform 3748 * 3749 * Issues a reset command of the specified type to FW. 3750 */ 3751 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) 3752 { 3753 struct fw_reset_cmd c; 3754 3755 memset(&c, 0, sizeof(c)); 3756 INIT_CMD(c, RESET, WRITE); 3757 c.val = htonl(reset); 3758 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3759 } 3760 3761 /** 3762 * t4_query_params - query FW or device parameters 3763 * @adap: the adapter 3764 * @mbox: mailbox to use for the FW command 3765 * @pf: the PF 3766 * @vf: the VF 3767 * @nparams: the number of parameters 3768 * @params: the parameter names 3769 * @val: the parameter values 3770 * 3771 * Reads the value of FW or device parameters. Up to 7 parameters can be 3772 * queried at once. 3773 */ 3774 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 3775 unsigned int vf, unsigned int nparams, const u32 *params, 3776 u32 *val) 3777 { 3778 int i, ret; 3779 struct fw_params_cmd c; 3780 __be32 *p = &c.param[0].mnem; 3781 3782 if (nparams > 7) 3783 return -EINVAL; 3784 3785 memset(&c, 0, sizeof(c)); 3786 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | 3787 F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) | 3788 V_FW_PARAMS_CMD_VFN(vf)); 3789 c.retval_len16 = htonl(FW_LEN16(c)); 3790 3791 for (i = 0; i < nparams; i++, p += 2) 3792 *p = htonl(*params++); 3793 3794 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3795 if (ret == 0) 3796 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) 3797 *val++ = ntohl(*p); 3798 return ret; 3799 } 3800 3801 /** 3802 * t4_set_params - set FW or device parameters 3803 * @adap: the adapter 3804 * @mbox: mailbox to use for the FW command 3805 * @pf: the PF 3806 * @vf: the VF 3807 * @nparams: the number of parameters 3808 * @params: the parameter names 3809 * @val: the parameter values 3810 * 3811 * Sets the value of FW or device parameters. Up to 7 parameters can be 3812 * specified at once.
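 *
 * Example (a sketch; the parameter mnemonic would be encoded with the
 * V_FW_PARAMS_* macros from t4fw_interface.h, elided here):
 *
 *	u32 param = ...;
 *	u32 val = ...;
 *	ret = t4_set_params(adap, mbox, pf, vf, 1, &param, &val);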
3813 */ 3814 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 3815 unsigned int vf, unsigned int nparams, const u32 *params, 3816 const u32 *val) 3817 { 3818 struct fw_params_cmd c; 3819 __be32 *p = &c.param[0].mnem; 3820 3821 if (nparams > 7) 3822 return -EINVAL; 3823 3824 memset(&c, 0, sizeof(c)); 3825 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST | 3826 F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) | 3827 V_FW_PARAMS_CMD_VFN(vf)); 3828 c.retval_len16 = htonl(FW_LEN16(c)); 3829 3830 while (nparams--) { 3831 *p++ = htonl(*params++); 3832 *p++ = htonl(*val++); 3833 } 3834 3835 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3836 } 3837 3838 /** 3839 * t4_cfg_pfvf - configure PF/VF resource limits 3840 * @adap: the adapter 3841 * @mbox: mailbox to use for the FW command 3842 * @pf: the PF being configured 3843 * @vf: the VF being configured 3844 * @txq: the max number of egress queues 3845 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 3846 * @rxqi: the max number of interrupt-capable ingress queues 3847 * @rxq: the max number of interruptless ingress queues 3848 * @tc: the PCI traffic class 3849 * @vi: the max number of virtual interfaces 3850 * @cmask: the channel access rights mask for the PF/VF 3851 * @pmask: the port access rights mask for the PF/VF 3852 * @nexact: the maximum number of exact MPS filters 3853 * @rcaps: read capabilities 3854 * @wxcaps: write/execute capabilities 3855 * 3856 * Configures resource limits and capabilities for a physical or virtual 3857 * function. 3858 */ 3859 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 3860 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 3861 unsigned int rxqi, unsigned int rxq, unsigned int tc, 3862 unsigned int vi, unsigned int cmask, unsigned int pmask, 3863 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 3864 { 3865 struct fw_pfvf_cmd c; 3866 3867 memset(&c, 0, sizeof(c)); 3868 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST | 3869 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | 3870 V_FW_PFVF_CMD_VFN(vf)); 3871 c.retval_len16 = htonl(FW_LEN16(c)); 3872 c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) | 3873 V_FW_PFVF_CMD_NIQ(rxq)); 3874 c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) | 3875 V_FW_PFVF_CMD_PMASK(pmask) | 3876 V_FW_PFVF_CMD_NEQ(txq)); 3877 c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) | 3878 V_FW_PFVF_CMD_NEXACTF(nexact)); 3879 c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) | 3880 V_FW_PFVF_CMD_WX_CAPS(wxcaps) | 3881 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); 3882 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3883 } 3884 3885 /** 3886 * t4_alloc_vi - allocate a virtual interface 3887 * @adap: the adapter 3888 * @mbox: mailbox to use for the FW command 3889 * @port: physical port associated with the VI 3890 * @pf: the PF owning the VI 3891 * @vf: the VF owning the VI 3892 * @nmac: number of MAC addresses needed (1 to 5) 3893 * @mac: the MAC addresses of the VI 3894 * @rss_size: size of RSS table slice associated with this VI 3895 * 3896 * Allocates a virtual interface for the given physical port. If @mac is 3897 * not %NULL, it contains the MAC addresses of the VI as assigned by FW. 3898 * @mac should be large enough to hold @nmac Ethernet addresses; they are 3899 * stored consecutively, so the space needed is @nmac * 6 bytes. 3900 * Returns a negative error number or the non-negative VI id.
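 *
 * Example (a sketch of a single-MAC allocation, mirroring the call in
 * t4_port_init at the end of this file; error handling elided):
 *
 *	u8 mac[6];
 *	unsigned int rss_size;
 *	int viid = t4_alloc_vi(adap, mbox, port, pf, 0, 1, mac, &rss_size);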
3901 */ 3902 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 3903 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 3904 unsigned int *rss_size) 3905 { 3906 int ret; 3907 struct fw_vi_cmd c; 3908 3909 memset(&c, 0, sizeof(c)); 3910 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST | 3911 F_FW_CMD_WRITE | F_FW_CMD_EXEC | 3912 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf)); 3913 c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c)); 3914 c.portid_pkd = V_FW_VI_CMD_PORTID(port); 3915 c.nmac = nmac - 1; 3916 3917 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3918 if (ret) 3919 return ret; 3920 3921 if (mac) { 3922 memcpy(mac, c.mac, sizeof(c.mac)); 3923 switch (nmac) { 3924 case 5: 3925 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); /* FALLTHROUGH */ 3926 case 4: 3927 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); /* FALLTHROUGH */ 3928 case 3: 3929 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); /* FALLTHROUGH */ 3930 case 2: 3931 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); 3932 } 3933 } 3934 if (rss_size) 3935 *rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd)); 3936 return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid)); 3937 } 3938 3939 /** 3940 * t4_free_vi - free a virtual interface 3941 * @adap: the adapter 3942 * @mbox: mailbox to use for the FW command 3943 * @pf: the PF owning the VI 3944 * @vf: the VF owning the VI 3945 * @viid: virtual interface identifier 3946 * 3947 * Frees a previously allocated virtual interface. 3948 */ 3949 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, 3950 unsigned int vf, unsigned int viid) 3951 { 3952 struct fw_vi_cmd c; 3953 3954 memset(&c, 0, sizeof(c)); 3955 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | 3956 F_FW_CMD_REQUEST | 3957 F_FW_CMD_EXEC | 3958 V_FW_VI_CMD_PFN(pf) | 3959 V_FW_VI_CMD_VFN(vf)); 3960 c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c)); 3961 c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid)); 3962 3963 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3964 } 3965 3966 /** 3967 * t4_set_rxmode - set Rx properties of a virtual interface 3968 * @adap: the adapter 3969 * @mbox: mailbox to use for the FW command 3970 * @viid: the VI id 3971 * @mtu: the new MTU or -1 3972 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 3973 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 3974 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 3975 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change 3976 * @sleep_ok: if true we may sleep while awaiting command completion 3977 * 3978 * Sets Rx properties of a virtual interface.
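 *
 * Example (hypothetical; enables promiscuous mode and leaves every other
 * property unchanged by passing -1):
 *
 *	t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);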
3979 */ 3980 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 3981 int mtu, int promisc, int all_multi, int bcast, int vlanex, 3982 bool sleep_ok) 3983 { 3984 struct fw_vi_rxmode_cmd c; 3985 3986 /* convert to FW values */ 3987 if (mtu < 0) 3988 mtu = M_FW_VI_RXMODE_CMD_MTU; 3989 if (promisc < 0) 3990 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; 3991 if (all_multi < 0) 3992 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; 3993 if (bcast < 0) 3994 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; 3995 if (vlanex < 0) 3996 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; 3997 3998 memset(&c, 0, sizeof(c)); 3999 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST | 4000 F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid)); 4001 c.retval_len16 = htonl(FW_LEN16(c)); 4002 c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) | 4003 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 4004 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 4005 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | 4006 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); 4007 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 4008 } 4009 4010 /** 4011 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 4012 * @adap: the adapter 4013 * @mbox: mailbox to use for the FW command 4014 * @viid: the VI id 4015 * @free: if true, any existing filters for this VI id are first removed 4016 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 4017 * @addr: the MAC address(es) 4018 * @idx: where to store the index of each allocated filter 4019 * @hash: pointer to hash address filter bitmap 4020 * @sleep_ok: call is allowed to sleep 4021 * 4022 * Allocates an exact-match filter for each of the supplied addresses and 4023 * sets it to the corresponding address. If @idx is not %NULL, it should 4024 * have at least @naddr entries, each of which will be set to the index of 4025 * the filter allocated for the corresponding MAC address. If a filter 4026 * could not be allocated for an address, its index is set to 0xffff. 4027 * If @hash is not %NULL, addresses that fail to allocate an exact filter 4028 * are hashed and recorded in the hash filter bitmap pointed at by @hash. 4029 * 4030 * Returns a negative error number or the number of filters allocated. 4031 */ 4032 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, 4033 unsigned int viid, bool free, unsigned int naddr, 4034 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) 4035 { 4036 int offset, ret = 0; 4037 struct fw_vi_mac_cmd c; 4038 unsigned int nfilters = 0; 4039 unsigned int rem = naddr; 4040 4041 if (naddr > FW_CLS_TCAM_NUM_ENTRIES) 4042 return -EINVAL; 4043 4044 for (offset = 0; offset < naddr; /**/) { 4045 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) 4046 ? rem 4047 : ARRAY_SIZE(c.u.exact)); 4048 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 4049 u.exact[fw_naddr]), 16); 4050 struct fw_vi_mac_exact *p; 4051 int i; 4052 4053 memset(&c, 0, sizeof(c)); 4054 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | 4055 F_FW_CMD_REQUEST | 4056 F_FW_CMD_WRITE | 4057 V_FW_CMD_EXEC(free) | 4058 V_FW_VI_MAC_CMD_VIID(viid)); 4059 c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) | 4060 V_FW_CMD_LEN16(len16)); 4061 4062 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 4063 p->valid_to_idx = htons( 4064 F_FW_VI_MAC_CMD_VALID | 4065 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); 4066 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); 4067 } 4068 4069 /* 4070 * It's okay if we run out of space in our MAC address arena.
4071 * Some of the addresses we submit may get stored, so we need 4072 * to run through the reply to see what the results were ... 4073 */ 4074 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); 4075 if (ret && ret != -FW_ENOMEM) 4076 break; 4077 4078 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 4079 u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx)); 4080 4081 if (idx) 4082 idx[offset+i] = (index >= FW_CLS_TCAM_NUM_ENTRIES 4083 ? 0xffff 4084 : index); 4085 if (index < FW_CLS_TCAM_NUM_ENTRIES) 4086 nfilters++; 4087 else if (hash) 4088 *hash |= (1ULL << hash_mac_addr(addr[offset+i])); 4089 } 4090 4091 free = false; 4092 offset += fw_naddr; 4093 rem -= fw_naddr; 4094 } 4095 4096 if (ret == 0 || ret == -FW_ENOMEM) 4097 ret = nfilters; 4098 return ret; 4099 } 4100 4101 /** 4102 * t4_change_mac - modifies the exact-match filter for a MAC address 4103 * @adap: the adapter 4104 * @mbox: mailbox to use for the FW command 4105 * @viid: the VI id 4106 * @idx: index of existing filter for old value of MAC address, or -1 4107 * @addr: the new MAC address value 4108 * @persist: whether a new MAC allocation should be persistent 4109 * @add_smt: if true, also add the address to the HW SMT 4110 * 4111 * Modifies an exact-match filter and sets it to the new MAC address if 4112 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 4113 * latter case the address is added persistently if @persist is %true. 4114 * 4115 * Note that in general it is not possible to modify the value of a given 4116 * filter, so the generic way to modify an address filter is to free the one 4117 * being used by the old address value and allocate a new filter for the 4118 * new address value. 4119 * 4120 * Returns a negative error number or the index of the filter with the new 4121 * MAC value. Note that this index may differ from @idx. 4122 */ 4123 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 4124 int idx, const u8 *addr, bool persist, bool add_smt) 4125 { 4126 int ret, mode; 4127 struct fw_vi_mac_cmd c; 4128 struct fw_vi_mac_exact *p = c.u.exact; 4129 4130 if (idx < 0) /* new allocation */ 4131 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 4132 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; 4133 4134 memset(&c, 0, sizeof(c)); 4135 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST | 4136 F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid)); 4137 c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1)); 4138 p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID | 4139 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | 4140 V_FW_VI_MAC_CMD_IDX(idx)); 4141 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 4142 4143 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4144 if (ret == 0) { 4145 ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx)); 4146 if (ret >= FW_CLS_TCAM_NUM_ENTRIES) 4147 ret = -ENOMEM; 4148 } 4149 return ret; 4150 } 4151 4152 /** 4153 * t4_set_addr_hash - program the MAC inexact-match hash filter 4154 * @adap: the adapter 4155 * @mbox: mailbox to use for the FW command 4156 * @viid: the VI id 4157 * @ucast: whether the hash filter should also match unicast addresses 4158 * @vec: the value to be written to the hash filter 4159 * @sleep_ok: call is allowed to sleep 4160 * 4161 * Sets the 64-bit inexact-match hash filter for a virtual interface.
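 *
 * Example (a sketch; the vector would normally be accumulated from
 * hash_mac_addr() results, as in t4_alloc_mac_filt above):
 *
 *	u64 vec = 1ULL << hash_mac_addr(addr);
 *	t4_set_addr_hash(adap, mbox, viid, false, vec, true);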
4162 */ 4163 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 4164 bool ucast, u64 vec, bool sleep_ok) 4165 { 4166 struct fw_vi_mac_cmd c; 4167 4168 memset(&c, 0, sizeof(c)); 4169 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST | 4170 F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid)); 4171 c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN | 4172 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | 4173 V_FW_CMD_LEN16(1)); 4174 c.u.hash.hashvec = cpu_to_be64(vec); 4175 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 4176 } 4177 4178 /** 4179 * t4_enable_vi - enable/disable a virtual interface 4180 * @adap: the adapter 4181 * @mbox: mailbox to use for the FW command 4182 * @viid: the VI id 4183 * @rx_en: 1=enable Rx, 0=disable Rx 4184 * @tx_en: 1=enable Tx, 0=disable Tx 4185 * 4186 * Enables/disables a virtual interface. 4187 */ 4188 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 4189 bool rx_en, bool tx_en) 4190 { 4191 struct fw_vi_enable_cmd c; 4192 4193 memset(&c, 0, sizeof(c)); 4194 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST | 4195 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid)); 4196 c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) | 4197 V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); 4198 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4199 } 4200 4201 /** 4202 * t4_identify_port - identify a VI's port by blinking its LED 4203 * @adap: the adapter 4204 * @mbox: mailbox to use for the FW command 4205 * @viid: the VI id 4206 * @nblinks: how many times to blink LED at 2.5 Hz 4207 * 4208 * Identifies a VI's port by blinking its LED. 4209 */ 4210 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, 4211 unsigned int nblinks) 4212 { 4213 struct fw_vi_enable_cmd c; 4214 4215 memset(&c, 0, sizeof(c)); 4216 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST | 4217 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid)); 4218 c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); 4219 c.blinkdur = htons(nblinks); 4220 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4221 } 4222 4223 /** 4224 * t4_iq_start_stop - enable/disable an ingress queue and its FLs 4225 * @adap: the adapter 4226 * @mbox: mailbox to use for the FW command 4227 * @start: %true to enable the queues, %false to disable them 4228 * @pf: the PF owning the queues 4229 * @vf: the VF owning the queues 4230 * @iqid: ingress queue id 4231 * @fl0id: FL0 queue id or 0xffff if no attached FL0 4232 * @fl1id: FL1 queue id or 0xffff if no attached FL1 4233 * 4234 * Starts or stops an ingress queue and its associated FLs, if any.
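 *
 * Example (hypothetical; stops an ingress queue that has no attached
 * free lists, hence the 0xffff FL ids):
 *
 *	t4_iq_start_stop(adap, mbox, false, pf, 0, iqid, 0xffff, 0xffff);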
4235 */ 4236 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, 4237 unsigned int pf, unsigned int vf, unsigned int iqid, 4238 unsigned int fl0id, unsigned int fl1id) 4239 { 4240 struct fw_iq_cmd c; 4241 4242 memset(&c, 0, sizeof(c)); 4243 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 4244 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 4245 V_FW_IQ_CMD_VFN(vf)); 4246 c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) | 4247 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c)); 4248 c.iqid = htons(iqid); 4249 c.fl0id = htons(fl0id); 4250 c.fl1id = htons(fl1id); 4251 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4252 } 4253 4254 /** 4255 * t4_iq_free - free an ingress queue and its FLs 4256 * @adap: the adapter 4257 * @mbox: mailbox to use for the FW command 4258 * @pf: the PF owning the queues 4259 * @vf: the VF owning the queues 4260 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 4261 * @iqid: ingress queue id 4262 * @fl0id: FL0 queue id or 0xffff if no attached FL0 4263 * @fl1id: FL1 queue id or 0xffff if no attached FL1 4264 * 4265 * Frees an ingress queue and its associated FLs, if any. 4266 */ 4267 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 4268 unsigned int vf, unsigned int iqtype, unsigned int iqid, 4269 unsigned int fl0id, unsigned int fl1id) 4270 { 4271 struct fw_iq_cmd c; 4272 4273 memset(&c, 0, sizeof(c)); 4274 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 4275 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 4276 V_FW_IQ_CMD_VFN(vf)); 4277 c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c)); 4278 c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype)); 4279 c.iqid = htons(iqid); 4280 c.fl0id = htons(fl0id); 4281 c.fl1id = htons(fl1id); 4282 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4283 } 4284 4285 /** 4286 * t4_eth_eq_free - free an Ethernet egress queue 4287 * @adap: the adapter 4288 * @mbox: mailbox to use for the FW command 4289 * @pf: the PF owning the queue 4290 * @vf: the VF owning the queue 4291 * @eqid: egress queue id 4292 * 4293 * Frees an Ethernet egress queue. 4294 */ 4295 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 4296 unsigned int vf, unsigned int eqid) 4297 { 4298 struct fw_eq_eth_cmd c; 4299 4300 memset(&c, 0, sizeof(c)); 4301 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 4302 F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) | 4303 V_FW_EQ_ETH_CMD_VFN(vf)); 4304 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); 4305 c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid)); 4306 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4307 } 4308 4309 /** 4310 * t4_ctrl_eq_free - free a control egress queue 4311 * @adap: the adapter 4312 * @mbox: mailbox to use for the FW command 4313 * @pf: the PF owning the queue 4314 * @vf: the VF owning the queue 4315 * @eqid: egress queue id 4316 * 4317 * Frees a control egress queue. 
4318 */ 4319 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 4320 unsigned int vf, unsigned int eqid) 4321 { 4322 struct fw_eq_ctrl_cmd c; 4323 4324 memset(&c, 0, sizeof(c)); 4325 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 4326 F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) | 4327 V_FW_EQ_CTRL_CMD_VFN(vf)); 4328 c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); 4329 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid)); 4330 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4331 } 4332 4333 /** 4334 * t4_ofld_eq_free - free an offload egress queue 4335 * @adap: the adapter 4336 * @mbox: mailbox to use for the FW command 4337 * @pf: the PF owning the queue 4338 * @vf: the VF owning the queue 4339 * @eqid: egress queue id 4340 * 4341 * Frees an offload egress queue. 4342 */ 4343 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 4344 unsigned int vf, unsigned int eqid) 4345 { 4346 struct fw_eq_ofld_cmd c; 4347 4348 memset(&c, 0, sizeof(c)); 4349 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 4350 F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) | 4351 V_FW_EQ_OFLD_CMD_VFN(vf)); 4352 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); 4353 c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid)); 4354 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4355 } 4356 4357 /** 4358 * t4_handle_fw_rpl - process a FW reply message 4359 * @adap: the adapter 4360 * @rpl: start of the FW message 4361 * 4362 * Processes a FW message, such as link state change messages. 4363 */ 4364 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) 4365 { 4366 u8 opcode = *(const u8 *)rpl; 4367 4368 if (opcode == FW_PORT_CMD) { /* link/module state change message */ 4369 int speed = 0, fc = 0, i; 4370 const struct fw_port_cmd *p = (const void *)rpl; 4371 int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid)); 4372 struct port_info *pi = NULL; 4373 struct link_config *lc; 4374 u32 stat = ntohl(p->u.info.lstatus_to_modtype); 4375 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0; 4376 u32 mod = G_FW_PORT_CMD_MODTYPE(stat); 4377 4378 if (stat & F_FW_PORT_CMD_RXPAUSE) 4379 fc |= PAUSE_RX; 4380 if (stat & F_FW_PORT_CMD_TXPAUSE) 4381 fc |= PAUSE_TX; 4382 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) 4383 speed = SPEED_100; 4384 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) 4385 speed = SPEED_1000; 4386 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) 4387 speed = SPEED_10000; 4388 4389 for_each_port(adap, i) { 4390 pi = adap2pinfo(adap, i); 4391 if (pi->tx_chan == chan) 4392 break; 4393 } 4394 lc = &pi->link_cfg; 4395 4396 if (link_ok != lc->link_ok || speed != lc->speed || 4397 fc != lc->fc) { /* something changed */ 4398 lc->link_ok = link_ok; 4399 lc->speed = speed; 4400 lc->fc = fc; 4401 t4_os_link_changed(adap, i, link_ok); 4402 } 4403 if (mod != pi->mod_type) { 4404 pi->mod_type = mod; 4405 t4_os_portmod_changed(adap, i); 4406 } 4407 } 4408 return 0; 4409 } 4410 4411 /** 4412 * get_pci_mode - determine a card's PCI mode 4413 * @adapter: the adapter 4414 * @p: where to store the PCI settings 4415 * 4416 * Determines a card's PCI mode and associated parameters, such as speed 4417 * and width.
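 *
 * The fields follow the PCIe Link Status register encoding: a speed of
 * 1 means 2.5 GT/s and 2 means 5.0 GT/s, and the width is the number of
 * negotiated lanes.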
4418 */ 4419 static void __devinit get_pci_mode(struct adapter *adapter, 4420 struct pci_params *p) 4421 { 4422 u16 val; 4423 u32 pcie_cap; 4424 4425 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 4426 if (pcie_cap) { 4427 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val); 4428 p->speed = val & PCI_EXP_LNKSTA_CLS; 4429 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; 4430 } 4431 } 4432 4433 /** 4434 * init_link_config - initialize a link's SW state 4435 * @lc: structure holding the link state 4436 * @caps: link capabilities 4437 * 4438 * Initializes the SW state maintained for each link, including the link's 4439 * capabilities and default speed/flow-control/autonegotiation settings. 4440 */ 4441 static void __devinit init_link_config(struct link_config *lc, 4442 unsigned int caps) 4443 { 4444 lc->supported = caps; 4445 lc->requested_speed = 0; 4446 lc->speed = 0; 4447 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; 4448 if (lc->supported & FW_PORT_CAP_ANEG) { 4449 lc->advertising = lc->supported & ADVERT_MASK; 4450 lc->autoneg = AUTONEG_ENABLE; 4451 lc->requested_fc |= PAUSE_AUTONEG; 4452 } else { 4453 lc->advertising = 0; 4454 lc->autoneg = AUTONEG_DISABLE; 4455 } 4456 } 4457 4458 static int __devinit wait_dev_ready(struct adapter *adap) 4459 { 4460 u32 whoami; 4461 4462 whoami = t4_read_reg(adap, A_PL_WHOAMI); 4463 4464 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS) 4465 return 0; 4466 4467 msleep(500); 4468 whoami = t4_read_reg(adap, A_PL_WHOAMI); 4469 return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS 4470 ? 0 : -EIO); 4471 } 4472 4473 static int __devinit get_flash_params(struct adapter *adapter) 4474 { 4475 int ret; 4476 u32 info = 0; 4477 4478 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID); 4479 if (!ret) 4480 ret = sf1_read(adapter, 3, 0, 1, &info); 4481 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 4482 if (ret < 0) 4483 return ret; 4484 4485 if ((info & 0xff) != 0x20) /* not a Numonix flash */ 4486 return -EINVAL; 4487 info >>= 16; /* log2 of size */ 4488 if (info >= 0x14 && info < 0x18) 4489 adapter->params.sf_nsec = 1 << (info - 16); 4490 else if (info == 0x18) 4491 adapter->params.sf_nsec = 64; 4492 else 4493 return -EINVAL; 4494 adapter->params.sf_size = 1 << info; 4495 return 0; 4496 } 4497 4498 /** 4499 * t4_prep_adapter - prepare SW and HW for operation 4500 * @adapter: the adapter 4502 * 4503 * Initialize adapter SW state for the various HW modules, set initial 4504 * values for some adapter tunables, take PHYs out of reset, and 4505 * initialize the MDIO interface. 4506 */ 4507 int __devinit t4_prep_adapter(struct adapter *adapter) 4508 { 4509 int ret; 4510 4511 ret = wait_dev_ready(adapter); 4512 if (ret < 0) 4513 return ret; 4514 4515 get_pci_mode(adapter, &adapter->params.pci); 4516 4517 adapter->params.rev = t4_read_reg(adapter, A_PL_REV); 4518 adapter->params.pci.vpd_cap_addr = 4519 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD); 4520 4521 ret = get_flash_params(adapter); 4522 if (ret < 0) 4523 return ret; 4524 4525 ret = get_vpd_params(adapter, &adapter->params.vpd); 4526 if (ret < 0) 4527 return ret; 4528 4529 if (t4_read_reg(adapter, A_SGE_PC0_REQ_BIST_CMD) != 0xffffffff) { 4530 adapter->params.cim_la_size = 2 * CIMLA_SIZE; 4531 } else { 4532 adapter->params.cim_la_size = CIMLA_SIZE; 4533 } 4534 4535 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 4536 4537 /* 4538 * Default port and clock for debugging in case we can't reach FW.
4539 */ 4540 adapter->params.nports = 1; 4541 adapter->params.portvec = 1; 4542 adapter->params.vpd.cclk = 50000; 4543 4544 return 0; 4545 } 4546 4547 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf) 4548 { 4549 u8 addr[6]; 4550 int ret, i, j; 4551 struct fw_port_cmd c; 4552 unsigned int rss_size; 4553 adapter_t *adap = p->adapter; 4554 4555 memset(&c, 0, sizeof(c)); 4556 4557 for (i = 0, j = -1; i <= p->port_id; i++) { 4558 do { 4559 j++; 4560 } while ((adap->params.portvec & (1 << j)) == 0); 4561 } 4562 4563 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | 4564 F_FW_CMD_REQUEST | F_FW_CMD_READ | 4565 V_FW_PORT_CMD_PORTID(j)); 4566 c.action_to_len16 = htonl( 4567 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | 4568 FW_LEN16(c)); 4569 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4570 if (ret) 4571 return ret; 4572 4573 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); 4574 if (ret < 0) 4575 return ret; 4576 4577 p->viid = ret; 4578 p->tx_chan = j; 4579 p->lport = j; 4580 p->rss_size = rss_size; 4581 t4_os_set_hw_addr(adap, p->port_id, addr); 4582 4583 ret = ntohl(c.u.info.lstatus_to_modtype); 4584 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ? 4585 G_FW_PORT_CMD_MDIOADDR(ret) : -1; 4586 p->port_type = G_FW_PORT_CMD_PTYPE(ret); 4587 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret); 4588 4589 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); 4590 4591 return 0; 4592 } 4593
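
/*
 * Example bring-up order (a sketch only, not an in-tree attach routine;
 * error handling and locking are elided, and MASTER_MAY is assumed to be
 * a member of enum dev_master):
 *
 *	enum dev_state state;
 *
 *	t4_prep_adapter(adap);
 *	t4_fw_hello(adap, mbox, evt_mbox, MASTER_MAY, &state);
 *	if (state != DEV_STATE_INIT)
 *		t4_early_init(adap, mbox);
 *	for_each_port(adap, i)
 *		t4_port_init(adap2pinfo(adap, i), mbox, pf, 0);
 */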